hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d8d2b3f0bd77b8bc17ba513edbfa9199a11e625d | 708 | py | Python | demo/logging_demo.py | doppleware/vCenterShell_test | c91870169c5780e5c561b2ae682991af20257c4f | [
"Apache-2.0"
] | null | null | null | demo/logging_demo.py | doppleware/vCenterShell_test | c91870169c5780e5c561b2ae682991af20257c4f | [
"Apache-2.0"
] | null | null | null | demo/logging_demo.py | doppleware/vCenterShell_test | c91870169c5780e5c561b2ae682991af20257c4f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Pretty much trivial logging demo
Default log file - './logs/vCenter.log'
"""
from common.logger import getLogger
from common.logger import configure_loglevel
_logger = getLogger(__name__) # Default logger is using
# _logger = getLogger("vCenterShell") # for Shell App itself
# _logger = getLogger("vCenterCommon") # for Common Utilises
# ONLY IF YOU WANTED CONFIGURE LOG MANUALLY
configure_loglevel("INFO", "INFO", "../../logs/vCenter.log")
if __name__ == "__main__":
_logger.debug("DEBUG SHOULD BE SKIPPED")
_logger.info("INFO IS OK")
_logger.warn("WARNING IS OK")
_logger.error("ERROR IS OK!!!")
_logger.critical("CRITICAL IS OK ?!!!!") | 30.782609 | 65 | 0.686441 | # -*- coding: utf-8 -*-
"""
Pretty much trivial logging demo
Default log file - './logs/vCenter.log'
"""
from common.logger import getLogger
from common.logger import configure_loglevel
_logger = getLogger(__name__) # Default logger is using
# _logger = getLogger("vCenterShell") # for Shell App itself
# _logger = getLogger("vCenterCommon") # for Common Utilises
# ONLY IF YOU WANTED CONFIGURE LOG MANUALLY
configure_loglevel("INFO", "INFO", "../../logs/vCenter.log")
if __name__ == "__main__":
_logger.debug("DEBUG SHOULD BE SKIPPED")
_logger.info("INFO IS OK")
_logger.warn("WARNING IS OK")
_logger.error("ERROR IS OK!!!")
_logger.critical("CRITICAL IS OK ?!!!!") | 0 | 0 | 0 |
f4d7cf1f1b5f718ad348ade9d69c850855ba356a | 223 | py | Python | array_api_tests/meta/test_utils.py | data-apis/array-api-tests | 797537e4d5c8d3ceee1ee2f2cffdb1dad79e362f | [
"MIT"
] | 30 | 2020-11-10T15:38:35.000Z | 2022-03-31T04:24:05.000Z | array_api_tests/meta/test_utils.py | data-apis/array-api-tests | 797537e4d5c8d3ceee1ee2f2cffdb1dad79e362f | [
"MIT"
] | 83 | 2021-05-18T15:33:49.000Z | 2022-03-24T18:07:05.000Z | array_api_tests/meta/test_utils.py | data-apis/array-api-tests | 797537e4d5c8d3ceee1ee2f2cffdb1dad79e362f | [
"MIT"
] | 21 | 2021-01-25T17:52:15.000Z | 2022-03-31T04:24:11.000Z | from ..test_signatures import extension_module
| 22.3 | 47 | 0.811659 | from ..test_signatures import extension_module
def test_extension_module_is_extension():
assert extension_module('linalg')
def test_extension_func_is_not_extension():
assert not extension_module('linalg.cross')
| 128 | 0 | 46 |
41c0ad69ca674a52715ca1f5adce90cb84fed50c | 489 | py | Python | proma/clients/filters.py | erickgnavar/Proma | 159051f4247700166f063075b3819ae426f6d337 | [
"MIT"
] | 3 | 2018-01-22T08:50:38.000Z | 2021-07-16T04:08:28.000Z | proma/clients/filters.py | erickgnavar/Proma | 159051f4247700166f063075b3819ae426f6d337 | [
"MIT"
] | 13 | 2019-05-27T03:08:29.000Z | 2020-01-03T03:36:04.000Z | proma/clients/filters.py | erickgnavar/Proma | 159051f4247700166f063075b3819ae426f6d337 | [
"MIT"
] | 1 | 2019-10-03T17:52:29.000Z | 2019-10-03T17:52:29.000Z | import django_filters
from proma.common.helpers import CommonFilterHelper
from .models import Client
| 24.45 | 62 | 0.699387 | import django_filters
from proma.common.helpers import CommonFilterHelper
from .models import Client
class ClientFilter(django_filters.FilterSet):
name = django_filters.CharFilter(lookup_expr="icontains")
alias = django_filters.CharFilter(lookup_expr="icontains")
class Meta:
model = Client
fields = ("status", "alias", "name")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = CommonFilterHelper()
| 100 | 262 | 23 |
745c8a3b47a11f802f9fea02b2407bc6909332df | 3,671 | py | Python | StudyWatch_SDK_healthwearableApp/Source/sdk/inc/m2m2/master_definitions/ps_application_interface.py | analogdevicesinc/ApplicationsWaveTool | 0c1f236dd0745caa3187841ee1a882f209ac3ebe | [
"Apache-2.0"
] | 2 | 2019-03-11T15:24:51.000Z | 2022-03-07T09:42:05.000Z | M4_Eval_SDK/Source/inc/m2m2/master_definitions/ps_application_interface.py | analogdevicesinc/ApplicationsWaveTool | 0c1f236dd0745caa3187841ee1a882f209ac3ebe | [
"Apache-2.0"
] | null | null | null | M4_Eval_SDK/Source/inc/m2m2/master_definitions/ps_application_interface.py | analogdevicesinc/ApplicationsWaveTool | 0c1f236dd0745caa3187841ee1a882f209ac3ebe | [
"Apache-2.0"
] | 1 | 2021-03-16T08:26:05.000Z | 2021-03-16T08:26:05.000Z | #!/usr/bin/env python3
from ctypes import *
import m2m2_core
import common_application_interface
M2M2_PS_SYS_PWR_STATE_ENUM_t = {
"type":c_uint8,
"enum_values": [
("M2M2_PS_SYS_PWR_STATE_ACTIVE",0x0),
("M2M2_PS_SYS_PWR_STATE_FLEXI",0x1),
("M2M2_PS_SYS_PWR_STATE_HIBERNATE",0x2),
("M2M2_PS_SYS_PWR_STATE_SHUTDOWN",0x3),
]
}
M2M2_PS_SYS_COMMAND_ENUM_t = {
"type":c_uint8,
"enum_values": [
("__M2M2_PS_SYS_COMMAND_LOWEST",0x40),
("M2M2_PS_SYS_COMMAND_SET_DATE_TIME_REQ",0x42),
("M2M2_PS_SYS_COMMAND_SET_DATE_TIME_RESP",0x43),
("M2M2_PS_SYS_COMMAND_SET_PWR_STATE_REQ",0x44),
("M2M2_PS_SYS_COMMAND_SET_PWR_STATE_RESP",0x45),
("M2M2_PS_SYS_COMMAND_GET_PS_APPS_INFO_REQ",0x48),
("M2M2_PS_SYS_COMMAND_GET_PS_APPS_INFO_RESP",0x49),
("M2M2_PS_SYS_COMMAND_ACTIVATE_TOUCH_SENSOR_REQ",0x4A),
("M2M2_PS_SYS_COMMAND_ACTIVATE_TOUCH_SENSOR_RESP",0x4B),
("M2M2_PS_SYS_COMMAND_DEACTIVATE_TOUCH_SENSOR_REQ",0x4C),
("M2M2_PS_SYS_COMMAND_DEACTIVATE_TOUCH_SENSOR_RESP",0x4D),
("M2M2_PS_SYS_COMMAND_GET_BOARD_INFO_REQ", 0x4E),
("M2M2_PS_SYS_COMMAND_GET_BOARD_INFO_RESP", 0x4F),
("M2M2_PS_SYS_COMMAND_SYSTEM_RESET_REQ",0x50),
("M2M2_PS_SYS_COMMAND_SYSTEM_RESET_RESP",0x51),
]
}
ADI_PS_BOARD_TYPE_t = {
"type":c_uint8,
"enum_values": [
("ADI_PS_BOARD_TYPE_UNKNOWN",0x0),
("ADI_PS_BOARD_TYPE_ADPD107_WATCH",0x1),
("ADI_PS_BOARD_TYPE_ADPD107_CHEST_STRAP",0x2),
("ADI_PS_BOARD_TYPE_ADPD185_WATCH",0x3),
("ADI_PS_BOARD_TYPE_ADPD188_WATCH",0x4),
]
}
M2M2_PS_SYS_STATUS_ENUM_t = {
"type":c_uint8,
"enum_values": [
("__M2M2_PS_SYS_STATUS_LOWEST",0x40),
("M2M2_PS_SYS_STATUS_OK",0x41),
("M2M2_PS_SYS_STATUS_ERR_ARGS",0x42),
("M2M2_PS_SYS_STATUS_ERR_NOT_CHKD",0xFF),
]
}
m2m2_ps_sys_cmd_t = {
"struct_fields": [
{"name":None,
"type":common_application_interface._m2m2_app_common_cmd_t},
]
}
m2m2_ps_sys_pwr_state_t = {
"struct_fields": [
{"name":None,
"type":common_application_interface._m2m2_app_common_cmd_t},
{"name":"state",
"type":M2M2_PS_SYS_PWR_STATE_ENUM_t},
]
}
m2m2_ps_sys_date_time_req_t = {
"struct_fields": [
{"name":None,
"type":common_application_interface._m2m2_app_common_cmd_t},
{"name":"year",
"type":c_uint16},
{"name":"month",
"type":c_uint8},
{"name":"day",
"type":c_uint8},
{"name":"hour",
"type":c_uint8},
{"name":"minute",
"type":c_uint8},
{"name":"second",
"type":c_uint8},
{"name":"TZ_sec",
"type":c_uint32},
]
}
m2m2_ps_sys_board_info_req_t = {
"struct_fields": [
{"name":None,
"type":common_application_interface._m2m2_app_common_cmd_t},
{"name":"version",
"type":c_uint16},
{"name":"mac_addr",
"length":6,
"type":c_uint8},
{"name":"device_id",
"type":c_uint32},
{"name":"model_number",
"type":c_uint32},
{"name":"hw_id",
"type":c_uint16},
{"name":"bom_id",
"type":c_uint16},
{"name":"batch_id",
"type":c_uint8},
{"name":"date",
"type":c_uint32},
{"name": "board_type",
"type": ADI_PS_BOARD_TYPE_t},
]
}
m2m2_ps_sys_sensor_app_status = {
"struct_fields":[
{"name":"sensor_app_id",
"type":m2m2_core.M2M2_ADDR_ENUM_t},
{"name":"num_subscribers",
"type":c_uint8},
{"name":"num_start_reqs",
"type":c_uint8},
]
}
m2m2_ps_sys_sensor_apps_info_req_t = {
"struct_fields":[
{"name":"command",
"type":c_uint8},
{"name":"status",
"type":c_uint8},
{"name":"num_sensor_apps",
"type":c_uint16},
{"name":"app_info",
"length":15,
"type":m2m2_ps_sys_sensor_app_status},
]
}
| 24.311258 | 64 | 0.672569 | #!/usr/bin/env python3
from ctypes import *
import m2m2_core
import common_application_interface
M2M2_PS_SYS_PWR_STATE_ENUM_t = {
"type":c_uint8,
"enum_values": [
("M2M2_PS_SYS_PWR_STATE_ACTIVE",0x0),
("M2M2_PS_SYS_PWR_STATE_FLEXI",0x1),
("M2M2_PS_SYS_PWR_STATE_HIBERNATE",0x2),
("M2M2_PS_SYS_PWR_STATE_SHUTDOWN",0x3),
]
}
M2M2_PS_SYS_COMMAND_ENUM_t = {
"type":c_uint8,
"enum_values": [
("__M2M2_PS_SYS_COMMAND_LOWEST",0x40),
("M2M2_PS_SYS_COMMAND_SET_DATE_TIME_REQ",0x42),
("M2M2_PS_SYS_COMMAND_SET_DATE_TIME_RESP",0x43),
("M2M2_PS_SYS_COMMAND_SET_PWR_STATE_REQ",0x44),
("M2M2_PS_SYS_COMMAND_SET_PWR_STATE_RESP",0x45),
("M2M2_PS_SYS_COMMAND_GET_PS_APPS_INFO_REQ",0x48),
("M2M2_PS_SYS_COMMAND_GET_PS_APPS_INFO_RESP",0x49),
("M2M2_PS_SYS_COMMAND_ACTIVATE_TOUCH_SENSOR_REQ",0x4A),
("M2M2_PS_SYS_COMMAND_ACTIVATE_TOUCH_SENSOR_RESP",0x4B),
("M2M2_PS_SYS_COMMAND_DEACTIVATE_TOUCH_SENSOR_REQ",0x4C),
("M2M2_PS_SYS_COMMAND_DEACTIVATE_TOUCH_SENSOR_RESP",0x4D),
("M2M2_PS_SYS_COMMAND_GET_BOARD_INFO_REQ", 0x4E),
("M2M2_PS_SYS_COMMAND_GET_BOARD_INFO_RESP", 0x4F),
("M2M2_PS_SYS_COMMAND_SYSTEM_RESET_REQ",0x50),
("M2M2_PS_SYS_COMMAND_SYSTEM_RESET_RESP",0x51),
]
}
ADI_PS_BOARD_TYPE_t = {
"type":c_uint8,
"enum_values": [
("ADI_PS_BOARD_TYPE_UNKNOWN",0x0),
("ADI_PS_BOARD_TYPE_ADPD107_WATCH",0x1),
("ADI_PS_BOARD_TYPE_ADPD107_CHEST_STRAP",0x2),
("ADI_PS_BOARD_TYPE_ADPD185_WATCH",0x3),
("ADI_PS_BOARD_TYPE_ADPD188_WATCH",0x4),
]
}
M2M2_PS_SYS_STATUS_ENUM_t = {
"type":c_uint8,
"enum_values": [
("__M2M2_PS_SYS_STATUS_LOWEST",0x40),
("M2M2_PS_SYS_STATUS_OK",0x41),
("M2M2_PS_SYS_STATUS_ERR_ARGS",0x42),
("M2M2_PS_SYS_STATUS_ERR_NOT_CHKD",0xFF),
]
}
m2m2_ps_sys_cmd_t = {
"struct_fields": [
{"name":None,
"type":common_application_interface._m2m2_app_common_cmd_t},
]
}
m2m2_ps_sys_pwr_state_t = {
"struct_fields": [
{"name":None,
"type":common_application_interface._m2m2_app_common_cmd_t},
{"name":"state",
"type":M2M2_PS_SYS_PWR_STATE_ENUM_t},
]
}
m2m2_ps_sys_date_time_req_t = {
"struct_fields": [
{"name":None,
"type":common_application_interface._m2m2_app_common_cmd_t},
{"name":"year",
"type":c_uint16},
{"name":"month",
"type":c_uint8},
{"name":"day",
"type":c_uint8},
{"name":"hour",
"type":c_uint8},
{"name":"minute",
"type":c_uint8},
{"name":"second",
"type":c_uint8},
{"name":"TZ_sec",
"type":c_uint32},
]
}
m2m2_ps_sys_board_info_req_t = {
"struct_fields": [
{"name":None,
"type":common_application_interface._m2m2_app_common_cmd_t},
{"name":"version",
"type":c_uint16},
{"name":"mac_addr",
"length":6,
"type":c_uint8},
{"name":"device_id",
"type":c_uint32},
{"name":"model_number",
"type":c_uint32},
{"name":"hw_id",
"type":c_uint16},
{"name":"bom_id",
"type":c_uint16},
{"name":"batch_id",
"type":c_uint8},
{"name":"date",
"type":c_uint32},
{"name": "board_type",
"type": ADI_PS_BOARD_TYPE_t},
]
}
m2m2_ps_sys_sensor_app_status = {
"struct_fields":[
{"name":"sensor_app_id",
"type":m2m2_core.M2M2_ADDR_ENUM_t},
{"name":"num_subscribers",
"type":c_uint8},
{"name":"num_start_reqs",
"type":c_uint8},
]
}
m2m2_ps_sys_sensor_apps_info_req_t = {
"struct_fields":[
{"name":"command",
"type":c_uint8},
{"name":"status",
"type":c_uint8},
{"name":"num_sensor_apps",
"type":c_uint16},
{"name":"app_info",
"length":15,
"type":m2m2_ps_sys_sensor_app_status},
]
}
| 0 | 0 | 0 |
352512b56666643c75d66e8d77ef4c77c1f177d0 | 445 | py | Python | pymtl3/passes/BasePass.py | kevinyuan/pymtl3 | 5949e6a4acc625c0ccbbb25be3af1d0db683df3c | [
"BSD-3-Clause"
] | 152 | 2020-06-03T02:34:11.000Z | 2022-03-30T04:16:45.000Z | pymtl3/passes/BasePass.py | kevinyuan/pymtl3 | 5949e6a4acc625c0ccbbb25be3af1d0db683df3c | [
"BSD-3-Clause"
] | 139 | 2019-05-29T00:37:09.000Z | 2020-05-17T16:49:26.000Z | pymtl3/passes/BasePass.py | kevinyuan/pymtl3 | 5949e6a4acc625c0ccbbb25be3af1d0db683df3c | [
"BSD-3-Clause"
] | 22 | 2020-05-18T13:42:05.000Z | 2022-03-11T08:37:51.000Z | """
========================================================================
BasePass.py
========================================================================
An "abstract" base class for all passes.
Author : Shunning Jiang
Date : Dec 17, 2017
"""
| 23.421053 | 72 | 0.460674 | """
========================================================================
BasePass.py
========================================================================
An "abstract" base class for all passes.
Author : Shunning Jiang
Date : Dec 17, 2017
"""
class PassMetadata:
pass
class BasePass:
def __init__( self, debug=False ): # initialize parameters
self.debug = debug
def __call__( self, m ): # execute pass on model m
pass
| 98 | -1 | 94 |
a149b74c4b946b172b4c1ca45d12053631b1a421 | 253 | py | Python | faker/providers/ssn/__init__.py | bdclauser/Faker | b676668214f5f4cf2849eea16d50c835ffba5be9 | [
"MIT"
] | 1 | 2021-01-21T03:44:59.000Z | 2021-01-21T03:44:59.000Z | faker/providers/ssn/__init__.py | bdclauser/Faker | b676668214f5f4cf2849eea16d50c835ffba5be9 | [
"MIT"
] | null | null | null | faker/providers/ssn/__init__.py | bdclauser/Faker | b676668214f5f4cf2849eea16d50c835ffba5be9 | [
"MIT"
] | null | null | null | # coding=utf-8
from __future__ import unicode_literals
from .. import BaseProvider
localized = True | 23 | 66 | 0.699605 | # coding=utf-8
from __future__ import unicode_literals
from .. import BaseProvider
localized = True
class Provider(BaseProvider):
ssn_formats = ("###-##-####",)
def ssn(self):
return self.bothify(self.random_element(self.ssn_formats)) | 60 | 70 | 23 |
4c54bda14fbe0222d564445d5656d479e06b4a66 | 2,794 | py | Python | wikidict/lang/it/__init__.py | chopinesque/ebook-reader-dict | 1c69657fd36b339d2fe436e456c1b7891b2d2f9e | [
"MIT"
] | null | null | null | wikidict/lang/it/__init__.py | chopinesque/ebook-reader-dict | 1c69657fd36b339d2fe436e456c1b7891b2d2f9e | [
"MIT"
] | null | null | null | wikidict/lang/it/__init__.py | chopinesque/ebook-reader-dict | 1c69657fd36b339d2fe436e456c1b7891b2d2f9e | [
"MIT"
] | null | null | null | """Italian language."""
from typing import Dict, Tuple
# Regex to find the pronunciation
pronunciation = r"{IPA\|/([^/]+)/"
# Regex to find the gender
gender = r"{{Pn\|?w?}} ''([fm])[singvol ]*''"
# Float number separator
float_separator = ","
# Thousands separator
thousands_separator = " "
# Markers for sections that contain interesting text to analyse.
head_sections = ("{{-it-}}",)
etyl_section = ["{{etim}}"]
sections = (
*head_sections,
*etyl_section,
"{{acron}",
"{{agg}",
"{{avv}",
"{{art}",
"{{cong}",
"{{inter}",
"{{pref}",
"{{Pn}",
"{{prep}",
"{{pron poss}",
"{{suff}",
"{{sost}",
"{{verb}",
)
# Some definitions are not good to keep (plural, gender, ... )
definitions_to_ignore = (
"{{verb form",
"{{nome",
# "{{agg form",
"{{sost form",
"{{-sost form-",
"{{It-conj",
)
# Templates to ignore: the text will be deleted.
templates_ignored: Tuple[str, ...] = tuple()
# Templates that will be completed/replaced using italic style.
templates_italic: Dict[str, str] = {}
# Templates more complex to manage.
templates_multi: Dict[str, str] = {
# {{context|ecology|lang=it}}
"context": "small(term(parts[1]))",
# {{Est|raro|it}}
"Est": "small(term('per estensione'))",
# {{Etim-link|aggrondare}}
# {{Etim-link||cervice}}
"Etim-link": "'Vedi: ' + parts[2] if len(parts) == 3 else 'Vedi: ' + parts[1]",
# {{Glossa|raro|it}}
"Glossa": "small(term(parts[1]))",
# {{la}}
"la": "'latino'",
# {{Lett|non comune|it}}
"Lett": "small(term('letteralmente'))",
# {{Nodef|it}}
"Nodef": "'-definizione mancante-'",
# {{Noetim||it}}
"Noetim": "'-etimologia mancante-'",
# {{Quote|...}}
"Quote": "'«' + parts[1] + '» ' + term(parts[2])",
# {{Tabs|aggrondato|aggrondati|aggrondata|aggrondate}}
"Tabs": "'Masc. sing. ' + parts[1] + ', masc. plur. ' + parts[2] + ', fem. sing. ' + parts[3] + ', fem. plur. ' + parts[4]", # noqa
# {{Taxon|Chromis chromis|Chromis chromis}}
"Taxon": "'la sua classificazione scientifica è ' + strong(italic(parts[1]))",
# {{Term|statistica|it}}
"Term": "small(term(parts[1]))",
"term": "small(term(parts[1]))",
# {{Vd|acre#Italiano|acre}}
"Vd": "'Vedi: ' + parts[2]",
}
# Release content on GitHub
# https://github.com/BoboTiG/ebook-reader-dict/releases/tag/it
release_description = """\
Numero di parole: {words_count}
Export Wiktionary: {dump_date}
File disponibili:
- [Kobo]({url_kobo}) (dicthtml-{locale}.zip)
- [StarDict]({url_stardict}) (dict-{locale}.zip)
- [DictFile]({url_dictfile}) (dict-{locale}.df)
<sub>Aggiornato il {creation_date}</sub>
""" # noqa
# Dictionary name that will be printed below each definition
wiktionary = "Wikizionario (ɔ) {year}"
| 27.392157 | 136 | 0.584109 | """Italian language."""
from typing import Dict, Tuple
# Regex to find the pronunciation
pronunciation = r"{IPA\|/([^/]+)/"
# Regex to find the gender
gender = r"{{Pn\|?w?}} ''([fm])[singvol ]*''"
# Float number separator
float_separator = ","
# Thousands separator
thousands_separator = " "
# Markers for sections that contain interesting text to analyse.
head_sections = ("{{-it-}}",)
etyl_section = ["{{etim}}"]
sections = (
*head_sections,
*etyl_section,
"{{acron}",
"{{agg}",
"{{avv}",
"{{art}",
"{{cong}",
"{{inter}",
"{{pref}",
"{{Pn}",
"{{prep}",
"{{pron poss}",
"{{suff}",
"{{sost}",
"{{verb}",
)
# Some definitions are not good to keep (plural, gender, ... )
definitions_to_ignore = (
"{{verb form",
"{{nome",
# "{{agg form",
"{{sost form",
"{{-sost form-",
"{{It-conj",
)
# Templates to ignore: the text will be deleted.
templates_ignored: Tuple[str, ...] = tuple()
# Templates that will be completed/replaced using italic style.
templates_italic: Dict[str, str] = {}
# Templates more complex to manage.
templates_multi: Dict[str, str] = {
# {{context|ecology|lang=it}}
"context": "small(term(parts[1]))",
# {{Est|raro|it}}
"Est": "small(term('per estensione'))",
# {{Etim-link|aggrondare}}
# {{Etim-link||cervice}}
"Etim-link": "'Vedi: ' + parts[2] if len(parts) == 3 else 'Vedi: ' + parts[1]",
# {{Glossa|raro|it}}
"Glossa": "small(term(parts[1]))",
# {{la}}
"la": "'latino'",
# {{Lett|non comune|it}}
"Lett": "small(term('letteralmente'))",
# {{Nodef|it}}
"Nodef": "'-definizione mancante-'",
# {{Noetim||it}}
"Noetim": "'-etimologia mancante-'",
# {{Quote|...}}
"Quote": "'«' + parts[1] + '» ' + term(parts[2])",
# {{Tabs|aggrondato|aggrondati|aggrondata|aggrondate}}
"Tabs": "'Masc. sing. ' + parts[1] + ', masc. plur. ' + parts[2] + ', fem. sing. ' + parts[3] + ', fem. plur. ' + parts[4]", # noqa
# {{Taxon|Chromis chromis|Chromis chromis}}
"Taxon": "'la sua classificazione scientifica è ' + strong(italic(parts[1]))",
# {{Term|statistica|it}}
"Term": "small(term(parts[1]))",
"term": "small(term(parts[1]))",
# {{Vd|acre#Italiano|acre}}
"Vd": "'Vedi: ' + parts[2]",
}
# Release content on GitHub
# https://github.com/BoboTiG/ebook-reader-dict/releases/tag/it
release_description = """\
Numero di parole: {words_count}
Export Wiktionary: {dump_date}
File disponibili:
- [Kobo]({url_kobo}) (dicthtml-{locale}.zip)
- [StarDict]({url_stardict}) (dict-{locale}.zip)
- [DictFile]({url_dictfile}) (dict-{locale}.df)
<sub>Aggiornato il {creation_date}</sub>
""" # noqa
# Dictionary name that will be printed below each definition
wiktionary = "Wikizionario (ɔ) {year}"
| 0 | 0 | 0 |
ba485ba642b2ca3df6676659e8a5ba9769a6e7e3 | 1,473 | py | Python | summit_xl_localization/scripts/test.py | summitx/summit_xl_common | d95c42889b11616802eb767d10a036937e72d716 | [
"BSD-2-Clause"
] | 44 | 2018-01-05T11:18:05.000Z | 2022-03-24T13:15:28.000Z | summit_xl_localization/scripts/test.py | summitx/summit_xl_common | d95c42889b11616802eb767d10a036937e72d716 | [
"BSD-2-Clause"
] | 14 | 2015-08-26T13:09:12.000Z | 2021-08-25T07:56:40.000Z | summit_xl_localization/scripts/test.py | summitx/summit_xl_common | d95c42889b11616802eb767d10a036937e72d716 | [
"BSD-2-Clause"
] | 42 | 2016-06-13T12:12:02.000Z | 2022-03-18T05:31:12.000Z | #!/usr/bin/env python
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import MagneticField
from math import atan2, sin, cos, sqrt
if __name__ == '__main__':
listener()
| 31.340426 | 130 | 0.642227 | #!/usr/bin/env python
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import MagneticField
from math import atan2, sin, cos, sqrt
def callback(data):
#rospy.loginfo(rospy.get_caller_id() + "mx: %s", data.magnetic_field.x)
xmag = data.magnetic_field.x
ymag = data.magnetic_field.y
zmag = data.magnetic_field.z
mag_norm=sqrt((xmag*xmag)+(ymag*ymag)+(zmag*zmag))
# for normalization
magx=xmag/mag_norm
magy=ymag/mag_norm
magz=zmag/mag_norm
Roll = 0;
Pitch = 0;
#yaw =atan2( (-ymag*cos(Roll) + zmag*sin(Roll) ) , (xmag*cos(Pitch) + ymag*sin(Pitch)*sin(Roll)+ zmag*sin(Pitch)*cos(Roll)) )
#yaw =atan2( (-magy*cos(Roll) + magz*sin(Roll) ) , (magx*cos(Pitch) + magy*sin(Pitch)*sin(Roll)+ magz*sin(Pitch)*cos(Roll)) )
cos_pitch = cos(Pitch)
sin_pitch = sin(Pitch)
cos_roll = cos(Roll)
sin_roll = sin(Roll)
t_mag_x = magx * cos_pitch + magz * sin_pitch
t_mag_y = magx * sin_roll * sin_pitch + magy * cos_roll - magz * sin_roll * cos_pitch
head_x = t_mag_x
head_y = t_mag_y
yaw = atan2(head_x, head_y)
rospy.loginfo(rospy.get_caller_id() + "yaw: %s", yaw)
def listener():
rospy.init_node('test', anonymous=True)
rospy.Subscriber("/mavros/imu/mag", MagneticField, callback)
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
if __name__ == '__main__':
listener()
| 1,228 | 0 | 54 |
30569b5f7f56c0bfdba6e8d976b4ec88d291b3c8 | 7,458 | py | Python | tests/test_functional.py | NarrativeScience/delver | 876480848eff2dec695b3a24ac869433d861ef6c | [
"BSD-3-Clause"
] | 2 | 2020-06-16T08:49:48.000Z | 2020-08-17T06:56:26.000Z | tests/test_functional.py | NarrativeScience/delver | 876480848eff2dec695b3a24ac869433d861ef6c | [
"BSD-3-Clause"
] | 1 | 2020-02-18T00:37:36.000Z | 2020-02-18T19:00:26.000Z | tests/test_functional.py | NarrativeScience/delver | 876480848eff2dec695b3a24ac869433d861ef6c | [
"BSD-3-Clause"
] | null | null | null | """Module containing tests for the delver tool"""
import unittest
from unittest import mock
from context import core as mod_ut
class TestDelveFunctional(unittest.TestCase):
"""Functional tests for the delver tool"""
def setUp(self):
"""Initialize frequently used test objects"""
self.test_obj = {"foo": ["bar", {"baz": 3}]}
print_patch = mock.patch("delver.core.six.print_")
input_patch = mock.patch("delver.core.six_input")
self.fake_print = print_patch.start()
self.fake_input = input_patch.start()
self.addCleanup(print_patch.stop)
self.addCleanup(input_patch.stop)
def _extract_print_strings(self, call_args):
"""Extract the actual strings that make up the calls to the patched
print function.
:param call_args: the list of arguments passed to the patched function
:type call_args: ``list``
:return: list of strings that were arguments to the patched print
function
:rtype: ``list`` of ``str``
"""
return [x[0][0] for x in call_args]
def test_single_navigate(self):
"""Test a single navigation and exit"""
self.fake_input.side_effect = ["0", "q"]
target_print_args = [
(
mod_ut.DEFAULT_DIVIDER + "\n"
"At path: root\n"
"Dict (length 1)\n"
"+-----+-----+------------------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------------------+\n"
"| 0 | foo | <list, length 2> |\n"
"+-----+-----+------------------+"
),
(
mod_ut.DEFAULT_DIVIDER + "\n"
'At path: root["foo"]\n'
"List (length 2)\n"
"+-----+------------------+\n"
"| Idx | Data |\n"
"+-----+------------------+\n"
"| 0 | bar |\n"
"| 1 | <dict, length 1> |\n"
"+-----+------------------+"
),
"Bye.",
]
mod_ut.Delver(self.test_obj).run()
result_print_args = self._extract_print_strings(self.fake_print.call_args_list)
self.assertListEqual(result_print_args, target_print_args)
def test_invalid_key_index(self):
"""Test an invalid index message is displayed"""
self.fake_input.side_effect = ["1", "q"]
target_print_args = [
(
mod_ut.DEFAULT_DIVIDER + "\n"
"At path: root\n"
"Dict (length 1)\n"
"+-----+-----+------------------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------------------+\n"
"| 0 | foo | <list, length 2> |\n"
"+-----+-----+------------------+"
),
"Invalid Index",
(
mod_ut.DEFAULT_DIVIDER + "\n"
"At path: root\n"
"Dict (length 1)\n"
"+-----+-----+------------------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------------------+\n"
"| 0 | foo | <list, length 2> |\n"
"+-----+-----+------------------+"
),
"Bye.",
]
mod_ut.Delver(self.test_obj).run()
result_print_args = self._extract_print_strings(self.fake_print.call_args_list)
self.assertEqual(result_print_args, target_print_args)
def test_invalid_command(self):
"""Test an invalid command message is displayed"""
self.fake_input.side_effect = ["blah", "q"]
mod_ut.Delver(self.test_obj).run()
target_print_args = [
(
mod_ut.DEFAULT_DIVIDER + "\n"
"At path: root\n"
"Dict (length 1)\n"
"+-----+-----+------------------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------------------+\n"
"| 0 | foo | <list, length 2> |\n"
"+-----+-----+------------------+"
),
"Invalid command; please specify one of ['<key index>', u, q]",
(
mod_ut.DEFAULT_DIVIDER + "\n"
"At path: root\n"
"Dict (length 1)\n"
"+-----+-----+------------------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------------------+\n"
"| 0 | foo | <list, length 2> |\n"
"+-----+-----+------------------+"
),
"Bye.",
]
result_print_args = self._extract_print_strings(self.fake_print.call_args_list)
self.assertEqual(result_print_args, target_print_args)
def test_advanced_navigation(self):
"""Test navigating deeper into a data structure and back out"""
self.fake_input.side_effect = ["0", "1", "0", "u", "0", "q"]
mod_ut.Delver(self.test_obj).run()
target_print_args = [
(
mod_ut.DEFAULT_DIVIDER + "\n"
"At path: root\n"
"Dict (length 1)\n"
"+-----+-----+------------------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------------------+\n"
"| 0 | foo | <list, length 2> |\n"
"+-----+-----+------------------+"
),
(
mod_ut.DEFAULT_DIVIDER + "\n"
'At path: root["foo"]\n'
"List (length 2)\n"
"+-----+------------------+\n"
"| Idx | Data |\n"
"+-----+------------------+\n"
"| 0 | bar |\n"
"| 1 | <dict, length 1> |\n"
"+-----+------------------+"
),
(
mod_ut.DEFAULT_DIVIDER + "\n"
'At path: root["foo"][1]\n'
"Dict (length 1)\n"
"+-----+-----+------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------+\n"
"| 0 | baz | 3 |\n"
"+-----+-----+------+"
),
(
mod_ut.DEFAULT_DIVIDER + "\n"
'At path: root["foo"][1]["baz"]\n'
"+-------+\n"
"| Value |\n"
"+-------+\n"
"| 3 |\n"
"+-------+"
),
(
mod_ut.DEFAULT_DIVIDER + "\n"
'At path: root["foo"][1]\n'
"Dict (length 1)\n"
"+-----+-----+------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------+\n"
"| 0 | baz | 3 |\n"
"+-----+-----+------+"
),
(
mod_ut.DEFAULT_DIVIDER + "\n"
'At path: root["foo"][1]["baz"]\n'
"+-------+\n"
"| Value |\n"
"+-------+\n"
"| 3 |\n"
"+-------+"
),
"Bye.",
]
result_print_args = self._extract_print_strings(self.fake_print.call_args_list)
self.assertEqual(result_print_args, target_print_args)
if __name__ == "__main__":
unittest.main()
| 37.104478 | 87 | 0.359748 | """Module containing tests for the delver tool"""
import unittest
from unittest import mock
from context import core as mod_ut
class TestDelveFunctional(unittest.TestCase):
"""Functional tests for the delver tool"""
def setUp(self):
"""Initialize frequently used test objects"""
self.test_obj = {"foo": ["bar", {"baz": 3}]}
print_patch = mock.patch("delver.core.six.print_")
input_patch = mock.patch("delver.core.six_input")
self.fake_print = print_patch.start()
self.fake_input = input_patch.start()
self.addCleanup(print_patch.stop)
self.addCleanup(input_patch.stop)
def _extract_print_strings(self, call_args):
"""Extract the actual strings that make up the calls to the patched
print function.
:param call_args: the list of arguments passed to the patched function
:type call_args: ``list``
:return: list of strings that were arguments to the patched print
function
:rtype: ``list`` of ``str``
"""
return [x[0][0] for x in call_args]
def test_single_navigate(self):
"""Test a single navigation and exit"""
self.fake_input.side_effect = ["0", "q"]
target_print_args = [
(
mod_ut.DEFAULT_DIVIDER + "\n"
"At path: root\n"
"Dict (length 1)\n"
"+-----+-----+------------------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------------------+\n"
"| 0 | foo | <list, length 2> |\n"
"+-----+-----+------------------+"
),
(
mod_ut.DEFAULT_DIVIDER + "\n"
'At path: root["foo"]\n'
"List (length 2)\n"
"+-----+------------------+\n"
"| Idx | Data |\n"
"+-----+------------------+\n"
"| 0 | bar |\n"
"| 1 | <dict, length 1> |\n"
"+-----+------------------+"
),
"Bye.",
]
mod_ut.Delver(self.test_obj).run()
result_print_args = self._extract_print_strings(self.fake_print.call_args_list)
self.assertListEqual(result_print_args, target_print_args)
def test_invalid_key_index(self):
"""Test an invalid index message is displayed"""
self.fake_input.side_effect = ["1", "q"]
target_print_args = [
(
mod_ut.DEFAULT_DIVIDER + "\n"
"At path: root\n"
"Dict (length 1)\n"
"+-----+-----+------------------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------------------+\n"
"| 0 | foo | <list, length 2> |\n"
"+-----+-----+------------------+"
),
"Invalid Index",
(
mod_ut.DEFAULT_DIVIDER + "\n"
"At path: root\n"
"Dict (length 1)\n"
"+-----+-----+------------------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------------------+\n"
"| 0 | foo | <list, length 2> |\n"
"+-----+-----+------------------+"
),
"Bye.",
]
mod_ut.Delver(self.test_obj).run()
result_print_args = self._extract_print_strings(self.fake_print.call_args_list)
self.assertEqual(result_print_args, target_print_args)
def test_invalid_command(self):
"""Test an invalid command message is displayed"""
self.fake_input.side_effect = ["blah", "q"]
mod_ut.Delver(self.test_obj).run()
target_print_args = [
(
mod_ut.DEFAULT_DIVIDER + "\n"
"At path: root\n"
"Dict (length 1)\n"
"+-----+-----+------------------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------------------+\n"
"| 0 | foo | <list, length 2> |\n"
"+-----+-----+------------------+"
),
"Invalid command; please specify one of ['<key index>', u, q]",
(
mod_ut.DEFAULT_DIVIDER + "\n"
"At path: root\n"
"Dict (length 1)\n"
"+-----+-----+------------------+\n"
"| Idx | Key | Data |\n"
"+-----+-----+------------------+\n"
"| 0 | foo | <list, length 2> |\n"
"+-----+-----+------------------+"
),
"Bye.",
]
result_print_args = self._extract_print_strings(self.fake_print.call_args_list)
self.assertEqual(result_print_args, target_print_args)
    def test_advanced_navigation(self):
        """Test navigating deeper into a data structure and back out"""
        # Inputs: descend root -> "foo" (0) -> index 1 -> "baz" (0), go up
        # ("u"), descend to "baz" again (0), then quit ("q").
        self.fake_input.side_effect = ["0", "1", "0", "u", "0", "q"]
        mod_ut.Delver(self.test_obj).run()
        # One expected table per navigation step, in order.
        target_print_args = [
            (
                mod_ut.DEFAULT_DIVIDER + "\n"
                "At path: root\n"
                "Dict (length 1)\n"
                "+-----+-----+------------------+\n"
                "| Idx | Key | Data |\n"
                "+-----+-----+------------------+\n"
                "| 0 | foo | <list, length 2> |\n"
                "+-----+-----+------------------+"
            ),
            (
                mod_ut.DEFAULT_DIVIDER + "\n"
                'At path: root["foo"]\n'
                "List (length 2)\n"
                "+-----+------------------+\n"
                "| Idx | Data |\n"
                "+-----+------------------+\n"
                "| 0 | bar |\n"
                "| 1 | <dict, length 1> |\n"
                "+-----+------------------+"
            ),
            (
                mod_ut.DEFAULT_DIVIDER + "\n"
                'At path: root["foo"][1]\n'
                "Dict (length 1)\n"
                "+-----+-----+------+\n"
                "| Idx | Key | Data |\n"
                "+-----+-----+------+\n"
                "| 0 | baz | 3 |\n"
                "+-----+-----+------+"
            ),
            (
                mod_ut.DEFAULT_DIVIDER + "\n"
                'At path: root["foo"][1]["baz"]\n'
                "+-------+\n"
                "| Value |\n"
                "+-------+\n"
                "| 3 |\n"
                "+-------+"
            ),
            # After "u" we are back at the enclosing dict...
            (
                mod_ut.DEFAULT_DIVIDER + "\n"
                'At path: root["foo"][1]\n'
                "Dict (length 1)\n"
                "+-----+-----+------+\n"
                "| Idx | Key | Data |\n"
                "+-----+-----+------+\n"
                "| 0 | baz | 3 |\n"
                "+-----+-----+------+"
            ),
            # ...and the final "0" descends to the scalar once more.
            (
                mod_ut.DEFAULT_DIVIDER + "\n"
                'At path: root["foo"][1]["baz"]\n'
                "+-------+\n"
                "| Value |\n"
                "+-------+\n"
                "| 3 |\n"
                "+-------+"
            ),
            "Bye.",
        ]
        result_print_args = self._extract_print_strings(self.fake_print.call_args_list)
        self.assertEqual(result_print_args, target_print_args)
if __name__ == "__main__":
unittest.main()
| 0 | 0 | 0 |
1bc1f57102613b47319741afba9c2a3168ef3600 | 2,330 | py | Python | conventional_commits/scripts/commit.py | NoFussComputing/gitlab-ci | 3548312a9f17eb056006aeaa57af00897c9585d8 | [
"MIT"
] | null | null | null | conventional_commits/scripts/commit.py | NoFussComputing/gitlab-ci | 3548312a9f17eb056006aeaa57af00897c9585d8 | [
"MIT"
] | null | null | null | conventional_commits/scripts/commit.py | NoFussComputing/gitlab-ci | 3548312a9f17eb056006aeaa57af00897c9585d8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
# Fetch the title of the open GitLab merge request for the current branch.
# Authenticates first with the CI job token, then falls back to a private token.
#import gitlab
import os
import sys
import getopt
import json
import requests
# CLI state, filled in by option parsing below.
get_mr_title = False
project_id = ''
# NOTE(review): getopt short-option strings are per-character, so "hi:t:ti:p:b"
# declares h, i:, t:, a duplicate t, a duplicate i:, p:, b.  There is no
# multi-character "-ti" short option (only --title can reach that branch),
# and "-b" is declared without an argument even though --branch= takes one.
# Confirm the intended CLI before changing it.
try:
    opts, args = getopt.getopt(sys.argv[1:],"hi:t:ti:p:b",["token=", "title", "project=", "branch="])
except getopt.GetoptError:
    print('test.py [-c | --commit] [-t | --token {token}]')
    sys.exit(2)
for opt, arg in opts:
    #print('[DEBUG] {0} {1}'.format(opt, arg))
    if opt == '-h':
        print('[commit.py] -i <inputfile> -o <outputfile>')
        sys.exit()
    elif opt in ("-t", "--token"):
        if arg is None:
            raise ValueError('Token switch was specified, however no token was supplied.')
        ci_job_token = arg
    elif opt in ("-ti", "--title"):
        get_mr_title = True
    elif opt in ("-p", "--project"):
        project_id = str(arg)
    elif opt in ("-b", "--branch"):
        git_branch = arg
# private token or personal token authentication
#gl = gitlab.Gitlab('https://gitlab.com', private_token=ci_job_token)
# Query open merge requests whose source branch is the current branch.
url = 'https://gitlab.com/api/v4/projects/' + project_id + '/merge_requests?state=opened&source_branch=' + git_branch
merge_requests = ""
# First attempt: CI job-token authentication (available inside GitLab CI).
# NOTE(review): the bare "except: pass" swallows every error here (missing
# CI_JOB_TOKEN env var, network failure, bad JSON); the PRIVATE-TOKEN
# fallback below then takes over.
try:
    if os.environ['CI_JOB_TOKEN'] is not None:
        headers = {'JOB_TOKEN': os.environ['CI_JOB_TOKEN']}
    if os.environ['CI_JOB_TOKEN'] == ci_job_token:
        headers = {'JOB_TOKEN': os.environ['CI_JOB_TOKEN']}
        merge_requests = requests.get(url, headers=headers, data='')
        merge_requests = merge_requests.json()
except:
    pass
# Fallback: personal/private token, if the job-token path did not produce a
# list of merge requests.
if not isinstance(merge_requests, list):
    headers = {'PRIVATE-TOKEN': ci_job_token}
    merge_requests = requests.get(url, headers=headers, data='')
    merge_requests = merge_requests.json()
#print('\n\nmerge_requests=[-{0}-][]\n\n\n\n\n'.format(merge_requests))
#project_mrs = project.mergerequests.list()
#mrs = gl.mergerequests.list()
mr_title = 'failed to fetch Merge Request title'
mr_first_commit = ''
target_branch = ''
# Pick the title of the open MR matching this branch and project, if any.
if isinstance(merge_requests, list):
    if len(merge_requests) > 0:
        for mr in merge_requests:
            if mr['source_branch'] == git_branch and str(mr['target_project_id']) == str(project_id) and str(mr['state']) == 'opened':
                mr_title = mr['title']
        if get_mr_title:
            print('{0}'.format(mr_title))
    else:
        print('ci: No Merge Request found, MR count "0"')
| 23.069307 | 128 | 0.645923 | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
#import gitlab
import os
import sys
import getopt
import json
import requests
get_mr_title = False
project_id = ''
try:
opts, args = getopt.getopt(sys.argv[1:],"hi:t:ti:p:b",["token=", "title", "project=", "branch="])
except getopt.GetoptError:
print('test.py [-c | --commit] [-t | --token {token}]')
sys.exit(2)
for opt, arg in opts:
#print('[DEBUG] {0} {1}'.format(opt, arg))
if opt == '-h':
print('[commit.py] -i <inputfile> -o <outputfile>')
sys.exit()
elif opt in ("-t", "--token"):
if arg is None:
raise ValueError('Token switch was specified, however no token was supplied.')
ci_job_token = arg
elif opt in ("-ti", "--title"):
get_mr_title = True
elif opt in ("-p", "--project"):
project_id = str(arg)
elif opt in ("-b", "--branch"):
git_branch = arg
# private token or personal token authentication
#gl = gitlab.Gitlab('https://gitlab.com', private_token=ci_job_token)
url = 'https://gitlab.com/api/v4/projects/' + project_id + '/merge_requests?state=opened&source_branch=' + git_branch
merge_requests = ""
try:
if os.environ['CI_JOB_TOKEN'] is not None:
headers = {'JOB_TOKEN': os.environ['CI_JOB_TOKEN']}
if os.environ['CI_JOB_TOKEN'] == ci_job_token:
headers = {'JOB_TOKEN': os.environ['CI_JOB_TOKEN']}
merge_requests = requests.get(url, headers=headers, data='')
merge_requests = merge_requests.json()
except:
pass
if not isinstance(merge_requests, list):
headers = {'PRIVATE-TOKEN': ci_job_token}
merge_requests = requests.get(url, headers=headers, data='')
merge_requests = merge_requests.json()
#print('\n\nmerge_requests=[-{0}-][]\n\n\n\n\n'.format(merge_requests))
#project_mrs = project.mergerequests.list()
#mrs = gl.mergerequests.list()
mr_title = 'failed to fetch Merge Request title'
mr_first_commit = ''
target_branch = ''
if isinstance(merge_requests, list):
if len(merge_requests) > 0:
for mr in merge_requests:
if mr['source_branch'] == git_branch and str(mr['target_project_id']) == str(project_id) and str(mr['state']) == 'opened':
mr_title = mr['title']
if get_mr_title:
print('{0}'.format(mr_title))
else:
print('ci: No Merge Request found, MR count "0"')
| 0 | 0 | 0 |
0a1d41cfd4c4e5858b93d46432a787506b86a575 | 229 | py | Python | lib/google/sheets/utils.py | goztrk/django-htk | c56bf112e5d627780d2f4288460eae5cce80fa9e | [
"MIT"
] | 206 | 2015-10-15T07:05:08.000Z | 2021-02-19T11:48:36.000Z | lib/google/sheets/utils.py | goztrk/django-htk | c56bf112e5d627780d2f4288460eae5cce80fa9e | [
"MIT"
] | 8 | 2017-10-16T10:18:31.000Z | 2022-03-09T14:24:27.000Z | lib/google/sheets/utils.py | goztrk/django-htk | c56bf112e5d627780d2f4288460eae5cce80fa9e | [
"MIT"
] | 61 | 2015-10-15T08:12:44.000Z | 2022-03-10T12:25:06.000Z | # Third Party (PyPI) Imports
import requests
| 20.818182 | 57 | 0.668122 | # Third Party (PyPI) Imports
import requests
def append_to_sheet(spreadsheet_id, table_range, values):
    """Append *values* (a list of rows) to a Google Sheet range.

    NOTE(review): the ``base_url`` string literal was missing from the
    original source (a syntax error as dumped) and the function never sent
    the request.  The URL below is reconstructed from the documented
    Sheets v4 ``spreadsheets.values.append`` endpoint — confirm the
    endpoint and the authentication handling against the deployment.

    Args:
        spreadsheet_id: the target spreadsheet's ID.
        table_range: A1-notation range the rows are appended after.
        values: list of row lists to append.

    Returns:
        The ``requests.Response`` from the append call.
    """
    base_url = (
        'https://sheets.googleapis.com/v4/spreadsheets/'
        '%(spreadsheet_id)s/values/%(table_range)s:append'
        '?valueInputOption=USER_ENTERED'
    )
    url = base_url % {
        'spreadsheet_id' : spreadsheet_id,
        'table_range' : table_range,
    }
    response = requests.post(url, json={'values': values})
    return response
| 160 | 0 | 23 |
c262914ebaed89e6046a1a4e7ccde75b1f4044cf | 1,385 | py | Python | multiple_page_news_scraper.py | RahulRj09/Ndtv-news-live-data-scraper | c18a2e29968845a0aa0ee2ed31eee074686a70c8 | [
"MIT"
] | 7 | 2019-02-19T20:39:46.000Z | 2019-03-03T06:12:30.000Z | multiple_page_news_scraper.py | RahulRj09/Ndtv-news-live-data-scraper | c18a2e29968845a0aa0ee2ed31eee074686a70c8 | [
"MIT"
] | null | null | null | multiple_page_news_scraper.py | RahulRj09/Ndtv-news-live-data-scraper | c18a2e29968845a0aa0ee2ed31eee074686a70c8 | [
"MIT"
] | null | null | null | import time
import requests
from bs4 import BeautifulSoup as bs
# Collected article URLs, deduplicated, in discovery order.
url= []
for page in range(1,9):
    # Be polite to the server: pause between listing-page requests.
    time.sleep(5)
    source = 'https://www.ndtv.com/latest/page-{}'.format(page)
    source = requests.get(source).text
    soup = bs(source,'html5lib')
    # The story listing lives inside this container div.
    soup = soup.find('div',{'class': 'new_storylising'})
    for i in soup.find_all("a"):
        try:
            # Keep only site-relative links or absolute links into ndtv.
            if (i['href'].startswith('/') or "ndtv" in i['href']):
                if i['href'] not in url:
                    url.append(i["href"])
                else:
                    continue
        except KeyError:
            # Anchor tag without an href attribute - skip it.
            pass
total_news = 1;
for i in range(len(url)):
print(url[i])
for i in range(len(url)):
try:
r = requests.get(url[i])
title = bs(r.content, 'html5lib')
print(total_news,"Title = ",title.title.string)
news_div = title.find('div', {'class': 'ins_dateline'})
author = news_div.find('span', {'itemprop': 'name'})
print("Author = ",author.string)
date = title.find('span', {'itemprop': 'dateModified'})
print("Date = ",date.string)
article_div = title.find('div', {'id': 'ins_storybody'})
article_p = article_div.findAll('p')
print("Article = ")
for i in article_p:
try:
if "None" in i.string:
continue
else:
print(i.string)
except TypeError:
pass
total_news +=1
print("--!--=--!--=--!--=--!--=--!--=--!--=--!--=----!!!!!----=--!--=--!--=--!--=--!--=--!--=--!--=--!--")
except AttributeError:
pass | 29.468085 | 108 | 0.572563 | import time
import requests
from bs4 import BeautifulSoup as bs
url= []
for page in range(1,9):
time.sleep(5)
source = 'https://www.ndtv.com/latest/page-{}'.format(page)
source = requests.get(source).text
soup = bs(source,'html5lib')
soup = soup.find('div',{'class': 'new_storylising'})
for i in soup.find_all("a"):
try:
if (i['href'].startswith('/') or "ndtv" in i['href']):
if i['href'] not in url:
url.append(i["href"])
else:
continue
except KeyError:
pass
total_news = 1;
for i in range(len(url)):
print(url[i])
for i in range(len(url)):
try:
r = requests.get(url[i])
title = bs(r.content, 'html5lib')
print(total_news,"Title = ",title.title.string)
news_div = title.find('div', {'class': 'ins_dateline'})
author = news_div.find('span', {'itemprop': 'name'})
print("Author = ",author.string)
date = title.find('span', {'itemprop': 'dateModified'})
print("Date = ",date.string)
article_div = title.find('div', {'id': 'ins_storybody'})
article_p = article_div.findAll('p')
print("Article = ")
for i in article_p:
try:
if "None" in i.string:
continue
else:
print(i.string)
except TypeError:
pass
total_news +=1
print("--!--=--!--=--!--=--!--=--!--=--!--=--!--=----!!!!!----=--!--=--!--=--!--=--!--=--!--=--!--=--!--")
except AttributeError:
pass | 0 | 0 | 0 |
ae27961b297b59c073ccea7eb2808d2a9091e72a | 1,057 | py | Python | app/repositories/queries/blob_queries.py | solarized-penguin/pylocker | 9117e342d6fd33800eaa0dd0ce82e53adafe6a84 | [
"MIT"
] | null | null | null | app/repositories/queries/blob_queries.py | solarized-penguin/pylocker | 9117e342d6fd33800eaa0dd0ce82e53adafe6a84 | [
"MIT"
] | null | null | null | app/repositories/queries/blob_queries.py | solarized-penguin/pylocker | 9117e342d6fd33800eaa0dd0ce82e53adafe6a84 | [
"MIT"
] | null | null | null | create_empty_blob = "SELECT lo_creat(-1)"
# Write :data into an existing large object at byte :offset.
write_data_to_blob = "SELECT lo_put(CAST(:loid AS OID), :offset, :data)"
# Read :length bytes from a large object starting at byte :offset.
read_data_from_blob = "SELECT lo_get(CAST(:loid AS OID), :offset, :length)"
# Delete (unlink) a large object by its OID.
delete_blob = "SELECT lo_unlink(CAST(:loid AS OID))"
# Session-temporary PL/pgSQL helper that returns a large object's size in
# bytes by seeking to its end; created in pg_temp so it vanishes with the
# session.  (The SQL text below is sent verbatim - do not edit casually.)
get_size_of_blob_function = """
CREATE OR REPLACE FUNCTION pg_temp.get_lo_size(loid INTEGER)
RETURNS BIGINT AS $lo_size$
DECLARE
file_descriptor INTEGER;
file_size BIGINT;
BEGIN
-- Open large object for reading.
-- Parameter "x'40000'" is equivalent to postgres large object mode "INV_READ"
-- which is necessary for method to work
file_descriptor := lo_open(CAST(loid AS OID), x'40000' :: INT);
-- Seek to the end
-- "Seek" command = "2"
PERFORM lo_lseek64(file_descriptor, 0, 2);
-- Fetch current file position - location of the last byte
file_size := lo_tell64(file_descriptor);
-- Close open file.
PERFORM lo_close(file_descriptor);
RETURN file_size;
END;
$lo_size$
LANGUAGE plpgsql;
"""
# Invoke the helper defined above for a single large object.
get_size_of_blob = "SELECT pg_temp.get_lo_size(:loid);"
| 27.815789 | 82 | 0.710501 | create_empty_blob = "SELECT lo_creat(-1)"
write_data_to_blob = "SELECT lo_put(CAST(:loid AS OID), :offset, :data)"
read_data_from_blob = "SELECT lo_get(CAST(:loid AS OID), :offset, :length)"
delete_blob = "SELECT lo_unlink(CAST(:loid AS OID))"
get_size_of_blob_function = """
CREATE OR REPLACE FUNCTION pg_temp.get_lo_size(loid INTEGER)
RETURNS BIGINT AS $lo_size$
DECLARE
file_descriptor INTEGER;
file_size BIGINT;
BEGIN
-- Open large object for reading.
-- Parameter "x'40000'" is equivalent to postgres large object mode "INV_READ"
-- which is necessary for method to work
file_descriptor := lo_open(CAST(loid AS OID), x'40000' :: INT);
-- Seek to the end
-- "Seek" command = "2"
PERFORM lo_lseek64(file_descriptor, 0, 2);
-- Fetch current file position - location of the last byte
file_size := lo_tell64(file_descriptor);
-- Close open file.
PERFORM lo_close(file_descriptor);
RETURN file_size;
END;
$lo_size$
LANGUAGE plpgsql;
"""
get_size_of_blob = "SELECT pg_temp.get_lo_size(:loid);"
| 0 | 0 | 0 |
b12e76b5801fd1b1a9d9f3df50394777231076d8 | 869 | py | Python | tests/core/filtering/test_filter_against_latest_blocks.py | bhardwajRahul/web3.py | efecadcdea64f9481fcace558a8ea103462e2923 | [
"MIT"
] | 3 | 2022-03-19T08:14:29.000Z | 2022-03-31T12:05:19.000Z | tests/core/filtering/test_filter_against_latest_blocks.py | bhardwajRahul/web3.py | efecadcdea64f9481fcace558a8ea103462e2923 | [
"MIT"
] | 1 | 2022-02-17T20:28:58.000Z | 2022-02-17T20:28:58.000Z | tests/core/filtering/test_filter_against_latest_blocks.py | bhardwajRahul/web3.py | efecadcdea64f9481fcace558a8ea103462e2923 | [
"MIT"
] | 1 | 2022-03-20T11:54:03.000Z | 2022-03-20T11:54:03.000Z | from web3._utils.threads import (
Timeout,
)
from web3.providers.eth_tester import (
EthereumTesterProvider,
)
| 28.032258 | 85 | 0.712313 | from web3._utils.threads import (
Timeout,
)
from web3.providers.eth_tester import (
EthereumTesterProvider,
)
def test_sync_filter_against_latest_blocks(w3, sleep_interval, wait_for_block):
    """A "latest" block filter should report exactly the hashes of the
    blocks mined after the filter was installed, in order."""
    if not isinstance(w3.provider, EthereumTesterProvider):
        w3.provider = EthereumTesterProvider()
    block_filter = w3.eth.filter("latest")
    start_block = w3.eth.block_number
    # Mine three more blocks past the starting height.
    wait_for_block(w3, start_block + 3)
    seen_hashes = []
    # Poll the filter until all three new block hashes have arrived.
    with Timeout(5) as timeout:
        while len(seen_hashes) < 3:
            seen_hashes.extend(block_filter.get_new_entries())
            timeout.sleep(sleep_interval())
    assert len(seen_hashes) == 3
    expected = [
        w3.eth.get_block(num + 1).hash
        for num in range(start_block, start_block + 3)
    ]
    assert seen_hashes == expected
| 726 | 0 | 23 |
8611171a15ab73ea43db6eaa20037584f3ce6198 | 3,684 | py | Python | harvest_bigram.py | Ezhil-Language-Foundation/uliveeran | 619b37a158129ff15aa342e662506cd6d23a0e5c | [
"MIT"
] | null | null | null | harvest_bigram.py | Ezhil-Language-Foundation/uliveeran | 619b37a158129ff15aa342e662506cd6d23a0e5c | [
"MIT"
] | null | null | null | harvest_bigram.py | Ezhil-Language-Foundation/uliveeran | 619b37a158129ff15aa342e662506cd6d23a0e5c | [
"MIT"
] | 1 | 2021-11-12T13:35:36.000Z | 2021-11-12T13:35:36.000Z | # -*- coding: utf-8 -*-
# (C) 2016-2021 Muthiah Annamalai
#
# This file is part of 'open-tamil' package
# We generate unigram and bi-gram statistics for Tamil texts
#
import tamil
from ngram.LetterModels import Unigram
import codecs
import pprint
import copy
import operator
from functools import cmp_to_key
import sys
import glob
import os
if __name__ == "__main__":
run('plain_text','pm_bigram_sorted_042521.txt')
| 29.709677 | 84 | 0.610749 | # -*- coding: utf-8 -*-
# (C) 2016-2021 Muthiah Annamalai
#
# This file is part of 'open-tamil' package
# We generate unigram and bi-gram statistics for Tamil texts
#
import tamil
from ngram.LetterModels import Unigram
import codecs
import pprint
import copy
import operator
from functools import cmp_to_key
import sys
import glob
import os
def print_tamil_words_by_frequency(frequency, fp=None):
    """Write the frequency table *frequency* (symbol -> weight) to *fp*
    twice: once sorted by descending weight and once in Tamil
    lexicographic order.  *fp* defaults to sys.stdout.  The output is a
    pair of Python-syntax lists (freqsort_data / alphasort_data)."""
    # sort words by descending order of occurence
    if not fp:
        fp = sys.stdout
    fp.write(u"# unique words = %d\n" % (len(frequency)))
    fp.write(u"# sorted in Frequency order\n")
    fp.write(u"freqsort_data = [\n")
    for k, v in sorted(frequency.items(), key=operator.itemgetter(1), reverse=True):
        fp.write(u"[u'%s',%g],\n" % (k, v))
    fp.write("]\n")
    fp.write(u"#" * 80 + u"\n")
    fp.write(u"# sorted in Tamil order\n")
    fp.write(u"alphasort_data = [\n")
    # compare_words_lexicographic orders by the Tamil alphabet rather than
    # by raw Unicode code points.
    for l in sorted(
        frequency.keys(), key=cmp_to_key(tamil.utf8.compare_words_lexicographic)
    ):
        k, v = l, frequency[l]
        fp.write(u"[u'%s',%g],\n" % (k, v))
    fp.write("]\n")
    return
def get_prob(data):
    """Convert a frequency table into a probability distribution.

    Zero counts are smoothed to half of the smallest non-zero count so
    that every symbol receives a non-zero probability, and the rounding
    residual is folded into the last key so the values sum to exactly 1.0.

    Args:
        data: dict mapping symbol -> count.

    Returns:
        A new dict mapping symbol -> probability (input is not mutated).
    """
    # adjust for non-zero probability of all symbols
    delta = 1e9
    data2 = copy.copy(data)
    s = 0.0
    nzeros = 0
    for v in data2.values():
        s += float(v)
        if v == 0:
            nzeros += 1
        elif v < delta:  # track the smallest non-zero count
            delta = v
    # delta: half of the lowest observed frequency is the smoothing mass.
    delta = float(delta) / 2.0
    if nzeros > 0:
        s = s + delta * nzeros
        print(u"n-zeros = %d,%g" % (nzeros, delta / s))
    for k in data2:
        if data2[k] == 0:
            data2[k] = delta
        data2[k] = float(data2[k]) / s
    # Bug fix: the "fudge adjust" used to run INSIDE the loop above on every
    # iteration (recomputing the full sum each time, and using abs() so a
    # sum below 1.0 was pushed further away).  Apply the signed residual
    # once, after normalising, to the last key.
    if data2:
        eps = sum(data2.values()) - 1.0
        data2[k] = data2[k] - eps
    return data2
def proc_stats(data, filename):
    """Normalise the count table *data* to probabilities and write the
    twin frequency/alphabetic report to *filename* (UTF-8)."""
    with codecs.open(filename, "w", "utf-8") as fp:
        print_tamil_words_by_frequency(get_prob(data), fp)
def get_stats():
    """Build a unigram (single-letter) frequency model from the fixed word
    list file and dump it twice: as a raw pprint table and, via
    proc_stats, as a normalised probability report."""
    obj = Unigram("out-tamil-words.txt")
    obj.frequency_model()
    with codecs.open("ta_data_freq.txt", "w", "utf-8") as fp:
        pprint.pprint(obj.letter, stream=fp)
    # Second report: the same letter counts converted to probabilities.
    proc_stats(obj.letter, u"ta_data_freq2.txt")
    return
class BigramHash(Unigram):
    """Letter-bigram frequency counter over a Tamil corpus file.

    Counts occurrences of every consecutive letter pair; the counts live
    in ``self.bigram`` keyed by the two-letter string.
    """
    def __init__(self, filename):
        Unigram.__init__(self, filename)
        self.bigram = dict()  # two-letter key -> occurrence count
        self.bigram_filename = filename
    def frequency_model(self):
        """ build a letter frequency model for Tamil letters from a corpus """
        # use a generator in corpus
        letters = list(self.corpus.next_tamil_letter())
        if len(letters) < 2:
            print("WARNING: too small file {0}".format(self.bigram_filename))
            return
        # Slide over consecutive letter pairs; zip replaces the manual
        # prev/next bookkeeping of the original (a dead "prev_letter = None"
        # assignment was also removed).
        for prev_letter, next_letter in zip(letters, letters[1:]):
            key = prev_letter + next_letter
            self.bigram[key] = self.bigram.get(key, 0) + 1
        return
def proc_stats2(data, outputfile):
    """Write the bigram count table *data* to *outputfile* as probabilities.

    Bug fix: the original passed ``get_prob(data)`` into :func:`proc_stats`,
    which itself calls :func:`get_prob`, normalising the table twice.  Pass
    the raw counts through so they are normalised exactly once.
    """
    proc_stats(data, outputfile)
def get_stats2(filename,prior=None):
    """Count letter bigrams in *filename*, optionally seeding the counts
    from a previously built *prior* model so statistics accumulate across
    multiple corpus files.  Returns the new BigramHash."""
    obj = BigramHash(filename)
    if prior:
        # Start from the counts gathered so far.
        obj.bigram.update( prior.bigram )
    obj.frequency_model()
    return obj
def run(parent,outputfile):
    """Accumulate bigram statistics over every ``*.word`` file under
    *parent* and write the normalised table to *outputfile*."""
    x=None
    for filename in glob.glob(os.path.join(parent,"*.word")):
        x = get_stats2(filename,x)
    # NOTE(review): if no *.word file matches, x is still None and the next
    # line raises AttributeError - confirm the input directory is never empty.
    proc_stats2(x.bigram,outputfile)
    return x
if __name__ == "__main__":
run('plain_text','pm_bigram_sorted_042521.txt')
| 2,343 | 736 | 184 |
d2121a6d2806b8194ac9ecd7d03b1ed5d7880e12 | 144 | py | Python | automatedtest_backend/common/rpc_client.py | zhousheng0325/AutomationPlatform | e7e41e4a26c63fc8151b191bccb0f4a412a3388d | [
"MIT"
] | null | null | null | automatedtest_backend/common/rpc_client.py | zhousheng0325/AutomationPlatform | e7e41e4a26c63fc8151b191bccb0f4a412a3388d | [
"MIT"
] | 10 | 2020-06-06T01:30:29.000Z | 2022-03-12T00:16:52.000Z | automatedtest_backend/common/rpc_client.py | zhousheng0325/AutomationPlatform | e7e41e4a26c63fc8151b191bccb0f4a412a3388d | [
"MIT"
] | null | null | null | import grpc
from django.utils.functional import SimpleLazyObject
# Create the gRPC channel lazily, on first attribute access, so importing
# this module does not open a network connection at startup.
# NOTE(review): the target address is hard-coded - confirm it should not
# come from Django settings instead.
client = SimpleLazyObject(lambda:grpc.insecure_channel('127.0.0.1:8001'))
| 28.8 | 74 | 0.798611 | import grpc
from django.utils.functional import SimpleLazyObject
client = SimpleLazyObject(lambda:grpc.insecure_channel('127.0.0.1:8001'))
| 0 | 0 | 0 |
4f602f18cff3560c6a12e793fdb84016fe827f24 | 3,732 | py | Python | chart/controllers/order.py | msamunetogetoge/AutoTrader | 4f5998d070ec4f8f6aee0c80854c4925d6d59dd5 | [
"MIT"
] | 1 | 2021-12-29T02:56:37.000Z | 2021-12-29T02:56:37.000Z | chart/controllers/order.py | msamunetogetoge/AutoTrader | 4f5998d070ec4f8f6aee0c80854c4925d6d59dd5 | [
"MIT"
] | null | null | null | chart/controllers/order.py | msamunetogetoge/AutoTrader | 4f5998d070ec4f8f6aee0c80854c4925d6d59dd5 | [
"MIT"
] | null | null | null | import pybitflyer
import key
from chart.controllers import get_data
import logging
logger = logging.getLogger(__name__)
class BitFlayer_Order():
"""[summary] bitflyer に成り行き注文で売買注文を行うクラス
"""
def AvailableBalance(self):
"""[summary]get available balance from bitflyerapi.
Returns:
[type] dict : [description] like {"JPY": 50000, "BTC_JPY": 0.05} dict.
"""
b = get_data.Balance(self.api_key, self.api_secret, code=self.product_code)
balance_code = self.product_code.split("_")[0]
balance = b.GetBalance()
available_JPY = balance["JPY"]["available"]
available_CRP = balance[balance_code]["available"]
d = {"JPY": available_JPY, self.product_code: available_CRP}
return d
def AdjustSize(self, size=0.00000001):
"""[summary] 手数料込みで取り扱えるsizeを計算する。
Args:
size (float, optional): [description]. Defaults to 0.00000001.
Returns:
[type]float : [description] 1 satoshi 刻み
"""
tax = self.api.gettradingcommission(product_code=self.product_code)["commission_rate"]
useable = 1.0 - tax
size = size * useable
size = int(size * 100000000) / 100000000
return size
def BUY(self, currency, use_parcent=0.9):
"""[summary] 買いたい額を円で指定して、bitflyer から成り行き注文を行う。
売買が成立するとIDを返し、失敗するとNone を返す。
Args:
currency ([type]): [description]
use_parcent (float, optional): [description]. Defaults to 0.9.
Returns:
[type]dict : [description] like{child_order_acceptance_id:xxxxxxxx} or None
"""
ticker = get_data.Ticker(self.api_key, self.api_secret, self.product_code).ticker
price = ticker["best_ask"]
usecurrency = currency * use_parcent
size = 1 / (price / usecurrency)
size = self.AdjustSize(size=size)
size = int(size * 100000000) / 100000000
buy_code = self.api.sendchildorder(
product_code=self.product_code,
child_order_type="MARKET",
side="BUY", size=size,
minute_to_expire=10,
time_in_force="GTC")
if "child_order_acceptance_id" in buy_code.keys():
return buy_code
else:
logger.error(Exception("Cant BUY"))
print("Cant BUY")
return None
def SELL(self, size=0.00000001):
"""[summary] 売りたい量のbitcoinをbitcoinの枚数で指定して、bitflyer から成り行き注文を行う。
売買が成立するとIDを返し、失敗するとNone を返す。
Args:
currency ([type]): [description]
use_parcent (float, optional): [description]. Defaults to 0.9.
code (str, optional): [description]. Defaults to "BTC_JPY".
Returns:
[type]dict : [description] like{child_order_acceptance_id:xxxxxxxx} or None
"""
size = self.AdjustSize(size=size)
size = int(size * 100000000) / 100000000
sell_code = self.api.sendchildorder(
product_code=self.product_code,
child_order_type="MARKET",
side="SELL", size=size,
minute_to_expire=10,
time_in_force="GTC")
if "child_order_acceptance_id" in sell_code.keys():
return sell_code
else:
logger.error(Exception("Cant SELL"))
print("Cant SELL")
return None
| 34.555556 | 95 | 0.590836 | import pybitflyer
import key
from chart.controllers import get_data
import logging
logger = logging.getLogger(__name__)
class BitFlayer_Order():
    """Place market ("narayuki") buy and sell orders on bitFlyer.
    """
    def __init__(self, api_key, api_secret, product_code="BTC_JPY"):
        # Credentials are kept so the get_data.* helpers can be built later.
        self.api_key = api_key
        self.api_secret = api_secret
        self.api = pybitflyer.API(api_key, api_secret)
        self.product_code = product_code
    def AvailableBalance(self):
        """Get the available balance from the bitFlyer API.
        Returns:
            dict : like {"JPY": 50000, "BTC_JPY": 0.05}.
        """
        b = get_data.Balance(self.api_key, self.api_secret, code=self.product_code)
        # "BTC_JPY" -> "BTC": the crypto leg of the trading pair.
        balance_code = self.product_code.split("_")[0]
        balance = b.GetBalance()
        available_JPY = balance["JPY"]["available"]
        available_CRP = balance[balance_code]["available"]
        d = {"JPY": available_JPY, self.product_code: available_CRP}
        return d
    def AdjustSize(self, size=0.00000001):
        """Compute the tradable size after deducting the trading commission.
        Args:
            size (float, optional): requested size. Defaults to 0.00000001.
        Returns:
            float : size floored to 1-satoshi (1e-8) steps.
        """
        # NOTE(review): this fetches the commission rate over the network on
        # every call.
        tax = self.api.gettradingcommission(product_code=self.product_code)["commission_rate"]
        useable = 1.0 - tax
        size = size * useable
        # Floor (truncate) to 8 decimal places - the exchange's minimum unit.
        size = int(size * 100000000) / 100000000
        return size
    def BUY(self, currency, use_parcent=0.9):
        """Place a market buy order for *currency* JPY times *use_parcent*.
        Returns the acceptance dict on success, or None on failure.
        Args:
            currency: amount of JPY to spend (before use_parcent scaling).
            use_parcent (float, optional): fraction of *currency* to use.
                Defaults to 0.9.
        Returns:
            dict : like {child_order_acceptance_id:xxxxxxxx} or None.
        """
        ticker = get_data.Ticker(self.api_key, self.api_secret, self.product_code).ticker
        price = ticker["best_ask"]
        usecurrency = currency * use_parcent
        # Coins purchasable for the budget at the current ask price.
        size = 1 / (price / usecurrency)
        size = self.AdjustSize(size=size)
        # NOTE(review): AdjustSize already truncated to 1e-8; this repeat is
        # redundant but harmless.
        size = int(size * 100000000) / 100000000
        buy_code = self.api.sendchildorder(
            product_code=self.product_code,
            child_order_type="MARKET",
            side="BUY", size=size,
            minute_to_expire=10,
            time_in_force="GTC")
        if "child_order_acceptance_id" in buy_code.keys():
            return buy_code
        else:
            logger.error(Exception("Cant BUY"))
            print("Cant BUY")
            return None
    def SELL(self, size=0.00000001):
        """Place a market sell order for *size* coins (in coin units).
        Args:
            size (float, optional): amount of coin to sell.
                Defaults to 0.00000001.
        Returns:
            dict : like {child_order_acceptance_id:xxxxxxxx} or None.
        """
        size = self.AdjustSize(size=size)
        # NOTE(review): AdjustSize already truncated to 1e-8; this repeat is
        # redundant but harmless.
        size = int(size * 100000000) / 100000000
        sell_code = self.api.sendchildorder(
            product_code=self.product_code,
            child_order_type="MARKET",
            side="SELL", size=size,
            minute_to_expire=10,
            time_in_force="GTC")
        if "child_order_acceptance_id" in sell_code.keys():
            return sell_code
        else:
            logger.error(Exception("Cant SELL"))
            print("Cant SELL")
            return None
| 211 | 0 | 29 |
818c7147a8c30dcd0e76678ab81abc2752ba7f0a | 2,133 | py | Python | dinamicos-rtdata/mouseplot/mouse_trace.py | ochoadavid/SciPyLA2016-VisPy | 1bcd965e9706bbf48a0aedb1ad200db7edc90ba2 | [
"MIT"
] | 1 | 2016-05-16T16:51:18.000Z | 2016-05-16T16:51:18.000Z | dinamicos-rtdata/mouseplot/mouse_trace.py | ochoadavid/SciPyLA2016-VisPy | 1bcd965e9706bbf48a0aedb1ad200db7edc90ba2 | [
"MIT"
] | null | null | null | dinamicos-rtdata/mouseplot/mouse_trace.py | ochoadavid/SciPyLA2016-VisPy | 1bcd965e9706bbf48a0aedb1ad200db7edc90ba2 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
## Autor: David Ochoa
import numpy as np
from vispy.scene.visuals import Line
from pymouse import PyMouse
CV = np.arange(0, 2.05, 0.05, dtype=np.float32) * 3.14159
ZCV = np.zeros(CV.size, dtype=np.float32)
C_xy = np.array([np.cos(CV), np.sin(CV), ZCV]).T
C_xz = np.array([np.cos(CV), ZCV, np.sin(CV)]).T
C_yz = np.array([ZCV, np.cos(CV), np.sin(CV)]).T
sphere_pt = np.concatenate([C_xy, C_xz, C_yz])
class Mouse_trace:
"""Mouse tracing Class. It uses vispy to visualization."""
def set_bound(self, boundaries):
"""Updates the boundaries."""
self.bound = boundaries
self.sizexyz = np.abs(boundaries[:,1] - boundaries[:,0])
def step(self, time_step):
"""Calculate the new position and speed."""
mpos = self.mouse.position()
self.pos = np.asarray([mpos[0], self.bound[1,1] - mpos[1], 0])
self.update_visual()
def init_visual(self, view):
"""Initialize the object visual."""
self.trace = np.repeat(self.pos, self.tail_steps).reshape((3,self.tail_steps)).T
pos = np.concatenate([sphere_pt * self.rad + self.pos, self.trace])
self.visual = Line(pos = pos, color=self.color)
view.add(self.visual)
def update_visual(self):
"""Updates the object visual."""
self.trace[1:] = self.trace[0:-1]
self.trace[0] = self.pos
pos = np.concatenate([sphere_pt * self.rad + self.pos, self.trace])
self.visual.set_data(pos = pos)
def shake(self):
"""Inverts the z position and gives the ball a random velocity."""
pass
if __name__ == '__main__':
print(Ball_trace.__doc__)
exit()
| 33.857143 | 88 | 0.601031 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
## Autor: David Ochoa
import numpy as np
from vispy.scene.visuals import Line
from pymouse import PyMouse
CV = np.arange(0, 2.05, 0.05, dtype=np.float32) * 3.14159
ZCV = np.zeros(CV.size, dtype=np.float32)
C_xy = np.array([np.cos(CV), np.sin(CV), ZCV]).T
C_xz = np.array([np.cos(CV), ZCV, np.sin(CV)]).T
C_yz = np.array([ZCV, np.cos(CV), np.sin(CV)]).T
sphere_pt = np.concatenate([C_xy, C_xz, C_yz])
class Mouse_trace:
    """Mouse tracing Class. It uses vispy to visualization."""
    def __init__(self, color = (1.0, 1.0, 1.0, 1.0)):
        self.mouse = PyMouse()
        self.pos = np.asarray([0, 0, 0])  # current cursor position (x, y, z=0)
        self.color = color
        self.rad = 20.0  # radius of the wireframe sphere marker
        size = self.mouse.screen_size()
        # Bounds follow the screen: x in [0, width], y in [0, height], z in [0, 1].
        boundaries = np.asarray([[0, size[0]], [0, size[1]], [0, 1]])
        print(boundaries)  # NOTE(review): debug leftover - confirm it can go.
        self.sizexyz = [None] * 3
        self.tail_steps = 200  # number of trace points kept in the tail
        self.set_bound(boundaries)
        self.visual = [None]  # replaced by a Line visual in init_visual()
    def set_bound(self, boundaries):
        """Updates the boundaries."""
        self.bound = boundaries
        self.sizexyz = np.abs(boundaries[:,1] - boundaries[:,0])
    def step(self, time_step):
        """Calculate the new position and speed."""
        # NOTE(review): time_step is unused - the position comes straight
        # from the live mouse cursor (y is flipped into screen coordinates).
        mpos = self.mouse.position()
        self.pos = np.asarray([mpos[0], self.bound[1,1] - mpos[1], 0])
        self.update_visual()
    def init_visual(self, view):
        """Initialize the object visual."""
        # The trace starts with every point collapsed onto the current position.
        self.trace = np.repeat(self.pos, self.tail_steps).reshape((3,self.tail_steps)).T
        pos = np.concatenate([sphere_pt * self.rad + self.pos, self.trace])
        self.visual = Line(pos = pos, color=self.color)
        view.add(self.visual)
    def update_visual(self):
        """Updates the object visual."""
        # Shift the tail one slot and insert the newest position at the front.
        self.trace[1:] = self.trace[0:-1]
        self.trace[0] = self.pos
        pos = np.concatenate([sphere_pt * self.rad + self.pos, self.trace])
        self.visual.set_data(pos = pos)
    def shake(self):
        """No-op placeholder.  The inherited description ("inverts the z
        position and gives the ball a random velocity") is not implemented
        here - apparently kept for interface parity with a ball-trace
        class; confirm before relying on it."""
        pass
if __name__ == '__main__':
    # Bug fix: this module defines Mouse_trace; "Ball_trace" is undefined
    # here and raised a NameError whenever the script was run directly.
    print(Mouse_trace.__doc__)
    exit()
| 415 | 0 | 26 |
15649df9b42b5cd67d61a71c2708e94928e28b91 | 27,932 | py | Python | encoding_utils.py | CodedK/SublimeEncodingUtils | 97f356275518873117be3e8b573034b3e67789dd | [
"MIT"
] | 3 | 2017-07-24T16:02:38.000Z | 2021-03-23T11:27:52.000Z | encoding_utils.py | CodedK/SublimeEncodingUtils | 97f356275518873117be3e8b573034b3e67789dd | [
"MIT"
] | null | null | null | encoding_utils.py | CodedK/SublimeEncodingUtils | 97f356275518873117be3e8b573034b3e67789dd | [
"MIT"
] | null | null | null | # coding: utf8
import base64
import codecs
import hashlib
import json
import math
import re
import string
import sys
import uuid
import webbrowser
import datetime
import sublime
import sublime_plugin
try:
from .encodingutils.escape_table import (
html_escape_table,
html5_escape_table,
html_reserved_list,
xml_escape_table
)
except ValueError:
from encodingutils.escape_table import (
html5_escape_table,
html_escape_table,
html_reserved_list,
xml_escape_table
)
try:
import urllib.parse
quote_plus = urllib.parse.quote_plus
unquote_plus = urllib.parse.unquote_plus
except ImportError:
import urllib
try:
unichr(32)
except NameError:
CSS = '''
div.sub-enc_utils { padding: 10px; margin: 0; }
.sub-enc_utils h1, .sub-enc_utils h2, .sub-enc_utils h3,
.sub-enc_utils h4, .sub-enc_utils h5, .sub-enc_utils h6 {
{{'.string'|css}}
}
.sub-enc_utils blockquote { {{'.comment'|css}} }
.sub-enc_utils a { text-decoration: none; }
'''
frontmatter = {
"markdown_extensions": [
"markdown.extensions.admonition",
"markdown.extensions.attr_list",
"markdown.extensions.def_list",
"markdown.extensions.nl2br",
# Smart quotes always have corner cases that annoy me, so don't bother with them.
{"markdown.extensions.smarty": {"smart_quotes": False}},
"pymdownx.extrarawhtml",
"pymdownx.keys",
{"pymdownx.escapeall": {"hardbreak": True, "nbsp": True}},
# Sublime doesn't support superscript, so no ordinal numbers
{"pymdownx.smartsymbols": {"ordinal_numbers": False}}
]
}
class EncDocCommand(sublime_plugin.WindowCommand):
    """Open doc page.

    Renders the page as an inline phantom when mdpopups is available,
    otherwise simply opens the raw file in Sublime.
    """
    # Matches the leading "Packages" path segment so it can be swapped for
    # the ${packages} placeholder (and back).
    re_pkgs = re.compile(r'^Packages')
    def on_navigate(self, href):
        """Handle links."""
        # "sub://Packages/..." links open inside Sublime; everything else
        # goes to the default web browser.
        if href.startswith('sub://Packages'):
            sublime.run_command('open_file', {"file": self.re_pkgs.sub('${packages}', href[6:])})
        else:
            webbrowser.open_new_tab(href)
    def run(self, page):
        """Open page."""
        # NOTE(review): the mdpopups/pymdownx imports below are commented
        # out, so this try block always raises NameError and phantom
        # support is permanently disabled - confirm whether that is
        # intentional.
        try:
            # import mdpopups
            # import pymdownx
            has_phantom_support = (mdpopups.version() >= (1, 10, 0)) and (int(sublime.version()) >= 3124)
            fmatter = mdpopups.format_frontmatter(frontmatter) if pymdownx.version_info[:3] >= (4, 3, 0) else ''
        except Exception:
            fmatter = ''
            has_phantom_support = False
        if not has_phantom_support:
            # Fallback: just open the raw file.
            sublime.run_command('open_file', {"file": page})
        else:
            text = sublime.load_resource(page.replace('${packages}', 'Packages'))
            view = self.window.new_file()
            view.set_name('Sublime Encoding Utils - Quick Start')
            view.settings().set('gutter', False)
            view.settings().set('word_wrap', False)
            if has_phantom_support:
                # NOTE(review): wrapper_class "sub-notify" does not match the
                # "sub-enc_utils" selectors used in CSS - confirm.
                mdpopups.add_phantom(
                    view,
                    'quickstart',
                    sublime.Region(0),
                    fmatter + text,
                    sublime.LAYOUT_INLINE,
                    css=CSS,
                    wrapper_class="sub-notify",
                    on_navigate=self.on_navigate
                )
            else:
                view.run_command('insert', {"characters": text})
            view.set_read_only(True)
            view.set_scratch(True)
# def getSelection(self):
# text = []
# if View().sel():
# for region in View().sel():
# if region.empty():
# text.append(View().substr(View().line(region)))
# else:
# text.append(View().substr(region))
# return text
| 31.419573 | 248 | 0.489331 | # coding: utf8
import base64
import codecs
import hashlib
import json
import math
import re
import string
import sys
import uuid
import webbrowser
import datetime
import sublime
import sublime_plugin
try:
from .encodingutils.escape_table import (
html_escape_table,
html5_escape_table,
html_reserved_list,
xml_escape_table
)
except ValueError:
from encodingutils.escape_table import (
html5_escape_table,
html_escape_table,
html_reserved_list,
xml_escape_table
)
try:
import urllib.parse
quote_plus = urllib.parse.quote_plus
unquote_plus = urllib.parse.unquote_plus
except ImportError:
import urllib
    def quote_plus(text):
        """Python 2 fallback: URL-quote *text* after UTF-8 encoding it."""
        return urllib.quote_plus(text.encode('utf8'))
    def unquote_plus(text):
        """Python 2 fallback: URL-unquote *text* after UTF-8 encoding it."""
        return urllib.unquote_plus(text.encode('utf8'))
try:
unichr(32)
except NameError:
    def unichr(val):
        """Python 3 fallback: ``unichr`` does not exist; it is just ``chr``."""
        return chr(val)
CSS = '''
div.sub-enc_utils { padding: 10px; margin: 0; }
.sub-enc_utils h1, .sub-enc_utils h2, .sub-enc_utils h3,
.sub-enc_utils h4, .sub-enc_utils h5, .sub-enc_utils h6 {
{{'.string'|css}}
}
.sub-enc_utils blockquote { {{'.comment'|css}} }
.sub-enc_utils a { text-decoration: none; }
'''
frontmatter = {
"markdown_extensions": [
"markdown.extensions.admonition",
"markdown.extensions.attr_list",
"markdown.extensions.def_list",
"markdown.extensions.nl2br",
# Smart quotes always have corner cases that annoy me, so don't bother with them.
{"markdown.extensions.smarty": {"smart_quotes": False}},
"pymdownx.extrarawhtml",
"pymdownx.keys",
{"pymdownx.escapeall": {"hardbreak": True, "nbsp": True}},
# Sublime doesn't support superscript, so no ordinal numbers
{"pymdownx.smartsymbols": {"ordinal_numbers": False}}
]
}
class StringEncodePaste(sublime_plugin.WindowCommand):
    """Show a quick panel listing every conversion command and run the chosen
    one against the clipboard contents (result is inserted at the selection).
    """

    def run(self, **kwargs):
        # (label, command) pairs, kept in alphabetical label order.
        # Fixed: the 'Sha512 Encode' entry was listed twice, and
        # Sha1EncodeCommand had no menu entry at all.
        items = [
            ('Base64 Decode', 'base64_decode'),
            ('Base64 Encode', 'base64_encode'),
            ('Css Escape', 'css_escape'),
            ('Css Unescape', 'css_unescape'),
            ('Dec Hex', 'dec_hex'),
            ('Encode to Morse', 'morse_me'),
            ('Escape Like', 'escape_like'),
            ('Escape Regex', 'escape_regex'),
            ('Fix Wrong Encoding', 'fix_wrong_encoding'),
            ('Hex Dec', 'hex_dec'),
            ('Hex Unicode', 'hex_unicode'),
            ('Html Deentitize', 'html_deentitize'),
            ('Html Entitize', 'html_entitize'),
            ('Insert current date', 'ins_cur_date'),
            ('Json Escape', 'json_escape'),
            ('Json Unescape', 'json_unescape'),
            ('Md5 Encode', 'md5_encode'),
            ('NCR Decode', 'dencr'),
            ('NCR Encode', 'panos_ncr'),
            ('Password Strength', 'strength'),
            ('Safe Html Deentitize', 'safe_html_deentitize'),
            ('Safe Html Entitize', 'safe_html_entitize'),
            ('Sha1 Encode', 'sha1_encode'),
            ('Sha256 Encode', 'sha256_encode'),
            ('Sha512 Encode', 'sha512_encode'),
            ('Shannon Entropy', 'entropy'),
            ('Unicode Hex', 'unicode_hex'),
            ('Unixtime to datetime', 'unixstamp'),
            ('Url Decode', 'url_decode'),
            ('Url Encode', 'url_encode'),
            ('Xml Deentitize', 'xml_deentitize'),
            ('Xml Entitize', 'xml_entitize'),
        ]
        lines = [label for label, _ in items]
        commands = [command for _, command in items]
        view = self.window.active_view()
        if not view:
            return

        def on_done(index):
            # index == -1 means the quick panel was dismissed.
            if index == -1:
                return
            view.run_command(commands[index], {'source': 'clipboard'})

        self.window.show_quick_panel(lines, on_done)
class EncDocCommand(sublime_plugin.WindowCommand):
    """Open doc page."""
    # Rewrites 'Packages/...' resource paths to '${packages}/...' for open_file.
    re_pkgs = re.compile(r'^Packages')
    def on_navigate(self, href):
        """Handle links."""
        # 'sub://Packages/...' links open inside Sublime; everything else goes
        # to the default web browser.
        if href.startswith('sub://Packages'):
            sublime.run_command('open_file', {"file": self.re_pkgs.sub('${packages}', href[6:])})
        else:
            webbrowser.open_new_tab(href)
    def run(self, page):
        """Open page.

        Renders *page* as a markdown phantom when mdpopups/pymdownx are
        available and recent enough; otherwise falls back to opening the raw
        file.
        """
        try:
            # import mdpopups
            # import pymdownx
            # NOTE(review): the imports above are commented out, so the names
            # below raise NameError here and the except branch always runs,
            # i.e. has_phantom_support is effectively always False.
            has_phantom_support = (mdpopups.version() >= (1, 10, 0)) and (int(sublime.version()) >= 3124)
            fmatter = mdpopups.format_frontmatter(frontmatter) if pymdownx.version_info[:3] >= (4, 3, 0) else ''
        except Exception:
            fmatter = ''
            has_phantom_support = False
        if not has_phantom_support:
            sublime.run_command('open_file', {"file": page})
        else:
            text = sublime.load_resource(page.replace('${packages}', 'Packages'))
            view = self.window.new_file()
            view.set_name('Sublime Encoding Utils - Quick Start')
            view.settings().set('gutter', False)
            view.settings().set('word_wrap', False)
            # This inner check is redundant (has_phantom_support is known True
            # here), so the insert fallback below is unreachable.
            if has_phantom_support:
                mdpopups.add_phantom(
                    view,
                    'quickstart',
                    sublime.Region(0),
                    fmatter + text,
                    sublime.LAYOUT_INLINE,
                    css=CSS,
                    wrapper_class="sub-notify",
                    on_navigate=self.on_navigate
                )
            else:
                view.run_command('insert', {"characters": text})
            view.set_read_only(True)
            view.set_scratch(True)
class StringEncode(sublime_plugin.TextCommand):
    """Base class for all conversion commands.

    Subclasses implement ``encode(text, **kwargs) -> str``.  With
    ``source='clipboard'`` the clipboard is converted and the result is
    inserted/replaces each selection; otherwise each non-empty selection is
    converted in place, and if any selection is empty the whole buffer is
    converted instead.
    """
    def run(self, edit, **kwargs):
        regions = self.view.sel()
        if kwargs.get('source') == 'clipboard':
            del kwargs['source']
            text = sublime.get_clipboard()
            replacement = self.encode(text, **kwargs)
            for region in regions:
                if region.empty():
                    self.view.insert(edit, region.begin(), replacement)
                else:
                    self.view.replace(edit, region, replacement)
            return
        elif 'source' in kwargs:
            sublime.status_message('Unsupported source {0!r}'.format(kwargs['source']))
            return
        # Any empty selection widens the operation to the entire buffer.
        if any(map(lambda region: region.empty(), regions)):
            regions = [sublime.Region(0, self.view.size())]
        for region in regions:
            text = self.view.substr(region)
            replacement = self.encode(text, **kwargs)
            self.view.replace(edit, region, replacement)
class UnixstampCommand(StringEncode):
    """Convert between Unix timestamps and 'dd-mm-YYYY HH:MM:SS' datetimes.

    Heuristics on the selected text:
      * 19 chars containing '-'  -> parse as datetime, return epoch seconds
      * 11-14 chars with '.'     -> float epoch, return datetime with microseconds
      * exactly 10 digits        -> integer epoch, return datetime
      * empty selection          -> insert the current epoch and select it
    """
    def encode(self, text):
        ret = text
        msel = self.view.sel()
        # tt = str(msel[0])
        # (begin_sel, end_sel) = list(tt)
        # return tt
        try:
            if "-" in text and len(text) == 19:
                # probably a date
                ret = datetime.datetime.strptime(str(text), "%d-%m-%Y %H:%M:%S")
                ret = str(int(ret.timestamp()))
            if len(text) > 10 and len(text) < 15 and '-' not in text and '.' in text:
                # got float unixtime stamp
                ret = datetime.datetime.fromtimestamp(float(text)).strftime('%d-%m-%Y %H:%M:%S:%f')
            if len(text) == 10 and '-' not in text:
                # got integer timestamp
                ret = datetime.datetime.fromtimestamp(int(text)).strftime('%d-%m-%Y %H:%M:%S')
            # Empty first selection: insert "now" and select the 10 digits.
            if not msel[0]:
                import time
                ret = time.time()
                ret = str(int(ret))
                # Get position
                currentposition = int(self.view.sel()[0].begin())
                self.view.run_command('insert_snippet', {'contents': ret})
                self.view.sel().clear()
                self.view.sel().add(sublime.Region(currentposition, currentposition + 10))
                # self.view.run_command("expand_selection_to_paragraph")
                # self.view.run_command("move", {"by": "stops", "extend": False, "forward": False, "word_begin": True, "punct_begin": True, "separators": ""})
                return
        except Exception:
            try:
                # assume its a date
                ret = datetime.datetime.strptime(str(text), "%d-%m-%Y %H:%M:%S")
                ret = str(int(ret.timestamp()))
                # NOTE(review): returning here discards the parsed value just
                # computed above; the selection is replaced with the literal
                # string 'Got exception'.
                return 'Got exception'
                # return 'Got exception'
            except Exception:
                ret = text
                # NOTE(review): this returns the Exception *class* object, so
                # the selection is replaced with "<class 'Exception'>" --
                # almost certainly a bug (should probably return `ret`).
                return Exception
        return ret
        # what = self.view.sel()
        # zzz = str(len(what[0]))
        # return zzz
    # def getSelection(self):
    # text = []
    # if View().sel():
    # for region in View().sel():
    # if region.empty():
    # text.append(View().substr(View().line(region)))
    # else:
    # text.append(View().substr(region))
    # return text
class InsCurDateCommand(StringEncode):
    """Insert the current Unix timestamp (whole seconds) at the cursor."""
    def run(self, page):
        self.in_dt()
    def in_dt(self):
        import time
        stamp = str(int(time.time()))
        self.view.run_command('insert_snippet', {'contents': stamp})
class EntropyCommand(StringEncode):
    """Replace text with its Shannon entropy in bits per character."""
    def encode(self, text):
        # dict.fromkeys preserves first-seen order of the distinct
        # characters, matching the original summation order exactly.
        # (Removed a dead `entropy = entropy` statement and a redundant
        # list() conversion.)
        prob = [float(text.count(c)) / len(text) for c in dict.fromkeys(text)]
        return str(- sum(p * math.log(p) / math.log(2.0) for p in prob))
class IdealEntropyCommand(StringEncode):
    """Replace text with the entropy (in bits) of a uniform distribution
    over ``len(text)`` equally likely symbols."""
    def encode(self, text):
        n = len(text)
        p = 1.0 / n
        return str(-1.0 * n * p * math.log(p) / math.log(2.0))
class MorseMeCommand(StringEncode):
    """Translate text to International Morse code.

    Each character is replaced by its Morse code followed by a single space;
    characters without a Morse representation are dropped.  Input is
    lowercased first since the table only holds lowercase keys.
    """

    MORSE_TABLE = {
        "a": ".-", "b": "-...", "c": "-.-.", "d": "-..", "e": ".",
        "f": "..-.", "g": "--.", "h": "....", "i": "..", "j": ".---",
        "k": "-.-", "l": ".-..", "m": "--", "n": "-.", "o": "---",
        "p": ".--.", "q": "--.-", "r": ".-.", "s": "...", "t": "-",
        "u": "..-",  # restored: this mapping was missing (commented out)
        "v": "...-", "w": ".--", "x": "-..-", "y": "-.--", "z": "--..",
        " ": " ",
        "1": ".----", "2": "..---", "3": "...--", "4": "....-",
        "5": ".....", "6": "-....", "7": "--...", "8": "---..",
        "9": "----.", "0": "-----",
        ".": ".-.-.-", ",": "--..--", "?": "..--..", "'": ".----.",
        "/": "-..-.", "(": "-.--.", ")": "-.--.-", "&": ".-...",
        ":": "---...", ";": "-.-.-.", "=": "-...-", "+": ".-.-.",
        "-": "-....-", "_": "..--.-", "\"": ".-..-.", "$": "...-..-",
        "!": "-.-.--", "@": ".--.-."
    }

    def encode(self, text):
        # Fixed: the previous implementation iterated over the *table* and
        # emitted one code per unique character present, in table order --
        # not a usable Morse encoding.  Translate character by character.
        ret = ''
        for ch in text.lower():
            code = self.MORSE_TABLE.get(ch)
            if code is not None:
                ret += code + ' '
        return ret
class StrengthCommand(StringEncode):
    """Replace a password with its theoretical entropy in bits.

    entropy = log2(charset_size ** len(password)), where charset_size is
    estimated from which character classes occur in the text.
    """

    NUM_OF_SYMBOLS = 20  # rough count of "common" punctuation characters

    _numeric = re.compile(r'\d')
    _loweralpha = re.compile(r'[a-z]')
    _upperalpha = re.compile(r'[A-Z]')
    # self.symbols = re.compile('[-_.:,;<>?"#$%&/()!@~]')
    _symbols = re.compile(r'[-!~`@#$%^&*()_+=/?>.<,;:"]')
    # Fixed: the pattern previously contained a stray 'r' inside the range
    # ('[^\x00-r\x7F]+'), which wrongly classified 's'..'~' as "extended".
    _extended = re.compile('[^\x00-\x7F]+')

    def encode(self, text):
        from math import log, pow
        if not text:
            # Fixed: the original raised UnboundLocalError on empty input.
            return '0.0'
        charset = 0
        if self._numeric.search(text):
            charset += 10
        if self._loweralpha.search(text):
            charset += 26
        if self._upperalpha.search(text):
            charset += 26
        if self._symbols.search(text):
            charset += self.NUM_OF_SYMBOLS
        if self._extended.search(text):
            charset = 255
        if charset == 0:
            # Only characters not covered by any class above: assume the
            # full byte range (matches the original fallback).
            charset = 255
        return str(float(log(pow(charset, len(text)), 2)))
class CheckOrdCommand(StringEncode):
    """Replace text with the decimal code point of every character,
    each followed by a dot (e.g. 'ab' -> '97.98.')."""
    def encode(self, text):
        return ''.join('%d.' % ord(ch) for ch in text)
class PanosRotCommand(StringEncode):
    """Apply the ROT13 substitution cipher to the text."""
    def encode(self, text):
        # ROT13 is its own inverse, so decode() performs the same transform.
        return codecs.decode(text, 'rot_13')
class PanosNcrCommand(StringEncode):
    """Replace every non-ASCII character with its decimal numeric character
    reference (&#NNN;); ASCII characters pass through unchanged."""
    def encode(self, text):
        return ''.join(
            '&#%d;' % ord(ch) if ord(ch) > 127 else ch
            for ch in text
        )
class GenerateUuidCommand(StringEncode):
    """Insert a freshly generated random UUID (version 4) at the cursor."""
    def run(self, text):
        new_id = str(uuid.uuid4())
        self.view.run_command('insert_snippet', {'contents': new_id})
        return new_id
class DencrCommand(StringEncode):
    """Decode decimal numeric character references (&#NNN;) to characters."""
    def encode(self, text):
        while re.search('&#[0-9]+;', text):
            match = re.search('&#([0-9]+);', text)
            text = text.replace(match.group(0), unichr(int(match.group(1), 10)))
        # Fixed: this replace was a no-op ('&' -> '&'); the ampersand entity
        # must be unescaped last, matching HtmlDeentitizeCommand.
        text = text.replace('&amp;', '&')
        return text
class DehcrCommand(StringEncode):
    """Decode hexadecimal numeric character references (&#xHH;) to characters."""
    def encode(self, text):
        # Fixed: the patterns here were mojibake-garbled ('�?[xΧ]...', with a
        # Greek capital Chi); restored to the same '&#[xX]...;' form used by
        # HtmlDeentitizeCommand.
        while re.search('&#[xX][0-9a-fA-F]+;', text):
            match = re.search('&#[xX]([0-9a-fA-F]+);', text)
            text = text.replace(match.group(0), unichr(int(match.group(1), 16)))
        # Fixed: this replace was a no-op ('&' -> '&'); unescape the
        # ampersand entity last.
        text = text.replace('&amp;', '&')
        return text
class PanosHcrCommand(StringEncode):
    """Replace every character with its hexadecimal numeric character
    reference (&#xHH;)."""
    def encode(self, text):
        # Fixed: str(hex(ord(c))) kept the '0x' prefix, producing invalid
        # references like '&#0x61;' that DehcrCommand/HtmlDeentitizeCommand
        # cannot decode.  Emit the standard '&#x61;' form instead.
        return ''.join('&#x%x;' % ord(ch) for ch in text)
class FixWrongEncodingCommand(StringEncode):
    """Repair mojibake: re-encode the selection with a source charset and
    re-decode it with the charset chosen from a quick panel.

    NOTE(review): the selected text is stashed in a module-level global
    (`my_text`) because the quick-panel callback runs later, outside the
    TextCommand edit; only the last selection region survives the loop.
    """
    def run(self, text):
        global my_text
        for region in self.view.sel():
            my_text = self.view.substr(region)
            # self.view.replace(my_text, region, 'replacement') # does not work
            # self.view.run_command('insert_snippet', {'contents': my_text}) # works
            # self.view.show_popup(text, max_width=200, on_hide=self.done)
            # #### One way to select the whole current line:
            # currentposition = self.view.sel()[0].begin()
            # currentline = self.view.full_line(currentposition)
            # my_sel = self.view.substr(currentline)
            # self.view.show_popup('The Text other line:' + my_sel, max_width=200, on_hide=self.done)
            # window = sublime.active_window()
            # window.run_command('hide_panel')
            self.check_first()
            # print('User sent:', ret)
            # 'something' is the default message
            # self.view.window().show_input_panel("Please select the correct encoding:", 'iso-8859-7', self.on_done(text, text), None, None)
    def done(self):
        print("finished")
    def check_first(self):
        # "ÄïêéìÞ ÅëëçíéêÜ" -- sample mojibake (Greek text read as Latin-1)
        # window.show_input_panel('Search For 2:', '', self.on_done, None, None)
        items = ['Latin to iso-8859-2', 'Latin to iso-8859-3', 'Latin to iso-8859-4', 'Latin to iso-8859-5', 'Latin to iso-8859-6', 'Latin to Greek', 'Extended Latin to Greek', 'Latin to iso-8859-8', 'Latin to iso-8859-9', 'Unicode points to UTF8']
        # self.view.show_popup_menu(items, self.on_done)
        self.view.window().show_quick_panel(items=items,
                                            selected_index=6,
                                            # on_select=lambda x: print("s:%i" % x), on_highlight=lambda x: print("h:%i" % x)
                                            on_select=self.on_done
                                            )
        # self.view.show_popup('The Text other line', max_width=100, on_hide=self.on_done(edit))
    def on_done(self, result):
        # Map the panel index to (source, target) codecs; index 6 ("Extended
        # Latin to Greek") and 9 re-decode the whole string at once, the
        # others re-decode character by character.
        def_enc = 'iso-8859-7'
        from_enc = 'iso-8859-1' # subset
        if result == 0:
            def_enc = 'iso-8859-2'
        if result == 1:
            def_enc = 'iso-8859-3'
        if result == 2:
            def_enc = 'iso-8859-4'
        if result == 3:
            def_enc = 'iso-8859-5'
        if result == 4:
            def_enc = 'iso-8859-6'
        if result == 5:
            from_enc = 'iso-8859-1' # subset
            def_enc = 'iso-8859-7'
        if result == 6:
            # from_enc = 'cp850' # superset
            from_enc = 'cp1252' # superset
            def_enc = 'utf-8'
        if result == 7:
            def_enc = 'iso-8859-8'
        if result == 8:
            def_enc = 'iso-8859-9'
        if result == 9:
            from_enc = 'utf-8'
            def_enc = 'unicode-escape'
        ret = ''
        print("Selected value:" + str(result))
        # import time
        try:
            if result != -1:
                if result in [6, 9]:
                    # ret = bytes(my_text, from_enc).decode('utf-8')
                    ret = my_text.encode(from_enc).decode(def_enc)
                    self.view.run_command('insert_snippet', {'contents': ret})
                else:
                    for c in my_text[:]:
                        ret += c.encode(from_enc).decode(def_enc)
                    self.view.run_command('insert_snippet', {'contents': ret}) # works
                    # self.view.show_popup('Hello, <b>World!</b><br><a href="moo">Click Me</a>', on_navigate=print)
        except Exception as e:
            self.view.show_popup('<b>' + def_enc + '</b> is not the correct encoding for this text!<br><br><b>Error:</b> <br><i>' + str(e) + '</i>', on_navigate=print)
class HtmlEntitizeCommand(StringEncode):
    """Escape text for HTML: named entities from the table, then hex NCRs
    for any remaining non-ASCII character."""
    def encode(self, text):
        # Fixed: this replace was a no-op ('&' -> '&'); the ampersand must be
        # escaped first so the entities written below are not double-escaped.
        text = text.replace('&', '&amp;')
        for k in html_escape_table:
            v = html_escape_table[k]
            text = text.replace(k, v)
        ret = ''
        for c in text[:]:
            if ord(c) > 127:
                ret += hex(ord(c)).replace('0x', '&#x') + ';'
            else:
                ret += c
        return ret
class HtmlDeentitizeCommand(StringEncode):
    """Unescape HTML: named entities (HTML 4 and HTML 5 tables), then hex
    numeric character references, then the ampersand entity last."""
    def encode(self, text):
        for k in html_escape_table:
            v = html_escape_table[k]
            text = text.replace(v, k)
        for k in html5_escape_table:
            v = html5_escape_table[k]
            text = text.replace(v, k)
        while re.search('&#[xX][a-fA-F0-9]+;', text):
            match = re.search('&#[xX]([a-fA-F0-9]+);', text)
            text = text.replace(
                match.group(0), unichr(int('0x' + match.group(1), 16)))
        # Fixed: this replace was a no-op ('&' -> '&'); '&amp;' must be
        # unescaped last so freshly produced '&' cannot recombine.
        text = text.replace('&amp;', '&')
        return text
class CssEscapeCommand(StringEncode):
    """Escape non-ASCII characters as CSS hex escapes (backslash + hex);
    ASCII characters pass through unchanged."""
    def encode(self, text):
        return ''.join(
            '\\%x' % ord(ch) if ord(ch) > 127 else ch
            for ch in text
        )
class CssUnescapeCommand(StringEncode):
    """Replace CSS hex escapes (backslash + hex digits) with the characters
    they denote, repeating until none remain."""
    def encode(self, text):
        pattern = re.compile(r'\\([a-fA-F0-9]+)')
        found = pattern.search(text)
        while found:
            text = text.replace(found.group(0), unichr(int(found.group(1), 16)))
            found = pattern.search(text)
        return text
class SafeHtmlEntitizeCommand(StringEncode):
    """Entitize like HtmlEntitizeCommand, but leave HTML-reserved characters
    untouched so existing markup keeps working."""
    def encode(self, text):
        for char, entity in html_escape_table.items():
            if char in html_reserved_list:
                # skip HTML reserved characters
                continue
            text = text.replace(char, entity)
        # Hex NCRs for anything still outside ASCII.
        return ''.join(
            '&#x%x;' % ord(ch) if ord(ch) > 127 else ch
            for ch in text
        )
class SafeHtmlDeentitizeCommand(StringEncode):
    """Deentitize like HtmlDeentitizeCommand, but leave entities for
    HTML-reserved characters untouched."""
    def encode(self, text):
        for k in html_escape_table:
            # skip HTML reserved characters
            if k in html_reserved_list:
                continue
            v = html_escape_table[k]
            text = text.replace(v, k)
        while re.search('&#[xX][a-fA-F0-9]+;', text):
            match = re.search('&#[xX]([a-fA-F0-9]+);', text)
            text = text.replace(
                match.group(0), unichr(int('0x' + match.group(1), 16)))
        # Fixed: this replace was a no-op ('&' -> '&'); unescape '&amp;' last.
        text = text.replace('&amp;', '&')
        return text
class XmlEntitizeCommand(StringEncode):
    """Escape text for XML: ampersand first, then the XML entity table, then
    hex NCRs for remaining non-ASCII characters."""
    def encode(self, text):
        # Fixed: this replace was a no-op ('&' -> '&'); '&' must be escaped
        # first so the entities written below are not double-escaped.
        text = text.replace('&', '&amp;')
        for k in xml_escape_table:
            v = xml_escape_table[k]
            text = text.replace(k, v)
        ret = ''
        for c in text[:]:
            if ord(c) > 127:
                ret += hex(ord(c)).replace('0x', '&#x') + ';'
            else:
                ret += c
        return ret
class XmlDeentitizeCommand(StringEncode):
    """Unescape XML entities from the table, then the ampersand entity last."""
    def encode(self, text):
        for k in xml_escape_table:
            v = xml_escape_table[k]
            text = text.replace(v, k)
        # Fixed: this replace was a no-op ('&' -> '&'); '&amp;' is unescaped
        # last so freshly produced '&' cannot recombine into entities.
        text = text.replace('&amp;', '&')
        return text
class JsonEscapeCommand(StringEncode):
    """JSON-encode the text (adds surrounding quotes and backslash escapes)."""
    def encode(self, text):
        escaped = json.dumps(text)
        return escaped
class JsonUnescapeCommand(StringEncode):
    """Parse the text as a JSON value (typically a quoted string)."""
    def encode(self, text):
        unescaped = json.loads(text)
        return unescaped
class UrlEncodeCommand(StringEncode):
    """Percent-encode the text; with old_school (the default), spaces become
    '%20' instead of '+'."""
    def encode(self, text, old_school=True):
        quoted = quote_plus(text)
        return quoted.replace("+", "%20") if old_school else quoted
class UrlDecodeCommand(StringEncode):
    """Decode percent-escapes (and '+' as space) back to plain text."""
    def encode(self, text):
        decoded = unquote_plus(text)
        return decoded
class Base64EncodeCommand(StringEncode):
    """Base64-encode the text (raw_unicode_escape bytes -> ASCII string)."""
    def encode(self, text):
        raw = text.encode('raw_unicode_escape')
        return base64.b64encode(raw).decode('ascii')
class Base64DecodeCommand(StringEncode):
    """Base64-decode the text back to a raw_unicode_escape string."""
    def encode(self, text):
        raw = base64.b64decode(text)
        return raw.decode('raw_unicode_escape')
class Md5EncodeCommand(StringEncode):
    """Replace text with its MD5 hex digest (UTF-8 input)."""
    def encode(self, text):
        return hashlib.md5(text.encode('utf-8')).hexdigest()
class Sha256EncodeCommand(StringEncode):
    """Replace text with its SHA-256 hex digest (UTF-8 input)."""
    def encode(self, text):
        return hashlib.sha256(text.encode('utf-8')).hexdigest()
class Sha1EncodeCommand(StringEncode):
    """Replace text with its SHA-1 hex digest (UTF-8 input)."""
    def encode(self, text):
        return hashlib.sha1(text.encode('utf-8')).hexdigest()
class Sha512EncodeCommand(StringEncode):
    """Replace text with its SHA-512 hex digest (UTF-8 input)."""
    def encode(self, text):
        return hashlib.sha512(text.encode('utf-8')).hexdigest()
class Escaper(StringEncode):
    """Base for escape commands: backslash-escapes every character matched by
    the subclass-provided ``meta`` pattern, unless it is already escaped."""
    def encode(self, text):
        # Negative lookbehind keeps already-escaped characters untouched.
        return re.sub(r'(?<!\\)(%s)' % self.meta, r'\\\1', text)
class EscapeRegexCommand(Escaper):
    # Characters with special meaning in regular expressions.
    meta = r'[\\*.+^$()\[\]\{\}]'
class EscapeLikeCommand(Escaper):
    # SQL LIKE wildcard characters.
    meta = r'[%_]'
class HexDecCommand(StringEncode):
    """Convert a hexadecimal literal to its decimal representation."""
    def encode(self, text):
        value = int(text, 16)
        return str(value)
class DecHexCommand(StringEncode):
    """Convert a decimal literal to its '0x'-prefixed hex representation."""
    def encode(self, text):
        value = int(text)
        return hex(value)
class UnicodeHexCommand(StringEncode):
    """Convert text to Python-style escapes: ASCII stays literal, BMP
    characters become ``\\uXXXX``, astral characters become ``\\UXXXXXXXX``.

    Works on the raw UTF-16 byte stream of the text, pairing bytes (and
    surrogate pairs) back together by hand.
    """
    def encode(self, text):
        hex_text = u''
        text_bytes = bytes(text, 'utf-16')
        # Strip the BOM and remember the byte order it declared.
        # NOTE(review): if no BOM is present, `endian` stays unbound and the
        # loop below raises NameError -- TODO confirm this cannot happen for
        # CPython's utf-16 output (it always emits a BOM).
        if text_bytes[0:2] == b'\xff\xfe':
            endian = 'little'
            text_bytes = text_bytes[2:]
        elif text_bytes[0:2] == b'\xfe\xff':
            endian = 'big'
            text_bytes = text_bytes[2:]
        # char_index tracks position within a 2-byte unit (0/1) or a
        # 4-byte surrogate pair (0..3).
        char_index = 0
        for c in text_bytes:
            if char_index == 0:
                c1 = c
                char_index += 1
            elif char_index == 1:
                c2 = c
                if endian == 'little':
                    c1, c2 = c2, c1
                tmp = (c1 << 8) + c2
                if tmp < 0x80:
                    # Plain ASCII: keep the literal character.
                    hex_text += chr(tmp)
                    char_index = 0
                elif tmp >= 0xd800 and tmp <= 0xdbff:
                    # High surrogate: wait for the low surrogate bytes.
                    char_index += 1
                else:
                    hex_text += '\\u' + '{0:04x}'.format(tmp)
                    char_index = 0
            elif char_index == 2:
                c3 = c
                char_index += 1
            elif char_index == 3:
                c4 = c
                if endian == 'little':
                    c3, c4 = c4, c3
                # Recombine the surrogate pair into a code point > 0xFFFF.
                tmp1 = ((c1 << 8) + c2) - 0xd800
                tmp2 = ((c3 << 8) + c4) - 0xdc00
                tmp = (tmp1 * 0x400) + tmp2 + 0x10000
                hex_text += '\\U' + '{0:08x}'.format(tmp)
                char_index = 0
        return hex_text
class HexUnicodeCommand(StringEncode):
    """Convert ``\\uXXXX`` / ``\\UXXXXXXXX`` escapes back to characters.

    Three passes: single ``\\u`` escapes outside the surrogate range, then
    ``\\u``-pair surrogates, then 4-byte ``\\U`` escapes.  Byte pairs are
    assembled in the platform's native byte order before utf-16 decoding.
    """
    def encode(self, text):
        uni_text = text
        endian = sys.byteorder
        # Pass 1: lone \uXXXX escapes (surrogate halves are skipped here and
        # handled as pairs in pass 2).
        r = re.compile(r'\\u([0-9a-fA-F]{2})([0-9a-fA-F]{2})')
        rr = r.search(uni_text)
        while rr:
            first_byte = int(rr.group(1), 16)
            if first_byte >= 0xd8 and first_byte <= 0xdf:
                # Surrogate pair
                pass
            else:
                if endian == 'little':
                    b1 = int(rr.group(2), 16)
                    b2 = int(rr.group(1), 16)
                else:
                    b1 = int(rr.group(1), 16)
                    b2 = int(rr.group(2), 16)
                ch = bytes([b1, b2]).decode('utf-16')
                uni_text = uni_text.replace(rr.group(0), ch)
            rr = r.search(uni_text, rr.start(0) + 1)
        # Surrogate pair (2 bytes + 2 bytes)
        r = re.compile(
            r'\\u([0-9a-fA-F]{2})([0-9a-fA-F]{2})\\u([0-9a-fA-F]{2})([0-9a-fA-F]{2})')
        rr = r.search(uni_text)
        while rr:
            if endian == 'little':
                b1 = int(rr.group(2), 16)
                b2 = int(rr.group(1), 16)
                b3 = int(rr.group(4), 16)
                b4 = int(rr.group(3), 16)
            else:
                b1 = int(rr.group(1), 16)
                b2 = int(rr.group(2), 16)
                b3 = int(rr.group(3), 16)
                b4 = int(rr.group(4), 16)
            ch = bytes([b1, b2, b3, b4]).decode('utf-16')
            uni_text = uni_text.replace(rr.group(0), ch)
            rr = r.search(uni_text)
        # Surrogate pair (4 bytes)
        r = re.compile(
            r'\\U([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})')
        rr = r.search(uni_text)
        while rr:
            tmp = (int(rr.group(1), 16) << 24) \
                + (int(rr.group(2), 16) << 16) \
                + (int(rr.group(3), 16) << 8) \
                + (int(rr.group(4), 16))
            if tmp <= 0xffff:
                ch = chr(tmp)
            else:
                # Split the astral code point into a UTF-16 surrogate pair,
                # then decode the 4 bytes in native byte order.
                tmp -= 0x10000
                c1 = 0xd800 + int(tmp / 0x400)
                c2 = 0xdc00 + int(tmp % 0x400)
                if endian == 'little':
                    b1 = c1 & 0xff
                    b2 = c1 >> 8
                    b3 = c2 & 0xff
                    b4 = c2 >> 8
                else:
                    b1 = c1 >> 8
                    b2 = c1 & 0xff
                    b3 = c2 >> 8
                    b4 = c2 & 0xff
                ch = bytes([b1, b2, b3, b4]).decode('utf-16')
            uni_text = uni_text.replace(rr.group(0), ch)
            rr = r.search(uni_text)
        return uni_text
| 21,213 | 762 | 2,169 |
a7d26187bda32489c65bb95c3ce97baddbc8200c | 1,709 | py | Python | python/sparseglm/benchmarks/multitask.py | PABannier/sparseglm | ec1c6a15786d9fc0cc58ae3d7e28227bbc9077e9 | [
"MIT"
] | null | null | null | python/sparseglm/benchmarks/multitask.py | PABannier/sparseglm | ec1c6a15786d9fc0cc58ae3d7e28227bbc9077e9 | [
"MIT"
] | null | null | null | python/sparseglm/benchmarks/multitask.py | PABannier/sparseglm | ec1c6a15786d9fc0cc58ae3d7e28227bbc9077e9 | [
"MIT"
] | null | null | null | import time
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model import MultiTaskLasso as MultiTaskLasso_sk
from sparseglm.estimators import MultiTaskLasso
from sparseglm.utils import make_correlated_data, compute_alpha_max
n_samples = 100
n_features = 3000
n_tasks = 80
snr = 2
corr = 0.7
density = 0.1
tol = 1e-9
reg = 0.1
X, Y, _ = make_correlated_data(
n_samples=n_samples,
n_features=n_features,
n_tasks=n_tasks,
corr=corr,
snr=snr,
density=density,
random_state=0,
)
X_sparse = sp.csc_matrix(X * np.random.binomial(1, 0.1, X.shape))
alpha_max = compute_alpha_max(X, Y)
estimator_sk = MultiTaskLasso_sk(
alpha_max * reg, fit_intercept=False, tol=tol, max_iter=10 ** 6
)
estimator_rl = MultiTaskLasso(alpha_max * reg, tol=tol, verbose=False)
print("Fitting dense matrices...")
coef_sk, duration_sk = time_estimator(estimator_sk, X, Y)
coef_rl, duration_rl = time_estimator(estimator_rl, X, Y)
np.testing.assert_allclose(coef_sk, coef_rl, atol=1e-5)
print("Fitting sparse matrices...")
coef_sk_sparse, duration_sk_sparse = time_estimator(
estimator_sk, X_sparse.toarray(), Y
)
coef_rl_sparse, duration_rl_sparse = time_estimator(estimator_rl, X_sparse, Y)
np.testing.assert_allclose(coef_sk_sparse, coef_rl_sparse, atol=1e-5)
print("=" * 5 + " RESULTS " + "=" * 5)
print(f"[DENSE] Scikit-learn :: {duration_sk} s")
print(f"[DENSE] SparseGLM :: {duration_rl} s")
print("--" * 5)
print(f"[SPARSE] Scikit-learn :: {duration_sk_sparse} s")
print(f"[SPARSE] SparseGLM :: {duration_rl_sparse} s")
| 23.410959 | 78 | 0.726156 | import time
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model import MultiTaskLasso as MultiTaskLasso_sk
from sparseglm.estimators import MultiTaskLasso
from sparseglm.utils import make_correlated_data, compute_alpha_max
# Benchmark configuration: problem size, noise level, and regularization.
n_samples = 100
n_features = 3000
n_tasks = 80
snr = 2       # signal-to-noise ratio of the synthetic data
corr = 0.7    # feature correlation
density = 0.1 # fraction of non-zero rows in the true coefficients
tol = 1e-9
reg = 0.1     # regularization as a fraction of alpha_max
X, Y, _ = make_correlated_data(
    n_samples=n_samples,
    n_features=n_features,
    n_tasks=n_tasks,
    corr=corr,
    snr=snr,
    density=density,
    random_state=0,
)
# Sparse variant: randomly zero out ~90% of the entries.
X_sparse = sp.csc_matrix(X * np.random.binomial(1, 0.1, X.shape))
def time_estimator(clf, X, y):
    """Fit *clf* on (X, y); return its coefficients and the wall-clock fit time."""
    t_start = time.time()
    clf.fit(X, y)
    elapsed = time.time() - t_start
    return clf.coef_, elapsed
# Fit both implementations on dense and sparse data, check they agree, and
# report wall-clock timings.
alpha_max = compute_alpha_max(X, Y)
estimator_sk = MultiTaskLasso_sk(
    alpha_max * reg, fit_intercept=False, tol=tol, max_iter=10 ** 6
)
estimator_rl = MultiTaskLasso(alpha_max * reg, tol=tol, verbose=False)
print("Fitting dense matrices...")
coef_sk, duration_sk = time_estimator(estimator_sk, X, Y)
coef_rl, duration_rl = time_estimator(estimator_rl, X, Y)
np.testing.assert_allclose(coef_sk, coef_rl, atol=1e-5)
print("Fitting sparse matrices...")
# scikit-learn gets the densified matrix; sparseglm consumes CSC directly.
coef_sk_sparse, duration_sk_sparse = time_estimator(
    estimator_sk, X_sparse.toarray(), Y
)
coef_rl_sparse, duration_rl_sparse = time_estimator(estimator_rl, X_sparse, Y)
np.testing.assert_allclose(coef_sk_sparse, coef_rl_sparse, atol=1e-5)
print("=" * 5 + " RESULTS " + "=" * 5)
print(f"[DENSE] Scikit-learn :: {duration_sk} s")
print(f"[DENSE] SparseGLM :: {duration_rl} s")
print("--" * 5)
print(f"[SPARSE] Scikit-learn :: {duration_sk_sparse} s")
print(f"[SPARSE] SparseGLM :: {duration_rl_sparse} s")
| 117 | 0 | 23 |
d65589beaa70177db0edb7397dfca6c479adcdb7 | 1,887 | py | Python | pyloading_bar/bar.py | NalbertLeal/pyloading | dd9f9788c961abb1d669dade8603a08be18053d9 | [
"Apache-2.0"
] | 1 | 2022-03-23T15:17:07.000Z | 2022-03-23T15:17:07.000Z | pyloading_bar/bar.py | NalbertLeal/pyloading | dd9f9788c961abb1d669dade8603a08be18053d9 | [
"Apache-2.0"
] | 2 | 2022-03-23T15:27:36.000Z | 2022-03-23T15:47:13.000Z | pyloading_bar/bar.py | NalbertLeal/pyloading | dd9f9788c961abb1d669dade8603a08be18053d9 | [
"Apache-2.0"
] | 1 | 2022-03-23T15:35:16.000Z | 2022-03-23T15:35:16.000Z | import math
import os
| 29.484375 | 106 | 0.663487 | import math
import os
class Bar:
def __init__(self, total, current_step=0, symbol='#', update_terminal=True, length=None, template=None):
self._total = total
self._length = self._calc_length(length, total)
self._current_step = current_step
self._symbol = symbol
self._update_terminal = update_terminal
self._char_step = self._total / self._length
self._inner_step = self._char_step / len(self._symbol)
self._walls = '[]'
self._extract_template(template)
self._draw()
def _extract_template(self, template):
if (template):
self._symbol = template[1:-1]
self._inner_step = self._char_step / len(self._symbol)
self._walls = template[0] + template[-1]
def _calc_length(self, length, total):
(columns, _) = os.get_terminal_size()
final_length = length if length is not None else total
if final_length > columns - 10:
return columns - 10
else:
return final_length
def _loading_bar(self):
full_symbol = self._symbol[-1]
full_step = math.floor(self._current_step / self._char_step)
rest = int(self._current_step % self._char_step)
char_index = math.floor(rest / self._inner_step)
progress_done = full_step * full_symbol
progress_todo = (self._length - full_step - 1 ) * ' '
if (self._current_step == self._total):
bar = f'{self._walls[0]}{progress_done}{self._walls[1]}'
else:
bar = f'{self._walls[0]}{progress_done}{self._symbol[char_index]}{progress_todo}{self._walls[1]}'
percentage = round((self._current_step / self._total) * 100, 2)
str_percentage = f' {percentage}%'
return f'{bar}{str_percentage}'
def _draw(self):
bar = self._loading_bar()
if self._update_terminal:
print(f'\r{bar}', end='')
else:
print(f'{bar}')
def next(self):
self._current_step += 1
self._draw() | 1,696 | -11 | 176 |
13a118578e83cf905b5e96b6a52910b29838edf4 | 32,093 | py | Python | pyscf/mcscf/newton_casscf.py | KMCzajkowski/pyscf | e8af41d910cc0d3963655120c0b689590ad978e7 | [
"BSD-2-Clause"
] | null | null | null | pyscf/mcscf/newton_casscf.py | KMCzajkowski/pyscf | e8af41d910cc0d3963655120c0b689590ad978e7 | [
"BSD-2-Clause"
] | null | null | null | pyscf/mcscf/newton_casscf.py | KMCzajkowski/pyscf | e8af41d910cc0d3963655120c0b689590ad978e7 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Second order CASSCF
'''
import sys
import time
import copy
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf.lib import logger
from pyscf.mcscf import casci, mc1step
from pyscf.mcscf.casci import get_fock, cas_natorb, canonicalize
from pyscf.mcscf import mc_ao2mo
from pyscf.mcscf import chkfile
from pyscf import ao2mo
from pyscf import scf
from pyscf.scf import ciah
from pyscf import fci
# gradients, hessian operator and hessian diagonal
u, ci_kf = extract_rotation(casscf, dr, u, ci_kf)
log.debug(' tot inner=%d |g|= %4.3g (%4.3g %4.3g) |u-1|= %4.3g |dci|= %4.3g',
stat.imic, norm_gall, norm_gorb, norm_gci,
numpy.linalg.norm(u-numpy.eye(nmo)),
numpy.linalg.norm(ci_kf-ci0))
return u, ci_kf, norm_gkf, stat, dxi
def kernel(casscf, mo_coeff, tol=1e-7, conv_tol_grad=None,
           ci0=None, callback=None, verbose=logger.NOTE, dump_chk=True):
    '''CASSCF solver (second-order/Newton algorithm).

    Macro-iterates: simultaneously update orbitals and CI coefficients via
    update_orb_ci, rebuild the MO integrals, re-solve the CASCI problem, and
    stop when both the energy change and the gradient norm are converged.

    Returns (conv, e_tot, e_ci, fcivec, mo, mo_energy).
    '''
    log = logger.new_logger(casscf, verbose)
    # NOTE(review): time.clock() was removed in Python 3.8; newer pyscf uses
    # logger.process_clock()/perf_counter() here.
    cput0 = (time.clock(), time.time())
    log.debug('Start newton CASSCF')
    if callback is None:
        callback = casscf.callback
    mo = mo_coeff
    nmo = mo_coeff.shape[1]
    #TODO: lazy evaluate eris, to leave enough memory for FCI solver
    eris = casscf.ao2mo(mo)
    e_tot, e_ci, fcivec = casscf.casci(mo, ci0, eris, log, locals())
    # Full-valence case: no orbital optimization possible, return right away.
    # NOTE(review): mo_energy is unbound here when canonicalization is off.
    if casscf.ncas == nmo and not casscf.internal_rotation:
        if casscf.canonicalization:
            log.debug('CASSCF canonicalization')
            mo, fcivec, mo_energy = casscf.canonicalize(mo, fcivec, eris, False,
                                                        casscf.natorb, verbose=log)
        return True, e_tot, e_ci, fcivec, mo, mo_energy
    casdm1 = casscf.fcisolver.make_rdm1(fcivec, casscf.ncas, casscf.nelecas)
    if conv_tol_grad is None:
        conv_tol_grad = numpy.sqrt(tol)
        logger.info(casscf, 'Set conv_tol_grad to %g', conv_tol_grad)
    conv_tol_ddm = conv_tol_grad * 3
    conv = False
    totmicro = totinner = 0
    norm_gorb = norm_gci = -1
    de, elast = e_tot, e_tot
    dr0 = None
    t2m = t1m = log.timer('Initializing newton CASSCF', *cput0)
    imacro = 0
    tot_hop = 0
    tot_kf = 0
    # Macro iterations: orbital+CI Newton step, then CASCI at the new MOs.
    while not conv and imacro < casscf.max_cycle_macro:
        imacro += 1
        u, fcivec, norm_gall, stat, dr0 = \
                update_orb_ci(casscf, mo, fcivec, eris, dr0, conv_tol_grad*.3, verbose=log)
        tot_hop += stat.tot_hop
        tot_kf += stat.tot_kf
        t2m = log.timer('update_orb_ci', *t2m)
        # Free the old integrals before transforming with the rotated MOs.
        eris = None
        mo = casscf.rotate_mo(mo, u, log)
        eris = casscf.ao2mo(mo)
        t2m = log.timer('update eri', *t2m)
        e_tot, e_ci, fcivec = casscf.casci(mo, fcivec, eris, log, locals())
        log.timer('CASCI solver', *t2m)
        t2m = t1m = log.timer('macro iter %d'%imacro, *t1m)
        de, elast = e_tot - elast, e_tot
        # Converged when both energy change and total gradient are small.
        if (abs(de) < tol and norm_gall < conv_tol_grad):
            conv = True
        if dump_chk:
            casscf.dump_chk(locals())
        if callable(callback):
            callback(locals())
    if conv:
        log.info('newton CASSCF converged in %d macro (%d KF %d Hx) steps',
                 imacro, tot_kf, tot_hop)
    else:
        log.info('newton CASSCF not converged, %d macro (%d KF %d Hx) steps',
                 imacro, tot_kf, tot_hop)
    casdm1 = casscf.fcisolver.make_rdm1(fcivec, casscf.ncas, casscf.nelecas)
    if casscf.canonicalization:
        log.info('CASSCF canonicalization')
        mo, fcivec, mo_energy = \
                casscf.canonicalize(mo, fcivec, eris, False, casscf.natorb, casdm1, log)
    # NOTE(review): `ncore` and `nocc` are not defined in this function, and
    # unpacking two names from `...[0]` looks wrong -- this natorb branch
    # would raise at runtime; confirm against upstream pyscf.
    if casscf.natorb: # dump_chk may save casdm1
        occ, ucas = casscf._eig(-casdm1, ncore, nocc)[0]
        casdm1 = -occ
    if dump_chk:
        casscf.dump_chk(locals())
    log.timer('newton CASSCF', *cput0)
    return conv, e_tot, e_ci, fcivec, mo, mo_energy
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
import pyscf.fci
from pyscf.mcscf import addons
mol = gto.Mole()
mol.verbose = 0
mol.output = None#"out_h2o"
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 ,-1. )],
['H', ( 0.,-0.5 ,-1. )],
['H', ( 0.,-0.5 ,-0. )],
['H', ( 0.,-0. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0., 1. , 1. )],
]
mol.basis = {'H': 'sto-3g',
'O': '6-31g',}
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
emc = kernel(CASSCF(m, 4, 4), m.mo_coeff, verbose=4)[1]
print(ehf, emc, emc-ehf)
print(emc - -3.22013929407)
mc = CASSCF(m, 4, (3,1))
mc.verbose = 4
#mc.fcisolver = pyscf.fci.direct_spin1
mc.fcisolver = pyscf.fci.solver(mol, False)
emc = kernel(mc, m.mo_coeff, verbose=4)[1]
print(emc - -15.950852049859-mol.energy_nuc())
mol.atom = [
['H', ( 5.,-1. , 1. )],
['H', ( 0.,-5. ,-2. )],
['H', ( 4.,-0.5 ,-3. )],
['H', ( 0.,-4.5 ,-1. )],
['H', ( 3.,-0.5 ,-0. )],
['H', ( 0.,-3. ,-1. )],
['H', ( 2.,-2.5 , 0. )],
['H', ( 1., 1. , 3. )],
]
mol.basis = {'H': 'sto-3g',
'O': '6-31g',}
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
emc = kernel(CASSCF(m, 4, 4), m.mo_coeff, verbose=4)[1]
print(ehf, emc, emc-ehf)
print(emc - -3.62638367550087, emc - -3.6268060528596635)
mc = CASSCF(m, 4, (3,1))
mc.verbose = 4
#mc.fcisolver = pyscf.fci.direct_spin1
mc.fcisolver = pyscf.fci.solver(mol, False)
emc = kernel(mc, m.mo_coeff, verbose=4)[1]
print(emc - -3.62638367550087)
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = {'H': 'cc-pvdz',
'O': 'cc-pvdz',}
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
mc = CASSCF(m, 6, 4)
mc.fcisolver = pyscf.fci.solver(mol)
mc.verbose = 4
mo = addons.sort_mo(mc, m.mo_coeff, (3,4,6,7,8,9), 1)
emc = mc.mc1step(mo)[0]
print(ehf, emc, emc-ehf)
#-76.0267656731 -76.0873922924 -0.0606266193028
print(emc - -76.0873923174, emc - -76.0926176464)
mc = CASSCF(m, 6, (3,1))
mo = addons.sort_mo(mc, m.mo_coeff, (3,4,6,7,8,9), 1)
#mc.fcisolver = pyscf.fci.direct_spin1
mc.fcisolver = pyscf.fci.solver(mol, False)
mc.verbose = 4
emc = mc.mc1step(mo)[0]
#mc.analyze()
print(emc - -75.7155632535814)
mc.internal_rotation = True
emc = mc.mc1step(mo)[0]
print(emc - -75.7155632535814)
| 39.090134 | 110 | 0.561805 | #!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Second order CASSCF
'''
import sys
import time
import copy
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf.lib import logger
from pyscf.mcscf import casci, mc1step
from pyscf.mcscf.casci import get_fock, cas_natorb, canonicalize
from pyscf.mcscf import mc_ao2mo
from pyscf.mcscf import chkfile
from pyscf import ao2mo
from pyscf import scf
from pyscf.scf import ciah
from pyscf import fci
# gradients, hessian operator and hessian diagonal
def gen_g_hop(casscf, mo, ci0, eris, verbose=None):
ncas = casscf.ncas
ncore = casscf.ncore
nocc = ncas + ncore
nelecas = casscf.nelecas
nmo = mo.shape[1]
ci0 = ci0.ravel()
if hasattr(casscf.fcisolver, 'gen_linkstr'):
linkstrl = casscf.fcisolver.gen_linkstr(ncas, nelecas, True)
linkstr = casscf.fcisolver.gen_linkstr(ncas, nelecas, False)
else:
linkstrl = linkstr = None
def fci_matvec(civec, h1, h2):
h2cas = casscf.fcisolver.absorb_h1e(h1, h2, ncas, nelecas, .5)
hc = casscf.fcisolver.contract_2e(h2cas, civec, ncas, nelecas, link_index=linkstrl).ravel()
return hc
# part5
jkcaa = numpy.empty((nocc,ncas))
# part2, part3
vhf_a = numpy.empty((nmo,nmo))
# part1 ~ (J + 2K)
casdm1, casdm2 = casscf.fcisolver.make_rdm12(ci0, ncas, nelecas, link_index=linkstr)
dm2tmp = casdm2.transpose(1,2,0,3) + casdm2.transpose(0,2,1,3)
dm2tmp = dm2tmp.reshape(ncas**2,-1)
hdm2 = numpy.empty((nmo,ncas,nmo,ncas))
g_dm2 = numpy.empty((nmo,ncas))
eri_cas = numpy.empty((ncas,ncas,ncas,ncas))
for i in range(nmo):
jbuf = eris.ppaa[i]
kbuf = eris.papa[i]
if i < nocc:
jkcaa[i] = numpy.einsum('ik,ik->i', 6*kbuf[:,i]-2*jbuf[i], casdm1)
vhf_a[i] =(numpy.einsum('quv,uv->q', jbuf, casdm1)
- numpy.einsum('uqv,uv->q', kbuf, casdm1) * .5)
jtmp = lib.dot(jbuf.reshape(nmo,-1), casdm2.reshape(ncas*ncas,-1))
jtmp = jtmp.reshape(nmo,ncas,ncas)
ktmp = lib.dot(kbuf.transpose(1,0,2).reshape(nmo,-1), dm2tmp)
hdm2[i] = (ktmp.reshape(nmo,ncas,ncas)+jtmp).transpose(1,0,2)
g_dm2[i] = numpy.einsum('uuv->v', jtmp[ncore:nocc])
if ncore <= i < nocc:
eri_cas[i-ncore] = jbuf[ncore:nocc]
jbuf = kbuf = jtmp = ktmp = dm2tmp = casdm2 = None
vhf_ca = eris.vhf_c + vhf_a
h1e_mo = reduce(numpy.dot, (mo.T, casscf.get_hcore(), mo))
################# gradient #################
gpq = numpy.zeros_like(h1e_mo)
gpq[:,:ncore] = (h1e_mo[:,:ncore] + vhf_ca[:,:ncore]) * 2
gpq[:,ncore:nocc] = numpy.dot(h1e_mo[:,ncore:nocc]+eris.vhf_c[:,ncore:nocc],casdm1)
gpq[:,ncore:nocc] += g_dm2
h1cas_0 = h1e_mo[ncore:nocc,ncore:nocc] + eris.vhf_c[ncore:nocc,ncore:nocc]
h2cas_0 = casscf.fcisolver.absorb_h1e(h1cas_0, eri_cas, ncas, nelecas, .5)
hc0 = casscf.fcisolver.contract_2e(h2cas_0, ci0, ncas, nelecas, link_index=linkstrl).ravel()
eci0 = ci0.dot(hc0)
gci = hc0 - ci0 * eci0
def g_update(u, fcivec):
uc = u[:,:ncore].copy()
ua = u[:,ncore:nocc].copy()
rmat = u - numpy.eye(nmo)
ra = rmat[:,ncore:nocc].copy()
mo1 = numpy.dot(mo, u)
mo_c = numpy.dot(mo, uc)
mo_a = numpy.dot(mo, ua)
dm_c = numpy.dot(mo_c, mo_c.T) * 2
fcivec *= 1./numpy.linalg.norm(fcivec)
casdm1, casdm2 = casscf.fcisolver.make_rdm12(fcivec, ncas, nelecas, link_index=linkstr)
#casscf.with_dep4 = False
#casscf.ci_response_space = 3
#casscf.ci_grad_trust_region = 3
#casdm1, casdm2, gci, fcivec = casscf.update_casdm(mo, u, fcivec, 0, eris, locals())
dm_a = reduce(numpy.dot, (mo_a, casdm1, mo_a.T))
vj, vk = casscf.get_jk(casscf.mol, (dm_c, dm_a))
vhf_c = reduce(numpy.dot, (mo1.T, vj[0]-vk[0]*.5, mo1[:,:nocc]))
vhf_a = reduce(numpy.dot, (mo1.T, vj[1]-vk[1]*.5, mo1[:,:nocc]))
h1e_mo1 = reduce(numpy.dot, (u.T, h1e_mo, u[:,:nocc]))
p1aa = numpy.empty((nmo,ncas,ncas*ncas))
paa1 = numpy.empty((nmo,ncas*ncas,ncas))
aaaa = numpy.empty([ncas]*4)
for i in range(nmo):
jbuf = eris.ppaa[i]
kbuf = eris.papa[i]
p1aa[i] = lib.dot(ua.T, jbuf.reshape(nmo,-1))
paa1[i] = lib.dot(kbuf.transpose(0,2,1).reshape(-1,nmo), ra)
if ncore <= i < nocc:
aaaa[i-ncore] = jbuf[ncore:nocc]
# active space Hamiltonian up to 2nd order
aa11 = lib.dot(ua.T, p1aa.reshape(nmo,-1)).reshape([ncas]*4)
aa11 = aa11 + aa11.transpose(2,3,0,1) - aaaa
a11a = lib.dot(ra.T, paa1.reshape(nmo,-1)).reshape((ncas,)*4)
a11a = a11a + a11a.transpose(1,0,2,3)
a11a = a11a + a11a.transpose(0,1,3,2)
eri_cas_2 = aa11 + a11a
h1cas_2 = h1e_mo1[ncore:nocc,ncore:nocc] + vhf_c[ncore:nocc,ncore:nocc]
fcivec = fcivec.ravel()
hc0 = fci_matvec(fcivec, h1cas_2, eri_cas_2)
gci = hc0 - fcivec * fcivec.dot(hc0)
g = numpy.zeros_like(h1e_mo)
g[:,:ncore] = (h1e_mo1[:,:ncore] + vhf_c[:,:ncore] + vhf_a[:,:ncore]) * 2
g[:,ncore:nocc] = numpy.dot(h1e_mo1[:,ncore:nocc]+vhf_c[:,ncore:nocc], casdm1)
# 0000 + 1000 + 0100 + 0010 + 0001 + 1100 + 1010 + 1001 (missing 0110 + 0101 + 0011)
p1aa = lib.dot(u.T, p1aa.reshape(nmo,-1)).reshape(nmo,ncas,ncas,ncas)
paa1 = lib.dot(u.T, paa1.reshape(nmo,-1)).reshape(nmo,ncas,ncas,ncas)
p1aa += paa1
p1aa += paa1.transpose(0,1,3,2)
g[:,ncore:nocc] += numpy.einsum('puwx,wxuv->pv', p1aa, casdm2)
g_orb = casscf.pack_uniq_var(g-g.T)
return numpy.hstack((g_orb*2, gci*2))
############## hessian, diagonal ###########
# part7
dm1 = numpy.zeros((nmo,nmo))
idx = numpy.arange(ncore)
dm1[idx,idx] = 2
dm1[ncore:nocc,ncore:nocc] = casdm1
h_diag = numpy.einsum('ii,jj->ij', h1e_mo, dm1) - h1e_mo * dm1
h_diag = h_diag + h_diag.T
# part8
g_diag = gpq.diagonal()
h_diag -= g_diag + g_diag.reshape(-1,1)
idx = numpy.arange(nmo)
h_diag[idx,idx] += g_diag * 2
# part2, part3
v_diag = vhf_ca.diagonal() # (pr|kl) * E(sq,lk)
h_diag[:,:ncore] += v_diag.reshape(-1,1) * 2
h_diag[:ncore] += v_diag * 2
idx = numpy.arange(ncore)
h_diag[idx,idx] -= v_diag[:ncore] * 4
# V_{pr} E_{sq}
tmp = numpy.einsum('ii,jj->ij', eris.vhf_c, casdm1)
h_diag[:,ncore:nocc] += tmp
h_diag[ncore:nocc,:] += tmp.T
tmp = -eris.vhf_c[ncore:nocc,ncore:nocc] * casdm1
h_diag[ncore:nocc,ncore:nocc] += tmp + tmp.T
# part4
# -2(pr|sq) + 4(pq|sr) + 4(pq|rs) - 2(ps|rq)
tmp = 6 * eris.k_pc - 2 * eris.j_pc
h_diag[ncore:,:ncore] += tmp[ncore:]
h_diag[:ncore,ncore:] += tmp[ncore:].T
# part5 and part6 diag
# -(qr|kp) E_s^k p in core, sk in active
h_diag[:nocc,ncore:nocc] -= jkcaa
h_diag[ncore:nocc,:nocc] -= jkcaa.T
v_diag = numpy.einsum('ijij->ij', hdm2)
h_diag[ncore:nocc,:] += v_diag.T
h_diag[:,ncore:nocc] += v_diag
# Does this term contribute to internal rotation?
# h_diag[ncore:nocc,ncore:nocc] -= v_diag[:,ncore:nocc]*2
h_diag = casscf.pack_uniq_var(h_diag)
hci_diag = casscf.fcisolver.make_hdiag(h1cas_0, eri_cas, ncas, nelecas)
hci_diag -= eci0
hci_diag -= gci * ci0 * 4
hdiag_all = numpy.hstack((h_diag*2, hci_diag*2))
g_orb = casscf.pack_uniq_var(gpq-gpq.T)
g_all = numpy.hstack((g_orb*2, gci*2))
ngorb = g_orb.size
def h_op(x):
x1 = casscf.unpack_uniq_var(x[:ngorb])
ci1 = x[ngorb:]
# H_cc
hci1 = casscf.fcisolver.contract_2e(h2cas_0, ci1, ncas, nelecas, link_index=linkstrl).ravel()
hci1 -= ci1 * eci0
hci1 -= ((hc0-ci0*eci0)*ci0.dot(ci1) + ci0*(hc0-ci0*eci0).dot(ci1)) * 2
# H_co
rc = x1[:,:ncore]
ra = x1[:,ncore:nocc]
ddm_c = numpy.zeros((nmo,nmo))
ddm_c[:,:ncore] = rc[:,:ncore] * 2
ddm_c[:ncore,:]+= rc[:,:ncore].T * 2
tdm1, tdm2 = casscf.fcisolver.trans_rdm12(ci1, ci0, ncas, nelecas, link_index=linkstr)
tdm1 = tdm1 + tdm1.T
tdm2 = tdm2 + tdm2.transpose(1,0,3,2)
tdm2 =(tdm2 + tdm2.transpose(2,3,0,1)) * .5
vhf_a = numpy.empty((nmo,ncore))
paaa = numpy.empty((nmo,ncas,ncas,ncas))
jk = 0
for i in range(nmo):
jbuf = eris.ppaa[i]
kbuf = eris.papa[i]
paaa[i] = jbuf[ncore:nocc]
vhf_a[i] = numpy.einsum('quv,uv->q', jbuf[:ncore], tdm1)
vhf_a[i]-= numpy.einsum('uqv,uv->q', kbuf[:,:ncore], tdm1) * .5
jk += numpy.einsum('quv,q->uv', jbuf, ddm_c[i])
jk -= numpy.einsum('uqv,q->uv', kbuf, ddm_c[i]) * .5
g_dm2 = numpy.einsum('puwx,wxuv->pv', paaa, tdm2)
aaaa = numpy.dot(ra.T, paaa.reshape(nmo,-1)).reshape([ncas]*4)
aaaa = aaaa + aaaa.transpose(1,0,2,3)
aaaa = aaaa + aaaa.transpose(2,3,0,1)
h1aa = numpy.dot(h1e_mo[ncore:nocc]+eris.vhf_c[ncore:nocc], ra)
h1aa = h1aa + h1aa.T + jk
h1c0 = fci_matvec(ci0, h1aa, aaaa)
hci1 += h1c0
hci1 -= h1c0.dot(ci0) * ci0
# H_oo
# part7
# (-h_{sp} R_{rs} gamma_{rq} - h_{rq} R_{pq} gamma_{sp})/2 + (pr<->qs)
x2 = reduce(lib.dot, (h1e_mo, x1, dm1))
# part8
# (g_{ps}\delta_{qr}R_rs + g_{qr}\delta_{ps}) * R_pq)/2 + (pr<->qs)
x2 -= numpy.dot((gpq+gpq.T), x1) * .5
# part2
# (-2Vhf_{sp}\delta_{qr}R_pq - 2Vhf_{qr}\delta_{sp}R_rs)/2 + (pr<->qs)
x2[:ncore] += reduce(numpy.dot, (x1[:ncore,ncore:], vhf_ca[ncore:])) * 2
# part3
# (-Vhf_{sp}gamma_{qr}R_{pq} - Vhf_{qr}gamma_{sp}R_{rs})/2 + (pr<->qs)
x2[ncore:nocc] += reduce(numpy.dot, (casdm1, x1[ncore:nocc], eris.vhf_c))
# part1
x2[:,ncore:nocc] += numpy.einsum('purv,rv->pu', hdm2, x1[:,ncore:nocc])
if ncore > 0:
# part4, part5, part6
# Due to x1_rs [4(pq|sr) + 4(pq|rs) - 2(pr|sq) - 2(ps|rq)] for r>s p>q,
# == -x1_sr [4(pq|sr) + 4(pq|rs) - 2(pr|sq) - 2(ps|rq)] for r>s p>q,
# x2[:,:ncore] += H * x1[:,:ncore] => (becuase x1=-x1.T) =>
# x2[:,:ncore] += -H' * x1[:ncore] => (becuase x2-x2.T) =>
# x2[:ncore] += H' * x1[:ncore]
va, vc = casscf.update_jk_in_ah(mo, x1, casdm1, eris)
x2[ncore:nocc] += va
x2[:ncore,ncore:] += vc
# H_oc
s10 = ci1.dot(ci0) * 2
x2[:,:ncore] += ((h1e_mo[:,:ncore]+eris.vhf_c[:,:ncore]) * s10 + vhf_a) * 2
x2[:,ncore:nocc] += numpy.dot(h1e_mo[:,ncore:nocc]+eris.vhf_c[:,ncore:nocc], tdm1)
x2[:,ncore:nocc] += g_dm2
x2 -= s10 * gpq
# (pr<->qs)
x2 = x2 - x2.T
return numpy.hstack((casscf.pack_uniq_var(x2)*2, hci1*2))
return g_all, g_update, h_op, hdiag_all
def extract_rotation(casscf, dr, u, ci0):
ngorb = dr.size - ci0.size
u = numpy.dot(u, casscf.update_rotate_matrix(dr[:ngorb]))
ci1 = ci0.ravel() + dr[ngorb:]
ci1 *= 1./numpy.linalg.norm(ci1)
return u, ci1
def update_orb_ci(casscf, mo, ci0, eris, x0_guess=None,
conv_tol_grad=1e-4, max_stepsize=None, verbose=None):
log = logger.new_logger(casscf, verbose)
if max_stepsize is None:
max_stepsize = casscf.max_stepsize
nmo = mo.shape[1]
ci0 = ci0.ravel()
g_all, g_update, h_op, h_diag = gen_g_hop(casscf, mo, ci0, eris)
ngorb = g_all.size - ci0.size
g_kf = g_all
norm_gkf = norm_gall = numpy.linalg.norm(g_all)
log.debug(' |g|=%5.3g (%4.3g %4.3g) (keyframe)', norm_gall,
numpy.linalg.norm(g_all[:ngorb]),
numpy.linalg.norm(g_all[ngorb:]))
def precond(x, e):
if callable(h_diag):
x = h_diag(x, e-casscf.ah_level_shift)
else:
hdiagd = h_diag-(e-casscf.ah_level_shift)
hdiagd[abs(hdiagd)<1e-8] = 1e-8
x = x/hdiagd
x *= 1/numpy.linalg.norm(x)
return x
def scale_down_step(dxi, hdxi):
dxmax = abs(dxi).max()
if dxmax > casscf.max_stepsize:
scale = casscf.max_stepsize / dxmax
log.debug1('Scale rotation by %g', scale)
dxi *= scale
hdxi *= scale
return dxi, hdxi
class Statistic:
def __init__(self):
self.imic = 0
self.tot_hop = 0
self.tot_kf = 1 # The call to gen_g_hop
if x0_guess is None:
x0_guess = g_all
g_op = lambda: g_all
stat = Statistic()
dr = 0
ikf = 0
u = numpy.eye(nmo)
ci_kf = ci0
if norm_gall < conv_tol_grad*.3:
return u, ci_kf, norm_gall, stat, x0_guess
for ah_conv, ihop, w, dxi, hdxi, residual, seig \
in ciah.davidson_cc(h_op, g_op, precond, x0_guess,
tol=casscf.ah_conv_tol, max_cycle=casscf.ah_max_cycle,
lindep=casscf.ah_lindep, verbose=log):
stat.tot_hop = ihop
norm_residual = numpy.linalg.norm(residual)
if (ah_conv or ihop == casscf.ah_max_cycle or # make sure to use the last step
((norm_residual < casscf.ah_start_tol) and (ihop >= casscf.ah_start_cycle)) or
(seig < casscf.ah_lindep)):
stat.imic += 1
dxmax = abs(dxi).max()
dxi, hdxi = scale_down_step(dxi, hdxi)
dr += dxi
g_all = g_all + hdxi
norm_dr = numpy.linalg.norm(dr)
norm_gall = numpy.linalg.norm(g_all)
norm_gorb = numpy.linalg.norm(g_all[:ngorb])
norm_gci = numpy.linalg.norm(g_all[ngorb:])
log.debug(' imic %d(%d) |g|=%3.2e (%2.1e %2.1e) |dxi|=%3.2e '
'max(x)=%3.2e |dr|=%3.2e eig=%2.1e seig=%2.1e',
stat.imic, ihop, norm_gall, norm_gorb, norm_gci, numpy.linalg.norm(dxi),
dxmax, norm_dr, w, seig)
max_cycle = max(casscf.max_cycle_micro,
casscf.max_cycle_micro-int(numpy.log(norm_gkf+1e-7)*2))
log.debug1('Set max_cycle %d', max_cycle)
ikf += 1
if stat.imic > 3 and norm_gall > norm_gkf*casscf.ah_trust_region:
g_all = g_all - hdxi
dr -= dxi
norm_gall = numpy.linalg.norm(g_all)
log.debug('|g| >> keyframe, Restore previouse step')
break
elif (stat.imic >= max_cycle or norm_gall < conv_tol_grad*.3):
break
elif ((ikf >= max(casscf.kf_interval, casscf.kf_interval-numpy.log(norm_dr+1e-7)) or
# Insert keyframe if the keyframe and the esitimated grad are too different
norm_gall < norm_gkf/casscf.kf_trust_region)):
ikf = 0
u, ci_kf = extract_rotation(casscf, dr, u, ci_kf)
dr[:] = 0
g_kf1 = g_update(u, ci_kf)
stat.tot_kf += 1
norm_gkf1 = numpy.linalg.norm(g_kf1)
norm_gorb = numpy.linalg.norm(g_kf1[:ngorb])
norm_gci = numpy.linalg.norm(g_kf1[ngorb:])
norm_dg = numpy.linalg.norm(g_kf1-g_all)
log.debug('Adjust keyframe to |g|= %4.3g (%4.3g %4.3g) '
'|g-correction|= %4.3g',
norm_gkf1, norm_gorb, norm_gci, norm_dg)
if (norm_dg < norm_gall*casscf.ah_trust_region # kf not too diff
#or norm_gkf1 < norm_gkf # grad is decaying
# close to solution
or norm_gkf1 < conv_tol_grad*casscf.ah_trust_region):
g_all = g_kf = g_kf1
g_kf1 = None
norm_gall = norm_gkf = norm_gkf1
else:
g_all = g_all - hdxi
dr -= dxi
norm_gall = norm_gkf = numpy.linalg.norm(g_all)
log.debug('Out of trust region. Restore previouse step')
break
u, ci_kf = extract_rotation(casscf, dr, u, ci_kf)
log.debug(' tot inner=%d |g|= %4.3g (%4.3g %4.3g) |u-1|= %4.3g |dci|= %4.3g',
stat.imic, norm_gall, norm_gorb, norm_gci,
numpy.linalg.norm(u-numpy.eye(nmo)),
numpy.linalg.norm(ci_kf-ci0))
return u, ci_kf, norm_gkf, stat, dxi
def kernel(casscf, mo_coeff, tol=1e-7, conv_tol_grad=None,
ci0=None, callback=None, verbose=logger.NOTE, dump_chk=True):
'''CASSCF solver
'''
log = logger.new_logger(casscf, verbose)
cput0 = (time.clock(), time.time())
log.debug('Start newton CASSCF')
if callback is None:
callback = casscf.callback
mo = mo_coeff
nmo = mo_coeff.shape[1]
#TODO: lazy evaluate eris, to leave enough memory for FCI solver
eris = casscf.ao2mo(mo)
e_tot, e_ci, fcivec = casscf.casci(mo, ci0, eris, log, locals())
if casscf.ncas == nmo and not casscf.internal_rotation:
if casscf.canonicalization:
log.debug('CASSCF canonicalization')
mo, fcivec, mo_energy = casscf.canonicalize(mo, fcivec, eris, False,
casscf.natorb, verbose=log)
return True, e_tot, e_ci, fcivec, mo, mo_energy
casdm1 = casscf.fcisolver.make_rdm1(fcivec, casscf.ncas, casscf.nelecas)
if conv_tol_grad is None:
conv_tol_grad = numpy.sqrt(tol)
logger.info(casscf, 'Set conv_tol_grad to %g', conv_tol_grad)
conv_tol_ddm = conv_tol_grad * 3
conv = False
totmicro = totinner = 0
norm_gorb = norm_gci = -1
de, elast = e_tot, e_tot
dr0 = None
t2m = t1m = log.timer('Initializing newton CASSCF', *cput0)
imacro = 0
tot_hop = 0
tot_kf = 0
while not conv and imacro < casscf.max_cycle_macro:
imacro += 1
u, fcivec, norm_gall, stat, dr0 = \
update_orb_ci(casscf, mo, fcivec, eris, dr0, conv_tol_grad*.3, verbose=log)
tot_hop += stat.tot_hop
tot_kf += stat.tot_kf
t2m = log.timer('update_orb_ci', *t2m)
eris = None
mo = casscf.rotate_mo(mo, u, log)
eris = casscf.ao2mo(mo)
t2m = log.timer('update eri', *t2m)
e_tot, e_ci, fcivec = casscf.casci(mo, fcivec, eris, log, locals())
log.timer('CASCI solver', *t2m)
t2m = t1m = log.timer('macro iter %d'%imacro, *t1m)
de, elast = e_tot - elast, e_tot
if (abs(de) < tol and norm_gall < conv_tol_grad):
conv = True
if dump_chk:
casscf.dump_chk(locals())
if callable(callback):
callback(locals())
if conv:
log.info('newton CASSCF converged in %d macro (%d KF %d Hx) steps',
imacro, tot_kf, tot_hop)
else:
log.info('newton CASSCF not converged, %d macro (%d KF %d Hx) steps',
imacro, tot_kf, tot_hop)
casdm1 = casscf.fcisolver.make_rdm1(fcivec, casscf.ncas, casscf.nelecas)
if casscf.canonicalization:
log.info('CASSCF canonicalization')
mo, fcivec, mo_energy = \
casscf.canonicalize(mo, fcivec, eris, False, casscf.natorb, casdm1, log)
if casscf.natorb: # dump_chk may save casdm1
occ, ucas = casscf._eig(-casdm1, ncore, nocc)[0]
casdm1 = -occ
if dump_chk:
casscf.dump_chk(locals())
log.timer('newton CASSCF', *cput0)
return conv, e_tot, e_ci, fcivec, mo, mo_energy
class CASSCF(mc1step.CASSCF):
__doc__ = casci.CASCI.__doc__ + '''CASSCF
Extra attributes for CASSCF:
conv_tol : float
Converge threshold. Default is 1e-7
conv_tol_grad : float
Converge threshold for CI gradients and orbital rotation gradients.
Default is 1e-4
max_stepsize : float
The step size for orbital rotation. Small step (0.005 - 0.05) is prefered.
(see notes in max_cycle_micro_inner attribute)
Default is 0.03.
max_cycle_macro : int
Max number of macro iterations. Default is 50.
max_cycle_micro : int
Max number of micro (CIAH) iterations in each macro iteration.
ah_level_shift : float, for AH solver.
Level shift for the Davidson diagonalization in AH solver. Default is 1e-8.
ah_conv_tol : float, for AH solver.
converge threshold for AH solver. Default is 1e-12.
ah_max_cycle : float, for AH solver.
Max number of iterations allowd in AH solver. Default is 30.
ah_lindep : float, for AH solver.
Linear dependence threshold for AH solver. Default is 1e-14.
ah_start_tol : flat, for AH solver.
In AH solver, the orbital rotation is started without completely solving the AH problem.
This value is to control the start point. Default is 0.2.
ah_start_cycle : int, for AH solver.
In AH solver, the orbital rotation is started without completely solving the AH problem.
This value is to control the start point. Default is 2.
``ah_conv_tol``, ``ah_max_cycle``, ``ah_lindep``, ``ah_start_tol`` and ``ah_start_cycle``
can affect the accuracy and performance of CASSCF solver. Lower
``ah_conv_tol`` and ``ah_lindep`` might improve the accuracy of CASSCF
optimization, but decrease the performance.
>>> from pyscf import gto, scf, mcscf
>>> mol = gto.M(atom='N 0 0 0; N 0 0 1', basis='ccpvdz', verbose=0)
>>> mf = scf.UHF(mol)
>>> mf.scf()
>>> mc = mcscf.CASSCF(mf, 6, 6)
>>> mc.conv_tol = 1e-10
>>> mc.ah_conv_tol = 1e-5
>>> mc.kernel()
-109.044401898486001
>>> mc.ah_conv_tol = 1e-10
>>> mc.kernel()
-109.044401887945668
chkfile : str
Checkpoint file to save the intermediate orbitals during the CASSCF optimization.
Default is the checkpoint file of mean field object.
callback : function(envs_dict) => None
callback function takes one dict as the argument which is
generated by the builtin function :func:`locals`, so that the
callback function can access all local variables in the current
envrionment.
Saved results
e_tot : float
Total MCSCF energy (electronic energy plus nuclear repulsion)
ci : ndarray
CAS space FCI coefficients
converged : bool
It indicates CASSCF optimization converged or not.
mo_coeff : ndarray
Optimized CASSCF orbitals coefficients
Examples:
>>> from pyscf import gto, scf, mcscf
>>> mol = gto.M(atom='N 0 0 0; N 0 0 1', basis='ccpvdz', verbose=0)
>>> mf = scf.RHF(mol)
>>> mf.scf()
>>> mc = mcscf.CASSCF(mf, 6, 6)
>>> mc.kernel()[0]
-109.044401882238134
'''
def __init__(self, mf, ncas, nelecas, ncore=None, frozen=None):
casci.CASCI.__init__(self, mf, ncas, nelecas, ncore)
self.frozen = frozen
# the max orbital rotation and CI increment, prefer small step size
self.max_stepsize = .03
self.max_cycle_macro = 50
self.max_cycle_micro = 10
self.conv_tol = 1e-7
self.conv_tol_grad = None
# for augmented hessian
self.ah_level_shift = 1e-8
self.ah_conv_tol = 1e-12
self.ah_max_cycle = 30
self.ah_lindep = 1e-14
self.ah_start_tol = 5e2
self.ah_start_cycle = 3
self.ah_trust_region = 3.
self.kf_trust_region = 3.
self.kf_interval = 5
self.internal_rotation = False
self.chkfile = mf.chkfile
self.callback = None
self.chk_ci = False
self.fcisolver.max_cycle = 25
#self.fcisolver.max_space = 25
##################################################
# don't modify the following attributes, they are not input options
self.e_tot = None
self.e_cas = None
self.ci = None
self.mo_coeff = mf.mo_coeff
self.mo_energy = mf.mo_energy
self.converged = False
self._max_stepsize = None
self._keys = set(self.__dict__.keys())
def dump_flags(self):
log = logger.Logger(self.stdout, self.verbose)
log.info('')
log.info('******** %s flags ********', self.__class__)
nvir = self.mo_coeff.shape[1] - self.ncore - self.ncas
log.info('CAS (%de+%de, %do), ncore = %d, nvir = %d', \
self.nelecas[0], self.nelecas[1], self.ncas, self.ncore, nvir)
assert(nvir > 0 and self.ncore > 0 and self.ncas > 0)
if self.frozen is not None:
log.info('frozen orbitals %s', str(self.frozen))
log.info('max_cycle_macro = %d', self.max_cycle_macro)
log.info('max_cycle_micro = %d', self.max_cycle_micro)
log.info('conv_tol = %g', self.conv_tol)
log.info('conv_tol_grad = %s', self.conv_tol_grad)
log.info('orbital rotation max_stepsize = %g', self.max_stepsize)
log.info('augmented hessian ah_max_cycle = %d', self.ah_max_cycle)
log.info('augmented hessian ah_conv_tol = %g', self.ah_conv_tol)
log.info('augmented hessian ah_linear dependence = %g', self.ah_lindep)
log.info('augmented hessian ah_level shift = %d', self.ah_level_shift)
log.info('augmented hessian ah_start_tol = %g', self.ah_start_tol)
log.info('augmented hessian ah_start_cycle = %d', self.ah_start_cycle)
log.info('augmented hessian ah_trust_region = %g', self.ah_trust_region)
log.info('kf_trust_region = %g', self.kf_trust_region)
log.info('kf_interval = %d', self.kf_interval)
log.info('natorb = %s', self.natorb)
log.info('canonicalization = %s', self.canonicalization)
log.info('chkfile = %s', self.chkfile)
log.info('max_memory %d MB (current use %d MB)',
self.max_memory, lib.current_memory()[0])
log.info('internal_rotation = %s', self.internal_rotation)
try:
self.fcisolver.dump_flags(self.verbose)
except AttributeError:
pass
if hasattr(self, 'max_orb_stepsize'):
log.warn('Attribute "max_orb_stepsize" was replaced by "max_stepsize"')
if self.mo_coeff is None:
log.warn('Orbital for CASCI is not specified. You probably need '
'call SCF.kernel() to initialize orbitals.')
return self
def kernel(self, mo_coeff=None, ci0=None, callback=None):
return mc1step.CASSCF.kernel(self, mo_coeff, ci0, callback, kernel)
def casci(self, mo_coeff, ci0=None, eris=None, verbose=None, envs=None):
log = logger.new_logger(self, verbose)
if eris is None:
fcasci = copy.copy(self)
fcasci.ao2mo = self.get_h2cas
else:
fcasci = mc1step._fake_h_for_fast_casci(self, mo_coeff, eris)
e_tot, e_ci, fcivec = casci.kernel(fcasci, mo_coeff, ci0, log)
if not isinstance(e_ci, (float, numpy.number)):
raise RuntimeError('Multiple roots are detected in fcisolver. '
'CASSCF does not know which state to optimize.\n'
'See also mcscf.state_average or mcscf.state_specific for excited states.')
if envs is not None and log.verbose >= logger.INFO:
log.debug('CAS space CI energy = %.15g', e_ci)
if hasattr(self.fcisolver,'spin_square'):
ss = self.fcisolver.spin_square(fcivec, self.ncas, self.nelecas)
else:
ss = None
if 'imacro' in envs: # Within CASSCF iteration
stat = envs['stat']
if ss is None:
log.info('macro %d (%d JK %d micro), '
'CASSCF E = %.15g dE = %.4g |grad|=%5.3g',
envs['imacro'], stat.tot_hop+stat.tot_kf, stat.imic,
e_tot, e_tot-envs['elast'], envs['norm_gall'])
else:
log.info('macro %d (%d JK %d micro), '
'CASSCF E = %.15g dE = %.4g |grad|=%5.3g S^2 = %.7f',
envs['imacro'], stat.tot_hop+stat.tot_kf, stat.imic,
e_tot, e_tot-envs['elast'], envs['norm_gall'], ss[0])
else: # Initialization step
elast = envs.get('elast', 0)
if ss is None:
log.info('CASCI E = %.15g', e_tot)
else:
log.info('CASCI E = %.15g dE = %.8g S^2 = %.7f',
e_tot, e_tot-elast, ss[0])
return e_tot, e_ci, fcivec
def update_ao2mo(self, mo):
raise DeprecationWarning('update_ao2mo was obseleted since pyscf v1.0. '
'Use .ao2mo method instead')
# Don't remove the two functions. They are used in df/approx_hessian code
def get_h2eff(self, mo_coeff=None):
return self.get_h2cas(mo_coeff)
def get_h2cas(self, mo_coeff=None):
return casci.CASCI.ao2mo(self, mo_coeff)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
import pyscf.fci
from pyscf.mcscf import addons
mol = gto.Mole()
mol.verbose = 0
mol.output = None#"out_h2o"
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 ,-1. )],
['H', ( 0.,-0.5 ,-1. )],
['H', ( 0.,-0.5 ,-0. )],
['H', ( 0.,-0. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0., 1. , 1. )],
]
mol.basis = {'H': 'sto-3g',
'O': '6-31g',}
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
emc = kernel(CASSCF(m, 4, 4), m.mo_coeff, verbose=4)[1]
print(ehf, emc, emc-ehf)
print(emc - -3.22013929407)
mc = CASSCF(m, 4, (3,1))
mc.verbose = 4
#mc.fcisolver = pyscf.fci.direct_spin1
mc.fcisolver = pyscf.fci.solver(mol, False)
emc = kernel(mc, m.mo_coeff, verbose=4)[1]
print(emc - -15.950852049859-mol.energy_nuc())
mol.atom = [
['H', ( 5.,-1. , 1. )],
['H', ( 0.,-5. ,-2. )],
['H', ( 4.,-0.5 ,-3. )],
['H', ( 0.,-4.5 ,-1. )],
['H', ( 3.,-0.5 ,-0. )],
['H', ( 0.,-3. ,-1. )],
['H', ( 2.,-2.5 , 0. )],
['H', ( 1., 1. , 3. )],
]
mol.basis = {'H': 'sto-3g',
'O': '6-31g',}
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
emc = kernel(CASSCF(m, 4, 4), m.mo_coeff, verbose=4)[1]
print(ehf, emc, emc-ehf)
print(emc - -3.62638367550087, emc - -3.6268060528596635)
mc = CASSCF(m, 4, (3,1))
mc.verbose = 4
#mc.fcisolver = pyscf.fci.direct_spin1
mc.fcisolver = pyscf.fci.solver(mol, False)
emc = kernel(mc, m.mo_coeff, verbose=4)[1]
print(emc - -3.62638367550087)
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = {'H': 'cc-pvdz',
'O': 'cc-pvdz',}
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
mc = CASSCF(m, 6, 4)
mc.fcisolver = pyscf.fci.solver(mol)
mc.verbose = 4
mo = addons.sort_mo(mc, m.mo_coeff, (3,4,6,7,8,9), 1)
emc = mc.mc1step(mo)[0]
print(ehf, emc, emc-ehf)
#-76.0267656731 -76.0873922924 -0.0606266193028
print(emc - -76.0873923174, emc - -76.0926176464)
mc = CASSCF(m, 6, (3,1))
mo = addons.sort_mo(mc, m.mo_coeff, (3,4,6,7,8,9), 1)
#mc.fcisolver = pyscf.fci.direct_spin1
mc.fcisolver = pyscf.fci.solver(mol, False)
mc.verbose = 4
emc = mc.mc1step(mo)[0]
#mc.analyze()
print(emc - -75.7155632535814)
mc.internal_rotation = True
emc = mc.mc1step(mo)[0]
print(emc - -75.7155632535814)
| 21,464 | 3,757 | 91 |
91d80edfac013cdabd9ac49fea3776cbe574cee5 | 8,441 | py | Python | models/SegNetWithSkipConnection.py | Suraj0712/SegNet | efb2298f36944cbae65b44f15fdc458ebca0b224 | [
"MIT"
] | null | null | null | models/SegNetWithSkipConnection.py | Suraj0712/SegNet | efb2298f36944cbae65b44f15fdc458ebca0b224 | [
"MIT"
] | null | null | null | models/SegNetWithSkipConnection.py | Suraj0712/SegNet | efb2298f36944cbae65b44f15fdc458ebca0b224 | [
"MIT"
] | null | null | null | import torch
import torchvision
from torchvision import transforms, datasets
import torchvision.transforms as standard_transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import time
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from torch.autograd import Variable
import sys
# net = SegNetWithSkipConnection()
# net.zero_grad()
# DATA_PATH = '/home/sur/SemSeg/cityscape/'
# train = datasets.Cityscapes(DATA_PATH, split = 'train', mode = 'fine', target_type = 'semantic',transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]),target_transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]))
# test = datasets.Cityscapes(DATA_PATH, split = 'test', mode = 'fine', target_type = 'semantic' ,transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]),target_transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]))
# val = datasets.Cityscapes(DATA_PATH, split = 'val', mode = 'fine', target_type = 'semantic' ,transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]),target_transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]))
# trainset = torch.utils.data.DataLoader(train, batch_size=2, shuffle=True)
# testset = torch.utils.data.DataLoader(test, batch_size=2, shuffle=False)
# valset = torch.utils.data.DataLoader(val, batch_size=2, shuffle=True)
# for data in trainset:
# X, y = data
# print(X.size(),y.size())
# output = net(X)
# break
| 49.362573 | 271 | 0.727047 | import torch
import torchvision
from torchvision import transforms, datasets
import torchvision.transforms as standard_transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import time
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from torch.autograd import Variable
import sys
class SegNetWithSkipConnection(nn.Module) :
def __init__(self):
super(SegNetWithSkipConnection,self).__init__()
self.layer10_conv = nn.Conv2d(3,64,3,1,padding = (1,1))
self.layer10_batch = nn.BatchNorm2d(64, affine = False)
self.layer11_conv = nn.Conv2d(64,64,3,1,padding=(1,1))
self.layer11_batch = nn.BatchNorm2d(64, affine = False)
self.layer20_conv = nn.Conv2d(64,128,3,2,padding=(1,1))
self.layer20_batch = nn.BatchNorm2d(128, affine = False)
self.layer21_conv = nn.Conv2d(128,128,3,1,padding=(1,1))
self.layer21_batch = nn.BatchNorm2d(128, affine = False)
self.layer30_conv = nn.Conv2d(128,256,3,2,padding=(1,1))
self.layer30_batch = nn.BatchNorm2d(256, affine = False)
self.layer31_conv = nn.Conv2d(256,256,3,1,padding=(1,1))
self.layer31_batch = nn.BatchNorm2d(256, affine = False)
self.layer32_conv = nn.Conv2d(256,256,3,1,padding=(1,1))
self.layer32_batch = nn.BatchNorm2d(256, affine = False)
self.layer40_conv = nn.Conv2d(256,512,3,2,padding=(1,1))
self.layer40_batch = nn.BatchNorm2d(512, affine = False)
self.layer41_conv = nn.Conv2d(512,512,3,1,padding=(1,1))
self.layer41_batch = nn.BatchNorm2d(512, affine = False)
self.layer42_conv = nn.Conv2d(512,512,3,1,padding=(1,1))
self.layer42_batch = nn.BatchNorm2d(512, affine = False)
self.layer50_conv = nn.Conv2d(512,512,3,2,padding=(1,1))
self.layer50_batch = nn.BatchNorm2d(512, affine = False)
self.layer51_conv = nn.Conv2d(512,512,3,1,padding=(1,1))
self.layer51_batch = nn.BatchNorm2d(512, affine = False)
self.layer52_conv = nn.Conv2d(512,512,3,1,padding=(1,1))
self.layer52_batch = nn.BatchNorm2d(512, affine = False)
self.decoder_layer52_conv = nn.ConvTranspose2d(512,512,3,1,padding=(1,1))
self.decoder_layer52_batch = nn.BatchNorm2d(512, affine = False)
self.decoder_layer51_conv = nn.ConvTranspose2d(512,512,3,1,padding=(1,1))
self.decoder_layer51_batch = nn.BatchNorm2d(512, affine = False)
self.decoder_layer50_conv = nn.ConvTranspose2d(512,512,3,2,padding=(1,1),output_padding=(1,1))
self.decoder_layer50_batch = nn.BatchNorm2d(512, affine = False)
self.decoder_layer42_conv = nn.ConvTranspose2d(1024,512,3,1,padding=(1,1))
self.decoder_layer42_batch = nn.BatchNorm2d(512, affine = False)
self.decoder_layer41_conv = nn.ConvTranspose2d(512,512,3,1,padding=(1,1))
self.decoder_layer41_batch = nn.BatchNorm2d(512, affine = False)
self.decoder_layer40_conv = nn.ConvTranspose2d(512,256,3,2,padding=(1,1),output_padding=(1,1))
self.decoder_layer40_batch = nn.BatchNorm2d(256, affine = False)
self.decoder_layer32_conv = nn.ConvTranspose2d(512,256,3,1,padding=(1,1))
self.decoder_layer32_batch = nn.BatchNorm2d(256, affine = False)
self.decoder_layer31_conv = nn.ConvTranspose2d(256,256,3,1,padding=(1,1))
self.decoder_layer31_batch = nn.BatchNorm2d(256, affine = False)
self.decoder_layer30_conv = nn.ConvTranspose2d(256,128,3,2,padding=(1,1),output_padding=(1,1))
self.decoder_layer30_batch = nn.BatchNorm2d(128, affine = False)
self.decoder_layer21_conv = nn.ConvTranspose2d(256,128,3,1,padding=(1,1))
self.decoder_layer21_batch = nn.BatchNorm2d(128, affine = False)
self.decoder_layer20_conv = nn.ConvTranspose2d(128,64,3,2,padding=(1,1),output_padding=(1,1))
self.decoder_layer20_batch = nn.BatchNorm2d(64, affine = False)
self.decoder_layer11_conv = nn.ConvTranspose2d(128,64,3,1,padding=(1,1))
self.decoder_layer11_batch = nn.BatchNorm2d(64, affine = False)
self.decoder_layer10_conv = nn.ConvTranspose2d(64,34,3,1,padding=(1,1))
    def forward(self,x):
        """Encoder/decoder forward pass of a SegNet-style segmentation net.

        The encoder runs five VGG-like conv stages; each stage is max-pooled
        with return_indices=True so the decoder can reverse the pooling via
        F.max_unpool2d and add the result back as a skip connection.

        NOTE(review): each stage's convs consume the *pre-pool* activations
        (e.g. layer20 takes x11, not the pooled x1); the pooled tensors are
        only used for the unpooling skips. Presumably the "*0" convs perform
        the spatial downsampling instead -- their definitions are outside
        this chunk, so confirm this is the intended architecture.

        Returns per-pixel class probabilities over 34 classes
        (softmax over dim 1; 34 comes from decoder_layer10_conv's output).
        """
        # --- Encoder ---
        x10 = F.relu(self.layer10_batch(self.layer10_conv(x)))
        x11 = F.relu(self.layer11_batch(self.layer11_conv(x10)))
        # Pooling indices are kept for the decoder's max_unpool2d skips
        x1 , x1_indices = F.max_pool2d(x11,kernel_size=2,stride=2,return_indices=True)
        x20 = F.relu(self.layer20_batch(self.layer20_conv(x11)))
        x21 = F.relu(self.layer21_batch(self.layer21_conv(x20)))
        x2, x2_indices = F.max_pool2d(x21,kernel_size=2,stride=2,return_indices=True)
        x30 = F.relu(self.layer30_batch(self.layer30_conv(x21)))
        x31 = F.relu(self.layer31_batch(self.layer31_conv(x30)))
        x32 = F.relu(self.layer32_batch(self.layer32_conv(x31)))
        x3 , x3_indices = F.max_pool2d(x32,kernel_size=2,stride=2,return_indices=True)
        x40 = F.relu(self.layer40_batch(self.layer40_conv(x32)))
        x41 = F.relu(self.layer41_batch(self.layer41_conv(x40)))
        x42 = F.relu(self.layer42_batch(self.layer42_conv(x41)))
        x4, x4_indices = F.max_pool2d(x42,kernel_size=2,stride=2,return_indices=True)
        x50 = F.relu(self.layer50_batch(self.layer50_conv(x42)))
        x51 = F.relu(self.layer51_batch(self.layer51_conv(x50)))
        x52 = F.relu(self.layer52_batch(self.layer52_conv(x51)))
        x5, x5_indices = F.max_pool2d(x52,kernel_size=2,stride=2,return_indices=True)
        # --- Decoder ---
        x52_dec = F.relu(self.decoder_layer52_batch(self.decoder_layer52_conv(x52)))
        x51_dec = F.relu(self.decoder_layer51_batch(self.decoder_layer51_conv(x52_dec)))
        # Skip connection: unpool the stage-5 output and add it in
        x5_decoder_output = F.max_unpool2d(x5,x5_indices,kernel_size = 2,stride = 2)
        x51_dec = x51_dec+x5_decoder_output
        x50_dec = F.relu(self.decoder_layer50_batch(self.decoder_layer50_conv(x51_dec)))
        # Encoder features are concatenated on the channel dim (hence the
        # 1024-in decoder_layer42_conv) before each deeper decoder stage
        x42_dec = F.relu(self.decoder_layer42_batch(self.decoder_layer42_conv(torch.cat((x50_dec,x42),1))))
        x41_dec = F.relu(self.decoder_layer41_batch(self.decoder_layer41_conv(x42_dec)))
        x4_decoder_output = F.max_unpool2d(x4,x4_indices,kernel_size = 2,stride = 2)
        x41_dec = x41_dec+x4_decoder_output
        x40_dec = F.relu(self.decoder_layer40_batch(self.decoder_layer40_conv(x41_dec)))
        x32_dec = F.relu(self.decoder_layer32_batch(self.decoder_layer32_conv(torch.cat((x40_dec,x32),1))))
        x31_dec = F.relu(self.decoder_layer31_batch(self.decoder_layer31_conv(x32_dec)))
        x3_decoder_output = F.max_unpool2d(x3,x3_indices,kernel_size = 2,stride = 2)
        x31_dec = x31_dec+x3_decoder_output
        x30_dec = F.relu(self.decoder_layer30_batch(self.decoder_layer30_conv(x31_dec)))
        x21_dec = F.relu(self.decoder_layer21_batch(self.decoder_layer21_conv(torch.cat((x30_dec,x21),1))))
        x2_decoder_output = F.max_unpool2d(x2,x2_indices,kernel_size = 2,stride = 2)
        x21_dec = x21_dec+x2_decoder_output
        x20_dec = F.relu(self.decoder_layer20_batch(self.decoder_layer20_conv(x21_dec)))
        x11_dec = F.relu(self.decoder_layer11_batch(self.decoder_layer11_conv(torch.cat((x20_dec,x11),1))))
        x1_decoder_output = F.max_unpool2d(x1,x1_indices,kernel_size = 2,stride = 2)
        x11_dec = x11_dec+x1_decoder_output
        # Final conv maps to 34 classes; no batch-norm on the output layer
        x10_dec = F.relu(self.decoder_layer10_conv(x11_dec))
        x_out = F.softmax(x10_dec,dim=1)
        return x_out
# Test code
# net = SegNetWithSkipConnection()
# net.zero_grad()
# DATA_PATH = '/home/sur/SemSeg/cityscape/'
# train = datasets.Cityscapes(DATA_PATH, split = 'train', mode = 'fine', target_type = 'semantic',transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]),target_transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]))
# test = datasets.Cityscapes(DATA_PATH, split = 'test', mode = 'fine', target_type = 'semantic' ,transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]),target_transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]))
# val = datasets.Cityscapes(DATA_PATH, split = 'val', mode = 'fine', target_type = 'semantic' ,transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]),target_transform=transforms.Compose([transforms.Resize((256,512)),transforms.ToTensor()]))
# trainset = torch.utils.data.DataLoader(train, batch_size=2, shuffle=True)
# testset = torch.utils.data.DataLoader(test, batch_size=2, shuffle=False)
# valset = torch.utils.data.DataLoader(val, batch_size=2, shuffle=True)
# for data in trainset:
# X, y = data
# print(X.size(),y.size())
# output = net(X)
# break
| 6,766 | 22 | 72 |
b460e74e76bbe17458d520ae8477b491912a4988 | 479 | py | Python | test-integration/test_integration/fixtures/yielding-file-project/predict.py | wx-b/cog | 5dc27a2c2e117cc5ac4f896aa0881617b36bbfc0 | [
"Apache-2.0"
] | null | null | null | test-integration/test_integration/fixtures/yielding-file-project/predict.py | wx-b/cog | 5dc27a2c2e117cc5ac4f896aa0881617b36bbfc0 | [
"Apache-2.0"
] | null | null | null | test-integration/test_integration/fixtures/yielding-file-project/predict.py | wx-b/cog | 5dc27a2c2e117cc5ac4f896aa0881617b36bbfc0 | [
"Apache-2.0"
] | null | null | null | from typing import Iterator
from cog import BasePredictor, Path
| 28.176471 | 52 | 0.574113 | from typing import Iterator
from cog import BasePredictor, Path
class Predictor(BasePredictor):
    """Predictor that emits one output file per canned prediction string."""

    def predict(self, path: Path) -> Iterator[Path]:
        """Yield /tmp/out-<i>.txt files, each containing "<prefix> <word>".

        The prefix is read from *path*; words are the fixed sequence
        foo, bar, baz.
        """
        with open(path) as infile:
            prefix = infile.read()
        for index, word in enumerate(["foo", "bar", "baz"]):
            target = Path(f"/tmp/out-{index}.txt")
            with target.open("w") as outfile:
                outfile.write(f"{prefix} {word}")
            yield target
| 354 | 10 | 49 |
9a4d98708ccbc9b36ec24522c149f20f024b0cb4 | 20,247 | py | Python | wizbin/launchers.py | RogueScholar/debreate | 0abc168c51336b31ff87c61f84bc7bb6000e88f4 | [
"MIT"
] | 97 | 2016-09-16T08:44:04.000Z | 2022-01-29T22:30:18.000Z | wizbin/launchers.py | RogueScholar/debreate | 0abc168c51336b31ff87c61f84bc7bb6000e88f4 | [
"MIT"
] | 34 | 2016-09-20T00:42:45.000Z | 2021-04-16T07:21:44.000Z | wizbin/launchers.py | RogueScholar/debreate | 0abc168c51336b31ff87c61f84bc7bb6000e88f4 | [
"MIT"
] | 24 | 2016-09-16T08:44:56.000Z | 2021-07-29T11:32:47.000Z | # -*- coding: utf-8 -*-
## \package wizbin.launchers
# MIT licensing
# See: docs/LICENSE.txt
import os, shutil, wx
from dbr.language import GT
from dbr.log import DebugEnabled
from dbr.log import Logger
from fileio.fileio import ReadFile
from fileio.fileio import WriteFile
from globals.ident import btnid
from globals.ident import chkid
from globals.ident import inputid
from globals.ident import listid
from globals.ident import pgid
from globals.ident import txtid
from globals.strings import GS
from globals.strings import TextIsEmpty
from globals.tooltips import SetPageToolTips
from input.list import ListCtrl
from input.select import ComboBox
from input.select import ComboBoxESS
from input.text import TextArea
from input.text import TextAreaESS
from input.text import TextAreaPanel
from input.toggle import CheckBox
from input.toggle import CheckBoxESS
from ui.button import CreateButton
from ui.dialog import ConfirmationDialog
from ui.dialog import ShowDialog
from ui.dialog import ShowErrorDialog
from ui.layout import BoxSizer
from ui.style import layout as lyt
from ui.textpreview import TextPreview
from wiz.helper import GetAllTypeFields
from wiz.helper import GetField
from wiz.helper import GetMainWindow
from wiz.wizard import WizardPage
## Page for creating a system menu launcher
## Constructor
#
# \param parent
# Parent <b><i>wx.Window</i></b> instance
## Retrieves page data for export
## Formats the launcher information for export
## Retrieves the filename to be used for the menu launcher
## TODO: Doxygen
## TODO: Doxygen
## Handles button event from clear categories button
## Saves launcher information to file
#
# FIXME: Might be problems with reading/writing launchers (see OnLoadLauncher)
# 'Others' field not being completely filled out.
## Loads a .desktop launcher's data
#
# FIXME: Might be problems with reading/writing launchers (see OnExportLauncher)
# 'Others' field not being completely filled out.
## TODO: Doxygen
## TODO: Doxygen
## Enables/Disables fields for creating a launcher
## Resets all fields to default values
## TODO: Doxygen
## Fills out launcher information from loaded file
#
# \param data
# Information to fill out menu launcher fields
# \param enabled
# \b \e bool : Launcher will be flagged for export if True
| 29.995556 | 103 | 0.694572 | # -*- coding: utf-8 -*-
## \package wizbin.launchers
# MIT licensing
# See: docs/LICENSE.txt
import os, shutil, wx
from dbr.language import GT
from dbr.log import DebugEnabled
from dbr.log import Logger
from fileio.fileio import ReadFile
from fileio.fileio import WriteFile
from globals.ident import btnid
from globals.ident import chkid
from globals.ident import inputid
from globals.ident import listid
from globals.ident import pgid
from globals.ident import txtid
from globals.strings import GS
from globals.strings import TextIsEmpty
from globals.tooltips import SetPageToolTips
from input.list import ListCtrl
from input.select import ComboBox
from input.select import ComboBoxESS
from input.text import TextArea
from input.text import TextAreaESS
from input.text import TextAreaPanel
from input.toggle import CheckBox
from input.toggle import CheckBoxESS
from ui.button import CreateButton
from ui.dialog import ConfirmationDialog
from ui.dialog import ShowDialog
from ui.dialog import ShowErrorDialog
from ui.layout import BoxSizer
from ui.style import layout as lyt
from ui.textpreview import TextPreview
from wiz.helper import GetAllTypeFields
from wiz.helper import GetField
from wiz.helper import GetMainWindow
from wiz.wizard import WizardPage
## Page for creating a system menu launcher
class Page(WizardPage):
## Constructor
#
# \param parent
# Parent <b><i>wx.Window</i></b> instance
def __init__(self, parent):
WizardPage.__init__(self, parent, pgid.MENU) #, name=GT(u'Menu Launcher'))
## Override default label
self.Label = GT(u'Menu Launcher')
# --- Buttons to open/preview/save .desktop file
btn_open = CreateButton(self, btnid.BROWSE, GT(u'Browse'), u'browse', name=u'btn browse')
btn_save = CreateButton(self, btnid.SAVE, GT(u'Save'), u'save', name=u'btn save')
btn_preview = CreateButton(self, btnid.PREVIEW, GT(u'Preview'), u'preview', name=u'btn preview')
# --- CHECKBOX
chk_enable = CheckBox(self, chkid.ENABLE, GT(u'Create system menu launcher'))
# --- TYPE
opts_type = (u'Application', u'Link', u'Directory',)
txt_type = wx.StaticText(self, label=GT(u'Type'), name=u'type')
ti_type = ComboBoxESS(self, inputid.TYPE, choices=opts_type,
name=u'Type', defaultValue=opts_type[0])
# --- ENCODING
opts_enc = (
u'UTF-1', u'UTF-7', u'UTF-8', u'CESU-8', u'UTF-EBCDIC',
u'UTF-16', u'UTF-32', u'SCSU', u'BOCU-1', u'Punycode',
u'GB 18030',
)
txt_enc = wx.StaticText(self, label=GT(u'Encoding'), name=u'encoding')
ti_enc = ComboBoxESS(self, inputid.ENC, choices=opts_enc, name=u'Encoding',
defaultValue=opts_enc[2])
# --- TERMINAL
chk_term = CheckBoxESS(self, chkid.TERM, GT(u'Terminal'), name=u'Terminal')
# --- STARTUP NOTIFY
chk_notify = CheckBoxESS(self, chkid.NOTIFY, GT(u'Startup Notify'), name=u'StartupNotify',
defaultValue=True)
# --- Custom output filename
txt_filename = wx.StaticText(self, txtid.FNAME, GT(u'Filename'), name=u'filename')
ti_filename = TextArea(self, inputid.FNAME, name=txt_filename.Name)
chk_filename = CheckBox(self, chkid.FNAME, GT(u'Use "Name" as output filename (<Name>.desktop)'),
name=u'filename chk', defaultValue=True)
# --- NAME (menu)
txt_name = wx.StaticText(self, label=GT(u'Name'), name=u'name*')
ti_name = TextAreaESS(self, inputid.NAME, name=u'Name')
ti_name.req = True
# --- EXECUTABLE
txt_exec = wx.StaticText(self, label=GT(u'Executable'), name=u'exec')
ti_exec = TextAreaESS(self, inputid.EXEC, name=u'Exec')
# --- COMMENT
txt_comm = wx.StaticText(self, label=GT(u'Comment'), name=u'comment')
ti_comm = TextAreaESS(self, inputid.DESCR, name=u'Comment')
# --- ICON
txt_icon = wx.StaticText(self, label=GT(u'Icon'), name=u'icon')
ti_icon = TextAreaESS(self, inputid.ICON, name=u'Icon')
txt_mime = wx.StaticText(self, label=GT(u'MIME Type'), name=u'mime')
ti_mime = TextAreaESS(self, inputid.MIME, defaultValue=wx.EmptyString, name=u'MimeType')
# ----- OTHER/CUSTOM
txt_other = wx.StaticText(self, label=GT(u'Custom Fields'), name=u'other')
ti_other = TextAreaPanel(self, inputid.OTHER, name=txt_other.Name)
ti_other.EnableDropTarget()
# --- CATEGORIES
opts_category = (
u'2DGraphics',
u'Accessibility', u'Application', u'ArcadeGame', u'Archiving', u'Audio', u'AudioVideo',
u'BlocksGame', u'BoardGame',
u'Calculator', u'Calendar', u'CardGame', u'Compression', u'ContactManagement', u'Core',
u'DesktopSettings', u'Development', u'Dictionary', u'DiscBurning', u'Documentation',
u'Email',
u'FileManager', u'FileTransfer',
u'Game', u'GNOME', u'Graphics', u'GTK',
u'HardwareSettings',
u'InstantMessaging',
u'KDE',
u'LogicGame',
u'Math', u'Monitor',
u'Network',
u'OCR', u'Office',
u'P2P', u'PackageManager', u'Photography', u'Player', u'Presentation', u'Printing',
u'Qt',
u'RasterGraphics', u'Recorder', u'RemoteAccess',
u'Scanning', u'Screensaver', u'Security', u'Settings', u'Spreadsheet', u'System',
u'Telephony', u'TerminalEmulator', u'TextEditor',
u'Utility',
u'VectorGraphics', u'Video', u'Viewer',
u'WordProcessor', u'Wine', u'Wine-Programs-Accessories',
u'X-GNOME-NetworkSettings', u'X-GNOME-PersonalSettings', u'X-GNOME-SystemSettings',
u'X-KDE-More', u'X-Red-Hat-Base', u'X-SuSE-ControlCenter-System',
)
txt_category = wx.StaticText(self, label=GT(u'Categories'), name=u'category')
# This option does not get set by importing a new project
ti_category = ComboBox(self, inputid.CAT, choices=opts_category, name=txt_category.Name,
defaultValue=opts_category[0])
btn_catadd = CreateButton(self, btnid.ADD, GT(u'Add'), u'add', name=u'add category')
btn_catdel = CreateButton(self, btnid.REMOVE, GT(u'Remove'), u'remove', name=u'rm category')
btn_catclr = CreateButton(self, btnid.CLEAR, GT(u'Clear'), u'clear', name=u'clear category')
# FIXME: Allow using multi-select + remove
lst_categories = ListCtrl(self, listid.CAT, name=u'Categories')
# Can't set LC_SINGLE_SEL in constructor for wx 3.0 (ListCtrl bug???)
lst_categories.SetSingleStyle(wx.LC_SINGLE_SEL)
self.OnToggle()
SetPageToolTips(self)
# *** Event Handling *** #
btn_open.Bind(wx.EVT_BUTTON, self.OnLoadLauncher)
btn_save.Bind(wx.EVT_BUTTON, self.OnExportLauncher)
btn_preview.Bind(wx.EVT_BUTTON, self.OnPreviewLauncher)
chk_enable.Bind(wx.EVT_CHECKBOX, self.OnToggle)
chk_filename.Bind(wx.EVT_CHECKBOX, self.OnSetCustomFilename)
wx.EVT_KEY_DOWN(ti_category, self.SetCategory)
wx.EVT_KEY_DOWN(lst_categories, self.SetCategory)
btn_catadd.Bind(wx.EVT_BUTTON, self.SetCategory)
btn_catdel.Bind(wx.EVT_BUTTON, self.SetCategory)
btn_catclr.Bind(wx.EVT_BUTTON, self.OnClearCategories)
# *** Layout *** #
LEFT_CENTER = wx.ALIGN_LEFT|wx.ALIGN_CENTER_VERTICAL
LEFT_BOTTOM = lyt.ALGN_LB
RIGHT_BOTTOM = wx.ALIGN_RIGHT|wx.ALIGN_BOTTOM
lyt_top = BoxSizer(wx.HORIZONTAL)
lyt_top.Add(chk_enable, 0, LEFT_BOTTOM)
lyt_top.AddStretchSpacer(1)
lyt_top.Add(btn_open, 0, wx.ALIGN_TOP)
lyt_top.Add(btn_save, 0, wx.ALIGN_TOP)
lyt_top.Add(btn_preview, 0, wx.ALIGN_TOP)
lyt_opts1 = wx.FlexGridSizer()
lyt_opts1.SetCols(3)
lyt_opts1.SetRows(2)
lyt_opts1.Add(txt_type, 0, LEFT_CENTER)
lyt_opts1.Add(ti_type, 0, wx.EXPAND|wx.LEFT, 5)
lyt_opts1.Add(chk_term, 0, LEFT_CENTER|wx.LEFT, 5)
lyt_opts1.Add(txt_enc, 0, LEFT_CENTER|wx.TOP, 5)
lyt_opts1.Add(ti_enc, 0, lyt.PAD_LT, 5)
lyt_opts1.Add(chk_notify, 0, LEFT_CENTER|lyt.PAD_LT, 5)
lyt_mid = wx.GridBagSizer()
lyt_mid.SetCols(4)
lyt_mid.AddGrowableCol(1)
lyt_mid.AddGrowableCol(3)
# Row 1
row = 0
lyt_mid.Add(txt_filename, (row, 0), flag=LEFT_CENTER)
lyt_mid.Add(ti_filename, (row, 1), flag=wx.EXPAND|wx.LEFT, border=5)
lyt_mid.Add(chk_filename, (row, 2), span=(1, 2), flag=LEFT_CENTER|wx.LEFT, border=5)
# Row 2
row += 1
lyt_mid.Add(txt_name, (row, 0), flag=LEFT_CENTER|wx.TOP, border=5)
lyt_mid.Add(ti_name, (row, 1), flag=wx.EXPAND|lyt.PAD_LT, border=5)
lyt_mid.Add(txt_exec, (row, 2), flag=LEFT_CENTER|lyt.PAD_LT, border=5)
lyt_mid.Add(ti_exec, (row, 3), flag=wx.EXPAND|lyt.PAD_LT, border=5)
# Row 3
row += 1
lyt_mid.Add(txt_comm, (row, 0), flag=LEFT_CENTER|wx.TOP, border=5)
lyt_mid.Add(ti_comm, (row, 1), flag=wx.EXPAND|lyt.PAD_LT, border=5)
lyt_mid.Add(txt_icon, (row, 2), flag=LEFT_CENTER|lyt.PAD_LT, border=5)
lyt_mid.Add(ti_icon, (row, 3), flag=wx.EXPAND|lyt.PAD_LT, border=5)
# Row 4
row += 1
lyt_mid.Add(txt_mime, (row, 0), flag=LEFT_CENTER|wx.TOP, border=5)
lyt_mid.Add(ti_mime, (row, 1), flag=wx.EXPAND|lyt.PAD_LT, border=5)
lyt_bottom = wx.GridBagSizer()
row = 0
lyt_bottom.Add(txt_other, (row, 0), flag=LEFT_BOTTOM)
lyt_bottom.Add(txt_category, (row, 2), flag=LEFT_BOTTOM|wx.LEFT, border=5)
lyt_bottom.Add(ti_category, (row, 3), flag=LEFT_BOTTOM|wx.LEFT, border=5)
lyt_bottom.Add(btn_catadd, (row, 4), flag=RIGHT_BOTTOM)
lyt_bottom.Add(btn_catdel, (row, 5), flag=RIGHT_BOTTOM)
lyt_bottom.Add(btn_catclr, (row, 6), flag=RIGHT_BOTTOM)
row += 1
lyt_bottom.Add(ti_other, (row, 0), (1, 2), wx.EXPAND)
lyt_bottom.Add(lst_categories, (row, 2), (1, 5), wx.EXPAND|wx.LEFT, 5)
lyt_bottom.AddGrowableRow(1)
lyt_bottom.AddGrowableCol(1)
lyt_bottom.AddGrowableCol(4)
# --- Page 5 Sizer --- #
lyt_main = BoxSizer(wx.VERTICAL)
lyt_main.AddSpacer(5)
lyt_main.Add(lyt_top, 0, wx.EXPAND|lyt.PAD_LR, 5)
lyt_main.Add(lyt_opts1, 0, wx.EXPAND|lyt.PAD_LRT, 5)
lyt_main.Add(lyt_mid, 0, wx.EXPAND|lyt.PAD_LRT, 5)
lyt_main.Add(lyt_bottom, 1, wx.EXPAND|wx.ALL, 5)
self.SetAutoLayout(True)
self.SetSizer(lyt_main)
self.Layout()
## Retrieves page data for export
def Get(self):
return self.GetLauncherInfo()
## Formats the launcher information for export
def GetLauncherInfo(self):
desktop_list = [u'[Desktop Entry]']
name = GetField(self, inputid.NAME).GetValue()
if not TextIsEmpty(name):
desktop_list.append(u'Name={}'.format(name))
desktop_list.append(u'Version=1.0')
executable = GetField(self, inputid.EXEC).GetValue()
if not TextIsEmpty(executable):
desktop_list.append(u'Exec={}'.format(executable))
comment = GetField(self, inputid.DESCR).GetValue()
if not TextIsEmpty(comment):
desktop_list.append(u'Comment={}'.format(comment))
icon = GetField(self, inputid.ICON).GetValue()
if not TextIsEmpty(icon):
desktop_list.append(u'Icon={}'.format(icon))
launcher_type = GetField(self, inputid.TYPE).GetValue()
if not TextIsEmpty(launcher_type):
desktop_list.append(u'Type={}'.format(launcher_type))
desktop_list.append(u'Terminal={}'.format(GS(GetField(self, chkid.TERM).GetValue()).lower()))
desktop_list.append(u'StartupNotify={}'.format(GS(GetField(self, chkid.NOTIFY).GetValue()).lower()))
encoding = GetField(self, inputid.ENC).GetValue()
if not TextIsEmpty(encoding):
desktop_list.append(u'Encoding={}'.format(encoding))
lst_categories = GetField(self, listid.CAT)
categories = []
cat_total = lst_categories.GetItemCount()
count = 0
while count < cat_total:
C = lst_categories.GetItemText(count)
if not TextIsEmpty(C):
categories.append(lst_categories.GetItemText(count))
count += 1
# Add a final semi-colon if categories is not empty
if categories:
categories = u';'.join(categories)
if categories[-1] != u';':
categories = u'{};'.format(categories)
desktop_list.append(u'Categories={}'.format(categories))
other = GetField(self, inputid.OTHER).GetValue()
if not TextIsEmpty(other):
desktop_list.append(other)
return u'\n'.join(desktop_list)
## Retrieves the filename to be used for the menu launcher
def GetOutputFilename(self):
if not GetField(self, chkid.FNAME).GetValue():
filename = GetField(self, inputid.FNAME).GetValue().strip(u' ').replace(u' ', u'_')
if not TextIsEmpty(filename):
return filename
return GetField(self, inputid.NAME).GetValue().strip(u' ').replace(u' ', u'_')
## TODO: Doxygen
def GetSaveData(self):
if GetField(self, chkid.ENABLE).GetValue():
data = self.GetLauncherInfo()
data = u'\n'.join(data.split(u'\n')[1:])
if not GetField(self, chkid.FNAME).GetValue():
data = u'[FILENAME={}]\n{}'.format(GetField(self, inputid.FNAME).GetValue(), data)
return u'<<MENU>>\n1\n{}\n<</MENU>>'.format(data)
else:
return u'<<MENU>>\n0\n<</MENU>>'
## TODO: Doxygen
def IsOkay(self):
return GetField(self, chkid.ENABLE).GetValue()
## Handles button event from clear categories button
def OnClearCategories(self, event=None):
cats = GetField(self, listid.CAT)
if cats.GetItemCount():
clear = ConfirmationDialog(GetMainWindow(), GT(u'Confirm'), GT(u'Clear categories?'))
if clear.Confirmed():
cats.DeleteAllItems()
## Saves launcher information to file
#
# FIXME: Might be problems with reading/writing launchers (see OnLoadLauncher)
# 'Others' field not being completely filled out.
def OnExportLauncher(self, event=None):
Logger.Debug(__name__, u'Export launcher ...')
# Get data to write to control file
menu_data = self.GetLauncherInfo().encode(u'utf-8')
dia = wx.FileDialog(GetMainWindow(), GT(u'Save Launcher'), os.getcwd(),
style=wx.FD_SAVE|wx.FD_CHANGE_DIR|wx.FD_OVERWRITE_PROMPT)
if ShowDialog(dia):
path = dia.GetPath()
# Create a backup file
overwrite = False
if os.path.isfile(path):
backup = u'{}.backup'.format(path)
shutil.copy(path, backup)
overwrite = True
try:
WriteFile(path, menu_data)
if overwrite:
os.remove(backup)
except UnicodeEncodeError:
detail1 = GT(u'Unfortunately Debreate does not support unicode yet.')
detail2 = GT(u'Remove any non-ASCII characters from your project.')
ShowErrorDialog(GT(u'Save failed'), u'{}\n{}'.format(detail1, detail2), title=GT(u'Unicode Error'))
os.remove(path)
# Restore from backup
shutil.move(backup, path)
## Loads a .desktop launcher's data
#
# FIXME: Might be problems with reading/writing launchers (see OnExportLauncher)
# 'Others' field not being completely filled out.
def OnLoadLauncher(self, event=None):
dia = wx.FileDialog(GetMainWindow(), GT(u'Open Launcher'), os.getcwd(),
style=wx.FD_CHANGE_DIR)
if ShowDialog(dia):
path = dia.GetPath()
data = ReadFile(path, split=True)
# Remove unneeded lines
if data[0] == u'[Desktop Entry]':
data = data[1:]
self.Reset()
self.SetLauncherData(u'\n'.join(data))
## TODO: Doxygen
def OnPreviewLauncher(self, event=None):
# Show a preview of the .desktop config file
config = self.GetLauncherInfo()
dia = TextPreview(title=GT(u'Menu Launcher Preview'),
text=config, size=(500,400))
dia.ShowModal()
dia.Destroy()
## TODO: Doxygen
def OnSetCustomFilename(self, event=None):
chk_filename = GetField(self, chkid.FNAME)
txt_filename = GetField(self, txtid.FNAME)
ti_filename = GetField(self, inputid.FNAME)
if not chk_filename.IsEnabled():
txt_filename.Enable(False)
ti_filename.Enable(False)
return
if chk_filename.GetValue():
txt_filename.Enable(False)
ti_filename.Enable(False)
return
txt_filename.Enable(True)
ti_filename.Enable(True)
    ## Enables/Disables fields for creating a launcher
    #
    # Driven by the "Create system menu launcher" checkbox: every registered
    # input on the page is toggled to match it, except the ids in skip_ids.
    def OnToggle(self, event=None):
        enabled = GetField(self, chkid.ENABLE).IsChecked()
        # Fields that should not be disabled
        skip_ids = (
            chkid.ENABLE,
            btnid.BROWSE,
            txtid.FNAME,
            )
        for LIST in inputid, chkid, listid, btnid:
            for ID in LIST.IdList:
                if ID not in skip_ids:
                    field = GetField(self, ID)
                    # Not every id in the lists has a widget on this page;
                    # the isinstance guard filters those out before Enable()
                    if isinstance(field, wx.Window):
                        field.Enable(enabled)
        # Disable/Enable static text labels
        st_labels = GetAllTypeFields(self, wx.StaticText)
        for ST in st_labels:
            if ST.Id not in skip_ids:
                ST.Enable(enabled)
        # Re-evaluate whether the custom filename entry should be editable
        self.OnSetCustomFilename()
## Resets all fields to default values
def Reset(self):
chk_filename = GetField(self, chkid.FNAME)
chk_filename.SetValue(chk_filename.Default)
GetField(self, inputid.FNAME).Clear()
for IDS in inputid, chkid, listid:
idlist = IDS.IdList
for ID in idlist:
field = GetField(self, ID)
if isinstance(field, wx.Window):
field.Reset()
self.OnToggle()
    ## Adds, removes, or clears entries in the category list
    #
    # Shared handler for keyboard events (Return/Delete) and the
    # Add/Remove button events; the key code or button id selects the action.
    def SetCategory(self, event=None):
        try:
            # Keyboard event
            ID = event.GetKeyCode()
        except AttributeError:
            # Button event
            ID = event.GetEventObject().GetId()
        # Strip all whitespace from the typed category name
        cat = GetField(self, inputid.CAT).GetValue()
        cat = cat.split()
        cat = u''.join(cat)
        lst_categories = GetField(self, listid.CAT)
        if ID in (wx.ID_ADD, wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER):
            # Append at the end of the list
            lst_categories.InsertStringItem(lst_categories.GetItemCount(), cat)
        elif ID in (wx.ID_REMOVE, wx.WXK_DELETE):
            # Delete the (single) selected entry, if any
            if lst_categories.GetItemCount() and lst_categories.GetSelectedItemCount():
                cur_cat = lst_categories.GetFirstSelected()
                lst_categories.DeleteItem(cur_cat)
        elif ID == wx.ID_CLEAR:
            if lst_categories.GetItemCount():
                if ConfirmationDialog(GetMainWindow(), GT(u'Confirm'),
                        GT(u'Clear categories?')).ShowModal() in (wx.ID_OK, wx.OK):
                    lst_categories.DeleteAllItems()
        if event:
            event.Skip()
    ## Fills out launcher information from loaded file
    #
    # Parses Key=Value lines into the page's fields; known keys populate
    # their widgets, "[KEY=VALUE]" lines and unknown keys are collected into
    # the misc/other text area.
    #
    # \param data
    #   Information to fill out menu launcher fields
    # \param enabled
    #   \b \e bool : Launcher will be flagged for export if True
    def SetLauncherData(self, data, enabled=True):
        # Make sure we are dealing with a list
        if isinstance(data, (unicode, str)):
            data = data.split(u'\n')
        # Data list is not empty
        if data:
            Logger.Debug(__name__, u'Loading launcher')
            # Project files store a leading "0"/"1" enabled flag
            if data[0].isnumeric():
                enabled = int(data.pop(0)) > 0
            if DebugEnabled():
                for L in data:
                    print(u'  Launcher line: {}'.format(L))
            Logger.Debug(__name__, u'Enabling launcher: {}'.format(enabled))
            if enabled:
                GetField(self, chkid.ENABLE).SetValue(True)
            data_defs = {}
            data_defs_remove = []
            misc_defs = []
            # NOTE(review): split(u'=') without maxsplit means any value
            # containing '=' is truncated at its second '=' -- confirm
            # whether that can occur in real launcher data
            for L in data:
                if u'=' in L:
                    if L[0] == u'[' and L[-1] == u']':
                        # Bracketed "[KEY=VALUE]" lines go to the misc list
                        key = L[1:-1].split(u'=')
                        value = key[1]
                        key = key[0]
                        misc_defs.append(u'{}={}'.format(key, value))
                    else:
                        key = L.split(u'=')
                        value = key[1]
                        key = key[0]
                        data_defs[key] = value
            # Fields using SetValue() function
            set_value_fields = (
                (u'Name', GetField(self, inputid.NAME)),
                (u'Exec', GetField(self, inputid.EXEC)),
                (u'Comment', GetField(self, inputid.DESCR)),
                (u'Icon', GetField(self, inputid.ICON)),
                (u'Type', GetField(self, inputid.TYPE)),
                (u'Encoding', GetField(self, inputid.ENC)),
                )
            for label, control in set_value_fields:
                try:
                    control.SetValue(data_defs[label])
                    data_defs_remove.append(label)
                except KeyError:
                    pass
            # Boolean keys ("true"/"false") map onto checkboxes
            check_box_fields = (
                (u'Terminal', GetField(self, chkid.TERM)),
                (u'StartupNotify', GetField(self, chkid.NOTIFY)),
                )
            for label, control in check_box_fields:
                try:
                    if data_defs[label].lower() == u'true':
                        control.SetValue(True)
                    else:
                        control.SetValue(False)
                    data_defs_remove.append(label)
                except KeyError:
                    pass
            # Categories are semi-colon separated
            try:
                lst_categories = GetField(self, listid.CAT)
                categories = tuple(data_defs[u'Categories'].split(u';'))
                for C in categories:
                    lst_categories.InsertStringItem(lst_categories.GetItemCount(), C)
                data_defs_remove.append(u'Categories')
            except KeyError:
                pass
            # Drop every key that was consumed by a widget above
            for K in data_defs_remove:
                if K in data_defs:
                    del data_defs[K]
            # Add any leftover keys to misc/other
            for K in data_defs:
                if K not in (u'Version',):
                    misc_defs.append(u'{}={}'.format(K, data_defs[K]))
            # Iterate in reverse so pop(index) stays valid while removing
            for index in reversed(range(len(misc_defs))):
                K = misc_defs[index]
                # Set custom filename
                if u'FILENAME=' in K:
                    filename = K.replace(u'FILENAME=', u'')
                    if not TextIsEmpty(filename):
                        Logger.Debug(__name__, u'Setting custom filename: {}'.format(filename))
                        GetField(self, inputid.FNAME).SetValue(filename)
                        GetField(self, chkid.FNAME).SetValue(False)
                    # Remove so not added to misc. list
                    misc_defs.pop(index)
                    continue
            if misc_defs:
                GetField(self, inputid.OTHER).SetValue(u'\n'.join(sorted(misc_defs)))
            self.OnToggle()
| 17,487 | 2 | 367 |
61f29d40c230362f1cdff9faa18bb4b56ea6a7e4 | 970 | py | Python | pycmark_vfm/readers.py | tk0miya/pycmark-vfm | 29a04c11e0a0dd9dcdda655d559919fc900aed34 | [
"Apache-2.0"
] | 2 | 2020-01-28T03:22:22.000Z | 2020-01-28T15:17:23.000Z | pycmark_vfm/readers.py | tk0miya/pycmark-vfm | 29a04c11e0a0dd9dcdda655d559919fc900aed34 | [
"Apache-2.0"
] | null | null | null | pycmark_vfm/readers.py | tk0miya/pycmark-vfm | 29a04c11e0a0dd9dcdda655d559919fc900aed34 | [
"Apache-2.0"
] | null | null | null | """
pycmark_vfm.readers
~~~~~~~~~~~~~~~~~~~
Vivliostyle Flavored Markdown readers for docutils.
:copyright: Copyright 2020 by Takeshi KOMIYA
:license: Apache License 2.0, see LICENSE for details.
"""
import re
from pycmark.readers import LineReaderDecorator
class WalledBlockReader(LineReaderDecorator):
    """A reader for walled blocks."""

    # End marker: up to 3 leading spaces followed by 3 or more '=' signs
    pattern = re.compile(r'^ {0,3}={3,}\s*$')

    def fetch(self, relative: int = 0, **kwargs) -> str:
        """Returns a line until the end of walled block."""
        line = self.reader.fetch(relative, **kwargs)
        if kwargs.get('allow_endmarker') is True:
            # Caller explicitly asked to see the marker line itself
            return line
        elif self.pattern.match(line):
            # Signal end-of-block; IOError doubles as the EOF sentinel here
            raise IOError
        else:
            return line

    def consume_endmarker(self) -> None:
        """Consumes the end marker of wall block."""
        line = self.fetch(1, allow_endmarker=True)
        if self.pattern.match(line):
            self.step(1)
| 27.714286 | 59 | 0.612371 | """
pycmark_vfm.readers
~~~~~~~~~~~~~~~~~~~
Vivliostyle Flavored Markdown readers for docutils.
:copyright: Copyright 2020 by Takeshi KOMIYA
:license: Apache License 2.0, see LICENSE for details.
"""
import re
from pycmark.readers import LineReaderDecorator
class WalledBlockReader(LineReaderDecorator):
    """A reader for walled blocks."""

    pattern = re.compile(r'^ {0,3}={3,}\s*$')

    def fetch(self, relative: int = 0, **kwargs) -> str:
        """Returns a line until the end of walled block."""
        line = self.reader.fetch(relative, **kwargs)
        # Callers may opt in to seeing the end marker itself
        if kwargs.get('allow_endmarker') is True:
            return line
        if self.pattern.match(line):
            raise IOError
        return line

    def consume_endmarker(self) -> None:
        """Consumes the end marker of wall block."""
        candidate = self.fetch(1, allow_endmarker=True)
        if self.pattern.match(candidate):
            self.step(1)
| 0 | 0 | 0 |
764e8dc1ef99d73688bc38aba5e92191582a1eff | 810 | py | Python | restrict_ip/migrations/0001_initial.py | celerityweb/django-heroku-restrict-ip | 74b1e7205b0aa4dc0718089525021fba746f32d6 | [
"BSD-3-Clause"
] | null | null | null | restrict_ip/migrations/0001_initial.py | celerityweb/django-heroku-restrict-ip | 74b1e7205b0aa4dc0718089525021fba746f32d6 | [
"BSD-3-Clause"
] | null | null | null | restrict_ip/migrations/0001_initial.py | celerityweb/django-heroku-restrict-ip | 74b1e7205b0aa4dc0718089525021fba746f32d6 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-21 14:42
from __future__ import unicode_literals
from django.db import migrations, models
| 27.931034 | 114 | 0.574074 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-21 14:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the RestrictedIP table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='RestrictedIP',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # max_length=18 fits dotted-quad CIDR ("255.255.255.255/32")
                ('network', models.CharField(help_text=b'IP or Network mask.', max_length=18)),
                ('description', models.TextField(blank=True, null=True)),
            ],
            options={
                'verbose_name': 'Restricted IP',
                'verbose_name_plural': 'Restricted IPs',
            },
        ),
    ]
| 0 | 631 | 23 |
f0b18a02a26b9160c522a758616c1e8b138ac2b2 | 98 | py | Python | tests/files/crawl_settings.py | dimichxp/grab | 5fedee3009c18c8a1139b0f82736ebe473021b46 | [
"MIT"
] | 2,266 | 2015-01-01T08:47:33.000Z | 2022-03-21T05:02:55.000Z | tests/files/crawl_settings.py | dimichxp/grab | 5fedee3009c18c8a1139b0f82736ebe473021b46 | [
"MIT"
] | 221 | 2015-01-13T01:55:57.000Z | 2022-02-25T23:23:35.000Z | tests/files/crawl_settings.py | dimichxp/grab | 5fedee3009c18c8a1139b0f82736ebe473021b46 | [
"MIT"
] | 315 | 2015-01-23T06:38:55.000Z | 2022-03-27T08:33:33.000Z | GRAB_SPIDER_CONFIG = {
'global': {
'spider_modules': ['tests.script_crawl'],
},
}
| 16.333333 | 49 | 0.561224 | GRAB_SPIDER_CONFIG = {
'global': {
'spider_modules': ['tests.script_crawl'],
},
}
| 0 | 0 | 0 |
a3b3efd7cd3379343c62cf48ceefb554270ed899 | 7,029 | py | Python | flask_app/fh_webhook/schema.py | nablabits/fareharbor-webhook | 70e415b9ccd45220693eecb6a668746a1282dd03 | [
"MIT"
] | null | null | null | flask_app/fh_webhook/schema.py | nablabits/fareharbor-webhook | 70e415b9ccd45220693eecb6a668746a1282dd03 | [
"MIT"
] | null | null | null | flask_app/fh_webhook/schema.py | nablabits/fareharbor-webhook | 70e415b9ccd45220693eecb6a668746a1282dd03 | [
"MIT"
] | null | null | null | import functools
from marshmallow import Schema, fields
requiredString = functools.partial(fields.String, required=True)
requiredInteger = functools.partial(fields.Integer, required=True)
requiredBool = functools.partial(fields.Bool, required=True)
requiredFloat = functools.partial(fields.Float, required=True)
requiredEmail = functools.partial(fields.Email, required=True)
requiredDate = functools.partial(fields.Date, required=True)
requiredDateTime = functools.partial(fields.DateTime, required=True)
requiredNested = functools.partial(fields.Nested, required=True)
class AddBikesSchema(Schema):
"""Validate the schema for the add-bikes requests."""
availability_id = requiredInteger()
bikes = fields.List(fields.String(), required=True)
class ReplaceBikesSchema(Schema):
"""Validate the schema for the replace-bikes requests."""
availability_id = requiredInteger()
bike_picked = requiredString()
bike_returned = requiredString()
| 32.845794 | 85 | 0.758998 | import functools
from marshmallow import Schema, fields
# Shorthand factories: each builds the matching marshmallow field class with
# required=True pre-bound via functools.partial.
requiredString = functools.partial(fields.String, required=True)
requiredInteger = functools.partial(fields.Integer, required=True)
requiredBool = functools.partial(fields.Bool, required=True)
requiredFloat = functools.partial(fields.Float, required=True)
requiredEmail = functools.partial(fields.Email, required=True)
requiredDate = functools.partial(fields.Date, required=True)
requiredDateTime = functools.partial(fields.DateTime, required=True)
requiredNested = functools.partial(fields.Nested, required=True)
class IdentifierSchemaMixin(Schema):
    """Mixin providing the required ``pk`` identifier field shared by schemas."""
    pk = fields.Integer(required=True)
class ExtendedOptionsSchema(IdentifierSchemaMixin):
    """Extended option attached to a custom field (modifier/description data)."""
    name = requiredString()
    is_taxable = requiredBool()
    modifier_kind = requiredString()
    description_safe_html = requiredString()
    offset = requiredInteger()
    # pk is already required via IdentifierSchemaMixin; redeclared here with
    # the shorthand factory (same constraint).
    pk = requiredInteger()
    percentage = requiredInteger()
    modifier_type = requiredString()
    is_always_per_customer = requiredBool()
    description = requiredString()
class CustomFieldSchema(IdentifierSchemaMixin):
    """Definition of a custom field, including its pricing-modifier attributes."""
    name = requiredString()
    is_required = requiredBool()
    description = requiredString()
    title = requiredString()
    booking_notes_safe_html = requiredString()
    is_taxable = requiredBool()
    modifier_kind = requiredString()
    description_safe_html = requiredString()
    booking_notes = requiredString()
    offset = requiredInteger()
    percentage = requiredInteger()
    modifier_type = requiredString()
    # Serialized under the JSON key "type"; the attribute is renamed to avoid
    # shadowing the ``type`` builtin.
    field_type = requiredString(data_key="type")
    is_always_per_customer = requiredBool()
    extended_options = fields.Nested(ExtendedOptionsSchema, many=True)
class CustomFieldValueSchema(IdentifierSchemaMixin):
    """A value entered for a custom field, with its display representation."""
    custom_field = requiredNested(CustomFieldSchema)
    name = requiredString()
    display_value = requiredString()
    value = requiredString()
class CustomFieldInstanceSchema(IdentifierSchemaMixin):
    """Association of a custom field with a parent object (e.g. availability)."""
    custom_field = requiredNested(CustomFieldSchema)
class CustomerPrototypeSchema(IdentifierSchemaMixin):
    """Pricing prototype for a customer type (totals with and without tax)."""
    note = requiredString()
    total = requiredInteger()
    display_name = requiredString()
    total_including_tax = requiredInteger()
class CustomerTypeSchema(IdentifierSchemaMixin):
    """Customer type with singular/plural display names."""
    note = requiredString()
    singular = requiredString()
    plural = requiredString()
class CustomerTypeRateSchema(IdentifierSchemaMixin):
    """Rate linking a customer type to its prototype, capacity and totals."""
    customer_prototype = requiredNested(CustomerPrototypeSchema)
    customer_type = requiredNested(CustomerTypeSchema)
    custom_field_instances = fields.Nested(
        CustomFieldInstanceSchema, many=True, allow_none=True
    )
    capacity = requiredInteger()
    # Party-size bounds may legitimately be absent (null in the payload).
    minimum_party_size = requiredInteger(allow_none=True)
    maximum_party_size = requiredInteger(allow_none=True)
    total_including_tax = requiredInteger()
    total = requiredInteger()
class CompanySchema(Schema):
    """Company identification: currency, name and short-name variants."""
    currency = requiredString()
    # NOTE(review): both spellings are declared as optional fields --
    # presumably different payload variants use one or the other; confirm.
    short_name = fields.String()
    shortname = fields.String()
    name = requiredString()
class CheckinStatusSchema(IdentifierSchemaMixin):
    """Check-in status of a customer: type and name plus currently-unused extras."""
    checkin_status_type = requiredString(data_key="type")
    name = requiredString()
    # these below are not used for the moment.
    unicode = fields.String()
    cls_name = fields.String()
    company = fields.Field()
    uri = fields.String()
    sortable_index = fields.Integer()
    # The original declared ``is_hidden`` twice (fields.String first, then
    # fields.Boolean); the later Boolean assignment is the one that takes
    # effect, so only it is kept.
    is_hidden = fields.Boolean()
    cls_ = fields.String(data_key="cls")
class CustomerSchema(IdentifierSchemaMixin):
    """A customer on a booking, with check-in state and custom field values."""
    checkin_url = requiredString()
    checkin_status = requiredNested(CheckinStatusSchema, allow_none=True)
    custom_field_values = requiredNested(
        CustomFieldValueSchema, allow_none=True, many=True
    )
    customer_type_rate = requiredNested(CustomerTypeRateSchema)
class ItemSchema(IdentifierSchemaMixin):
    """Bookable item; only ``pk`` (via mixin) and ``name`` are validated."""
    name = requiredString()
class AvailabilitySchema(IdentifierSchemaMixin):
    """An availability (time slot) for an item, with capacity and rates."""
    capacity = requiredInteger()
    # Party-size bounds may be null in the payload.
    minimum_party_size = requiredInteger(allow_none=True)
    maximum_party_size = requiredInteger(allow_none=True)
    start_at = requiredDateTime()
    end_at = requiredDateTime()
    custom_field_instances = requiredNested(CustomFieldInstanceSchema, many=True)
    customer_type_rates = requiredNested(CustomerTypeRateSchema, many=True)
    item = requiredNested(ItemSchema)
    headline = fields.String()
class ContactSchema(Schema):
    """Booking contact details (phone, email, subscription preference)."""
    phone_country = requiredString()
    name = requiredString()
    is_subscribed_for_email_updates = requiredBool()
    normalized_phone = requiredString()
    phone = requiredString()
    email = requiredString()
    language = fields.String()
class EffectiveCancellationPolicySchema(Schema):
    """Cancellation policy in effect for a booking."""
    cutoff = requiredString(allow_none=True)
    # Serialized under the JSON key "type".
    cancellation_type = requiredString(data_key="type")
class OrderSchema(Schema):
    """Order reference; only the display id is validated."""
    display_id = requiredString()
class BookingSchema(IdentifierSchemaMixin):
    """Top-level booking payload validator, nesting all schemas above."""
    agent = requiredString(allow_none=True)
    arrival = requiredString(allow_none=True)
    confirmation_url = requiredString()
    customer_count = requiredInteger()
    dashboard_url = requiredString()
    desk = requiredString(allow_none=True)
    display_id = requiredString()
    external_id = requiredString()
    note = requiredString()
    note_safe_html = requiredString()
    pickup = requiredString(allow_none=True)
    # Redeclares the mixin's pk with the shorthand factory (same constraint).
    pk = requiredInteger()
    rebooked_from = requiredString(allow_none=True)
    rebooked_to = requiredString(allow_none=True)
    status = requiredString()
    uuid = requiredString()
    voucher_number = requiredString()
    # price fields
    receipt_subtotal = requiredInteger()
    receipt_taxes = requiredInteger()
    receipt_total = requiredInteger()
    amount_paid = requiredInteger()
    invoice_price = requiredInteger()
    # Price displays
    receipt_subtotal_display = requiredString()
    receipt_taxes_display = requiredString()
    receipt_total_display = requiredString()
    amount_paid_display = requiredString()
    invoice_price_display = requiredString()
    # Boolean fields
    is_eligible_for_cancellation = requiredBool()
    is_subscribed_for_sms_updates = requiredBool()
    # Nested fields
    order = requiredNested(OrderSchema, allow_none=True)
    customers = fields.Nested(CustomerSchema, many=True)
    availability = requiredNested(AvailabilitySchema)
    affiliate_company = requiredNested(CompanySchema, allow_none=True)
    company = requiredNested(CompanySchema)
    custom_field_values = requiredNested(CustomFieldValueSchema, many=True)
    effective_cancellation_policy = requiredNested(EffectiveCancellationPolicySchema)
    contact = requiredNested(ContactSchema)
class AddBikesSchema(Schema):
    """Validate the schema for the add-bikes requests."""
    # Availability the bikes are added to, plus the list of bike identifiers.
    availability_id = requiredInteger()
    bikes = fields.List(fields.String(), required=True)
class ReplaceBikesSchema(Schema):
    """Validate the schema for the replace-bikes requests."""
    # Swap one bike for another on the given availability.
    availability_id = requiredInteger()
    bike_picked = requiredString()
    bike_returned = requiredString()
| 0 | 5,651 | 391 |
18926e2306f702e176a9c0e150ceb0e63a7682b1 | 1,194 | py | Python | boto3_assistant/codecommit.py | thorium-cloud/boto3-assistant | 480551afbb28b5348aa54e6dee987f2448544e33 | [
"MIT"
] | null | null | null | boto3_assistant/codecommit.py | thorium-cloud/boto3-assistant | 480551afbb28b5348aa54e6dee987f2448544e33 | [
"MIT"
] | null | null | null | boto3_assistant/codecommit.py | thorium-cloud/boto3-assistant | 480551afbb28b5348aa54e6dee987f2448544e33 | [
"MIT"
] | null | null | null | import boto3
| 27.767442 | 84 | 0.628978 | import boto3
def list_all_repos(token=None):
    """Return metadata for every CodeCommit repository, following pagination.

    Args:
        token: Optional ``nextToken`` from a previous page; callers normally
            omit it and let the recursion handle paging.

    Returns:
        list: Repository metadata dicts, sorted ascending by repository name.
    """
    client = boto3.client('codecommit')
    # Build the call once instead of duplicating it for the token/no-token cases.
    kwargs = {'sortBy': 'repositoryName', 'order': 'ascending'}
    if token is not None:
        kwargs['nextToken'] = token
    response = client.list_repositories(**kwargs)
    if 'nextToken' in response:
        # Recurse to fetch the remaining pages and append them in order.
        response['repositories'] += list_all_repos(token=response['nextToken'])
    return response['repositories']
def get_branches(repo_name, token=None):
    """Return every branch name in the given repository, following pagination.

    Args:
        repo_name: Name of the CodeCommit repository.
        token: Optional ``nextToken`` from a previous page; callers normally
            omit it and let the recursion handle paging.

    Returns:
        list: Branch names.
    """
    client = boto3.client('codecommit')
    # Build the call once instead of duplicating it for the token/no-token cases.
    kwargs = {'repositoryName': repo_name}
    if token is not None:
        kwargs['nextToken'] = token
    response = client.list_branches(**kwargs)
    if 'nextToken' in response:
        # Recurse to fetch the remaining pages and append them in order.
        response['branches'] += get_branches(repo_name, token=response['nextToken'])
    return response['branches']
def get_repo(repo_name):
client = boto3.client('codecommit')
response = client.get_repository(
repositoryName=repo_name
)
return response['repositoryMetadata'] | 1,110 | 0 | 69 |
adbd4e8269a04ff2455439a3cf6e054b7b12029e | 108 | py | Python | py_code_2017/read_json.py | vimm0/python_exercise | 7773d95b4c25b82a9d014f7a814ac83df9ebac17 | [
"MIT"
] | null | null | null | py_code_2017/read_json.py | vimm0/python_exercise | 7773d95b4c25b82a9d014f7a814ac83df9ebac17 | [
"MIT"
] | null | null | null | py_code_2017/read_json.py | vimm0/python_exercise | 7773d95b4c25b82a9d014f7a814ac83df9ebac17 | [
"MIT"
] | 1 | 2018-01-04T16:27:31.000Z | 2018-01-04T16:27:31.000Z | import pandas as pd
import numpy as np
vim= pd.read_json("MOCK_DATA.json")
con= vim.to_csv("MOCK_DATA.csv") | 21.6 | 35 | 0.759259 | import pandas as pd
import numpy as np
# Load the mock JSON dataset and re-export it as CSV.
vim= pd.read_json("MOCK_DATA.json")
# NOTE(review): to_csv() with a path writes the file and returns None, so
# ``con`` ends up None -- confirm the return value is intentionally unused.
con= vim.to_csv("MOCK_DATA.csv")
b254ea0b951015449d2d1892a98370f19d0e0e4a | 1,263 | py | Python | jnpy/datasource/jotdx/constant.py | jojoquant/jnpy | c874060af4b129ae09cee9f8542517b7b2f6573b | [
"MIT"
] | 5 | 2020-05-19T07:32:39.000Z | 2022-03-14T09:09:48.000Z | jnpy/datasource/jotdx/constant.py | jojoquant/jnpy | c874060af4b129ae09cee9f8542517b7b2f6573b | [
"MIT"
] | null | null | null | jnpy/datasource/jotdx/constant.py | jojoquant/jnpy | c874060af4b129ae09cee9f8542517b7b2f6573b | [
"MIT"
] | 3 | 2020-04-02T08:30:17.000Z | 2020-05-03T12:12:05.000Z | # !/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Datetime : 2020/1/17 22:11
# @Author : Fangyang
# @Software : PyCharm
from enum import Enum, unique
class KBarType(Enum):
    '''
    K-line (candlestick) bar kinds used by the pytdx API:

    0  - 5-minute bar
    1  - 15-minute bar
    2  - 30-minute bar
    3  - 1-hour bar
    4  - daily bar
    5  - weekly bar
    6  - monthly bar
    7  - 1-minute bar (extended-hours quote API)
    8  - 1-minute bar
    9  - daily bar
    10 - quarterly bar
    11 - yearly bar
    '''
    # Chained assignments give each member a second alias name (MINUTE_5,
    # HOUR, DAILY, ...); under Enum semantics the second name binds to the
    # same member/value as the first.
    KLINE_TYPE_5MIN = MINUTE_5 = 0
    KLINE_TYPE_15MIN = MINUTE_15 = 1
    KLINE_TYPE_30MIN = MINUTE_30 = 2
    KLINE_TYPE_1HOUR = HOUR = 3
    KLINE_TYPE_DAILY = DAILY = 4
    KLINE_TYPE_WEEKLY = WEEKLY = 5
    KLINE_TYPE_EXHQ_1MIN = MINUTE = 7
    KLINE_TYPE_1MIN = 8
    # Mirrors the Interval enum in vnpy.trade.constant: when pytdxLoader
    # reads data, parameters taken from the vnpy UI are mapped to pytdx.
    # MINUTE_5 = 0
    # MINUTE_15 = 1
    # MINUTE_30 = 2
    # HOUR = 3
    KLINE_TYPE_MONTHLY = 6
    KLINE_TYPE_RI_K = 9
    KLINE_TYPE_3MONTH = 10
    KLINE_TYPE_YEARLY = 11
class FutureMarketCode(Enum):
    '''
    pytdx market codes for futures exchanges, as returned by:
    data_df = ex_api.to_df(ex_api.get_markets())
    '''
    CFFEX = 47  # China Financial Futures Exchange (futures; options use 7)
    SHFE = 30  # Shanghai Futures Exchange
    CZCE = 28  # Zhengzhou Commodity Exchange
    DCE = 29  # Dalian Commodity Exchange
    # NOTE(review): INE shares value 30 with SHFE, so under Enum semantics INE
    # is a mere alias of SHFE (FutureMarketCode(30) resolves to SHFE) -- this
    # looks like a copy/paste mistake; confirm the real INE market code.
    INE = 30  # Shanghai International Energy Exchange
if __name__ == '__main__':
    # Quick manual check: look up a market code by exchange name.
    x = FutureMarketCode['SHFE'].value
    print(1)
| 19.734375 | 48 | 0.581948 | # !/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Datetime : 2020/1/17 22:11
# @Author : Fangyang
# @Software : PyCharm
from enum import Enum, unique
class KBarType(Enum):
'''
K 线种类
0 - 5 分钟K 线
1 - 15 分钟K 线
2 - 30 分钟K 线
3 - 1 小时K 线
4 - 日K 线
5 - 周K 线
6 - 月K 线
7 - 1 分钟
8 - 1 分钟K 线
9 - 日K 线
10 - 季K 线
11 - 年K 线
'''
KLINE_TYPE_5MIN = MINUTE_5 = 0
KLINE_TYPE_15MIN = MINUTE_15 = 1
KLINE_TYPE_30MIN = MINUTE_30 = 2
KLINE_TYPE_1HOUR = HOUR = 3
KLINE_TYPE_DAILY = DAILY = 4
KLINE_TYPE_WEEKLY = WEEKLY = 5
KLINE_TYPE_EXHQ_1MIN = MINUTE = 7
KLINE_TYPE_1MIN = 8
# vnpy.trade.constant 的 Interval 枚举类
# 在 pytdxLoader 读取数据的时候, 将vnpy界面拿到的参数转成pytdx
# MINUTE_5 = 0
# MINUTE_15 = 1
# MINUTE_30 = 2
# HOUR = 3
KLINE_TYPE_MONTHLY = 6
KLINE_TYPE_RI_K = 9
KLINE_TYPE_3MONTH = 10
KLINE_TYPE_YEARLY = 11
class FutureMarketCode(Enum):
'''
使用pytdx获取
data_df = ex_api.to_df(ex_api.get_markets())
'''
CFFEX = 47 # 中国金融期货交易所(期货), 期权是 7
SHFE = 30 # 上海期货交易所
CZCE = 28 # 郑州商品交易所
DCE = 29 # 大连商品交易所
INE = 30 # 上海国际能源交易中心
if __name__ == '__main__':
x = FutureMarketCode['SHFE'].value
print(1)
| 0 | 0 | 0 |
c07e0b5eb30cce5d7379f6bd45c7253589758c43 | 22,337 | py | Python | rdfframework/datasets/dataconverter.py | KnowledgeLinks/rdfframework | c6b6408b6e90dd166b4981aeaf3a768e46c22ce0 | [
"MIT"
] | 7 | 2016-11-09T07:59:43.000Z | 2019-02-19T01:34:29.000Z | rdfframework/datasets/dataconverter.py | KnowledgeLinks/rdfframework | c6b6408b6e90dd166b4981aeaf3a768e46c22ce0 | [
"MIT"
] | 22 | 2018-01-10T15:03:22.000Z | 2019-12-17T12:00:10.000Z | rdfframework/datasets/dataconverter.py | KnowledgeLinks/rdfframework | c6b6408b6e90dd166b4981aeaf3a768e46c22ce0 | [
"MIT"
] | null | null | null | """ This module is used for setting an intial test configs and values for
the rdfframework """
import datetime
import multiprocessing as mp
import multiprocessing.managers as managers
import pdb
from rdfframework.utilities import SimpleMapReduce
from rdfframework.datatypes import pyrdf, BaseRdfDataType, Uri
def convert_results(data, **kwargs):
    """ converts the results of a query to RdfDatatype instances

        args:
            data: a list of triples

        kwargs:
            multiprocessing: if True, split the conversion across worker
                processes (default False)
    """
    if kwargs.get("multiprocessing", False):
        # Register the rdfframework datatype classes so they can be shared
        # with the worker processes through the manager.
        manager = SharedManager()
        manager.register("BaseRdfDataType", BaseRdfDataType)
        manager.register("Uri", Uri)
        data_l = len(data)
        # Ceiling division: one chunk per worker in the pool.
        group_size = data_l // pool_size
        if data_l % pool_size:
            group_size += 1
        split_data = [data[i:i + group_size]
                      for i in range(0, data_l, group_size)]
        output = manager.Queue()
        # output = manager.list()
        # output_data = POOL.map(convert_row, split_data)
        workers = [mp.Process(target=convert_batch, args=(item, output,))
                   for item in split_data]
        for worker in workers:
            # worker.Daemon = True
            worker.start()
        results = []
        # Drain the queue while workers are alive so it never fills and
        # blocks the producers.
        while True:
            running = any(p.is_alive() for p in workers)
            while not output.empty():
                results += output.get()
            if not running:
                break
        print("Finished - workers not stoped")
        for worker in workers:
            worker.join()
        # pdb.set_trace()
        # return output
        # Pick up any items that arrived between the last drain and join().
        for i in range(output.qsize()):
            results += output.get()
        return results
    else:
        # Single-process path: convert every binding dict in place.
        return [{key:pyrdf(value) for key, value in row.items()}
                for row in data]
# Number of worker processes: leave one core for the parent, minimum of 1.
pool_size = mp.cpu_count() - 1 or 1
DATA = [
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Agent'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/chris-clark'}},
{'o': {'type': 'literal', 'value': 'Chris Clark'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/chris-clark'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Collection'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/marmot-collection/veterans-remember'}},
{'o': {'type': 'literal', 'value': 'Veterans Remember'},
'p': {'type': 'uri', 'value': 'http://www.w3.org/2000/01/rdf-schema#label'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/marmot-collection/veterans-remember'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Topic'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/970west'}},
{'o': {'type': 'literal', 'value': '970west'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/970west'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Topic'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/970west-veterans-remember'}},
{'o': {'type': 'literal', 'value': '970west -- veterans remember'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/970west-veterans-remember'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Topic'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/wwii'}},
{'o': {'type': 'literal', 'value': 'wwii'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/wwii'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Agent'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/charlie-blackmer'}},
{'o': {'type': 'literal', 'value': 'Charlie Blackmer'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/charlie-blackmer'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Agent'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/laura-mullenix'}},
{'o': {'type': 'literal', 'value': 'Laura Mullenix'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/laura-mullenix'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Topic'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/interview-with-ralph-dorn'}},
{'o': {'type': 'literal', 'value': 'Interview with Ralph Dorn'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/interview-with-ralph-dorn'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Place'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3186684'}},
{'o': {'type': 'literal', 'value': 'Grand Junction, Colorado'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3186684'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Summary'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3193326'}},
{'o': {'type': 'literal',
'value': 'Interview with Mesa County Libraries production team.'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3193326'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/instanceOf'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/marmot-collection/veterans-remember'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/partOf'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/970west'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/subject'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/970west-veterans-remember'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/subject'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/wwii'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/subject'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/interview-with-ralph-dorn'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/subject'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'bnode', 'value': 't3186684'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/subject'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/chris-clark'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/vocabulary/relators/cre'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/charlie-blackmer'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/vocabulary/relators/cre'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/laura-mullenix'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/vocabulary/relators/cre'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'bnode', 'value': 't3193326'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/summary'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'datatype': 'http://www.w3.org/2001/XMLSchema#dateTime',
'type': 'literal',
'value': '2018-03-28T21:01:01.049Z'},
'p': {'type': 'uri',
'value': 'http://knowledgelinks.io/ns/data-structures/esIndexTime'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Work'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/MovingImage'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Local'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3190361'}},
{'o': {'type': 'literal', 'value': 'mesa:48'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3190361'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/mesa-county-libraries'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/agent'},
's': {'type': 'bnode', 'value': 't3192025'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Manufacture'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3192025'}},
{'o': {'type': 'literal', 'value': '2017-08-16T21:06:55.434652'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/generationDate'},
's': {'type': 'bnode', 'value': 't3194298'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/GenerationProcess'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3194298'}},
{'o': {'type': 'literal',
'value': 'Generated by BIBCAT version i1.13.0 from KnowledgeLinks.io',
'xml:lang': 'en'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3194298'}},
{'o': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/ralph-dorn'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/agent'},
's': {'type': 'bnode', 'value': 't3194722'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Manufacture'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3194722'}},
{'o': {'type': 'literal', 'value': 'Interview with Ralph Dorn'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/mainTitle'},
's': {'type': 'bnode', 'value': 't3196122'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Title'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3196122'}},
{'o': {'type': 'literal', 'value': 'Interview with Ralph Dorn'},
'p': {'type': 'uri', 'value': 'http://www.w3.org/2000/01/rdf-schema#label'},
's': {'type': 'bnode', 'value': 't3196122'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Carrier'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3196572'}},
{'o': {'type': 'literal', 'value': 'Moving Image'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3196572'}},
{'o': {'type': 'uri', 'value': 'https://marmot.org/'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/agent'},
's': {'type': 'bnode', 'value': 't3199929'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Distribution'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3199929'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/CoverArt'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3200840'}},
{'o': {'type': 'uri',
'value': 'https://islandora.marmot.org/islandora/object/mesa:48/datastream/TN/view'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3200840'}},
{'o': {'type': 'uri', 'value': 'https://mesacountylibraries.org/'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/agent'},
's': {'type': 'bnode', 'value': 't3202252'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Publication'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3202252'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Organization'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://marmot.org/'}},
{'o': {'type': 'uri', 'value': 'http://schema.org/NGO'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://marmot.org/'}},
{'o': {'type': 'literal', 'value': 'Marmot Library Network', 'xml:lang': 'en'},
'p': {'type': 'uri', 'value': 'http://www.w3.org/2000/01/rdf-schema#label'},
's': {'type': 'uri', 'value': 'https://marmot.org/'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Organization'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://mesacountylibraries.org/'}},
{'o': {'type': 'uri', 'value': 'http://schema.org/Library'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://mesacountylibraries.org/'}},
{'o': {'type': 'literal', 'value': 'Mesa County Libraries', 'xml:lang': 'en'},
'p': {'type': 'uri', 'value': 'http://www.w3.org/2000/01/rdf-schema#label'},
's': {'type': 'uri', 'value': 'https://mesacountylibraries.org/'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Agent'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/mesa-county-libraries'}},
{'o': {'type': 'literal', 'value': 'Mesa County Libraries'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/mesa-county-libraries'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Agent'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/ralph-dorn'}},
{'o': {'type': 'literal', 'value': 'Ralph Dorn'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/ralph-dorn'}},
{'o': {'type': 'bnode', 'value': 't3194298'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/generationProcess'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3196122'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/title'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3200840'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/coverArt'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3192025'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/provisionActivity'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3194722'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/provisionActivity'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3199929'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/provisionActivity'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3202252'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/provisionActivity'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3196572'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/carrier'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3190361'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/identifiedBy'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Instance'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}}]
if __name__ == '__main__':
    # NOTE(review): ``time_test`` is not defined anywhere in this module as
    # shown -- presumably a timing helper defined elsewhere; confirm before
    # running this benchmark script.
    time_test(DATA)
    time_test(DATA, multiprocessing=True)
    time_test(DATA)
    time_test(DATA, multiprocessing=True)
    from rdfframework.sparql import get_all_item_data
    from rdfframework.connections import Blazegraph
    from rdfframework.datatypes import RdfNsManager
    # Register the bibframe namespace, pull live query data and time its
    # conversion both single-process and multiprocessing.
    RdfNsManager({"bf": "http://id.loc.gov/ontologies/bibframe/"})
    data_iri = "<https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008>"
    conn = Blazegraph(namespace="plain2peak")
    data = get_all_item_data(data_iri, conn)
    print("data count: ", len(data))
    time_test(data)
    time_test(data, multiprocessing=True)
| 47.830835 | 93 | 0.569145 | """ This module is used for setting an intial test configs and values for
the rdfframework """
import datetime
import multiprocessing as mp
import multiprocessing.managers as managers
import pdb
from rdfframework.utilities import SimpleMapReduce
from rdfframework.datatypes import pyrdf, BaseRdfDataType, Uri
def convert_batch(data, output=None):
    """ converts a batch of query-result rows to RdfDatatype instances

        Worker entry point for convert_results' multiprocessing path: each
        row's raw SPARQL binding values are run through pyrdf and the
        converted row is put on the shared output queue.

        args:
            data: a list of result rows (dict of binding name -> raw value
                dict as returned by the triplestore)
            output: a queue-like object with a put() method; each converted
                row is put wrapped in a single-item list so the consumer can
                extend its result list with it.
                NOTE(review): the None default is unusable -- output.put
                would raise AttributeError; confirm callers always pass a
                queue.
    """
    print("starting")
    for row in data:
        output.put([{key: pyrdf(value) for key, value in row.items()}])
    print("converted")
    return
class SharedManager(managers.BaseManager):
    """Process manager subclass used to register shared rdfframework types."""
def convert_results(data, **kwargs):
    """ converts the results of a query to RdfDatatype instances

        args:
            data: a list of triples

        kwargs:
            multiprocessing (bool): when True, split the rows across
                pool_size worker processes (default False)

        returns:
            list of dicts mapping each binding name to a converted value
    """
    if kwargs.get("multiprocessing", False):
        manager = SharedManager()
        manager.register("BaseRdfDataType", BaseRdfDataType)
        manager.register("Uri", Uri)
        data_l = len(data)
        # spread the rows evenly over the workers, rounding the chunk size
        # up so the trailing remainder still lands in the final chunk.
        # pool_size is a module-level global defined below this function.
        group_size = data_l // pool_size
        if data_l % pool_size:
            group_size += 1
        split_data = [data[i:i + group_size]
                      for i in range(0, data_l, group_size)]
        # NOTE(review): SharedManager only registers BaseRdfDataType/Uri and
        # start() is never called, so manager.Queue() looks like it should
        # raise AttributeError here -- confirm; mp.Manager()/mp.Queue() may
        # have been intended.
        output = manager.Queue()
        # output = manager.list()
        # output_data = POOL.map(convert_row, split_data)
        workers = [mp.Process(target=convert_batch, args=(item, output,))
                   for item in split_data]
        for worker in workers:
            # worker.Daemon = True
            worker.start()
        results = []
        # drain the queue while any worker is still alive so the workers
        # never block on a full queue before finishing.
        while True:
            running = any(p.is_alive() for p in workers)
            while not output.empty():
                results += output.get()
            if not running:
                break
        print("Finished - workers not stoped")
        for worker in workers:
            worker.join()
        # pdb.set_trace()
        # return output
        # pick up anything queued between the last drain and worker exit.
        for i in range(output.qsize()):
            results += output.get()
        return results
    else:
        # single-process path: convert every binding value in place.
        return [{key:pyrdf(value) for key, value in row.items()}
                for row in data]
# Worker-pool size: leave one core free; falls back to 1 on single-core boxes.
pool_size = mp.cpu_count() - 1 or 1


def time_test(data, **kwargs):
    """Run convert_results over *data*, printing elapsed time and row count.

    Keyword arguments (e.g. multiprocessing=True) are forwarded unchanged
    to convert_results and echoed in the printed line.
    """
    started_at = datetime.datetime.now()
    converted = convert_results(data, **kwargs)
    elapsed = datetime.datetime.now() - started_at
    print("time: ", elapsed, " ", kwargs, " - len: ", len(converted))
DATA = [
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Agent'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/chris-clark'}},
{'o': {'type': 'literal', 'value': 'Chris Clark'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/chris-clark'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Collection'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/marmot-collection/veterans-remember'}},
{'o': {'type': 'literal', 'value': 'Veterans Remember'},
'p': {'type': 'uri', 'value': 'http://www.w3.org/2000/01/rdf-schema#label'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/marmot-collection/veterans-remember'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Topic'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/970west'}},
{'o': {'type': 'literal', 'value': '970west'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/970west'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Topic'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/970west-veterans-remember'}},
{'o': {'type': 'literal', 'value': '970west -- veterans remember'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/970west-veterans-remember'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Topic'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/wwii'}},
{'o': {'type': 'literal', 'value': 'wwii'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/wwii'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Agent'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/charlie-blackmer'}},
{'o': {'type': 'literal', 'value': 'Charlie Blackmer'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/charlie-blackmer'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Agent'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/laura-mullenix'}},
{'o': {'type': 'literal', 'value': 'Laura Mullenix'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/laura-mullenix'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Topic'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/interview-with-ralph-dorn'}},
{'o': {'type': 'literal', 'value': 'Interview with Ralph Dorn'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/interview-with-ralph-dorn'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Place'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3186684'}},
{'o': {'type': 'literal', 'value': 'Grand Junction, Colorado'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3186684'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Summary'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3193326'}},
{'o': {'type': 'literal',
'value': 'Interview with Mesa County Libraries production team.'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3193326'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/instanceOf'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/marmot-collection/veterans-remember'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/partOf'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/970west'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/subject'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/970west-veterans-remember'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/subject'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri', 'value': 'https://plains2peaks.org/topic/wwii'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/subject'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/topic/interview-with-ralph-dorn'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/subject'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'bnode', 'value': 't3186684'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/subject'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/chris-clark'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/vocabulary/relators/cre'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/charlie-blackmer'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/vocabulary/relators/cre'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/laura-mullenix'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/vocabulary/relators/cre'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'bnode', 'value': 't3193326'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/summary'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'datatype': 'http://www.w3.org/2001/XMLSchema#dateTime',
'type': 'literal',
'value': '2018-03-28T21:01:01.049Z'},
'p': {'type': 'uri',
'value': 'http://knowledgelinks.io/ns/data-structures/esIndexTime'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Work'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/MovingImage'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008#Work'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Local'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3190361'}},
{'o': {'type': 'literal', 'value': 'mesa:48'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3190361'}},
{'o': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/mesa-county-libraries'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/agent'},
's': {'type': 'bnode', 'value': 't3192025'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Manufacture'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3192025'}},
{'o': {'type': 'literal', 'value': '2017-08-16T21:06:55.434652'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/generationDate'},
's': {'type': 'bnode', 'value': 't3194298'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/GenerationProcess'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3194298'}},
{'o': {'type': 'literal',
'value': 'Generated by BIBCAT version i1.13.0 from KnowledgeLinks.io',
'xml:lang': 'en'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3194298'}},
{'o': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/ralph-dorn'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/agent'},
's': {'type': 'bnode', 'value': 't3194722'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Manufacture'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3194722'}},
{'o': {'type': 'literal', 'value': 'Interview with Ralph Dorn'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/mainTitle'},
's': {'type': 'bnode', 'value': 't3196122'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Title'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3196122'}},
{'o': {'type': 'literal', 'value': 'Interview with Ralph Dorn'},
'p': {'type': 'uri', 'value': 'http://www.w3.org/2000/01/rdf-schema#label'},
's': {'type': 'bnode', 'value': 't3196122'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Carrier'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3196572'}},
{'o': {'type': 'literal', 'value': 'Moving Image'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3196572'}},
{'o': {'type': 'uri', 'value': 'https://marmot.org/'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/agent'},
's': {'type': 'bnode', 'value': 't3199929'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Distribution'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3199929'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/CoverArt'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3200840'}},
{'o': {'type': 'uri',
'value': 'https://islandora.marmot.org/islandora/object/mesa:48/datastream/TN/view'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'bnode', 'value': 't3200840'}},
{'o': {'type': 'uri', 'value': 'https://mesacountylibraries.org/'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/agent'},
's': {'type': 'bnode', 'value': 't3202252'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Publication'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'bnode', 'value': 't3202252'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Organization'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://marmot.org/'}},
{'o': {'type': 'uri', 'value': 'http://schema.org/NGO'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://marmot.org/'}},
{'o': {'type': 'literal', 'value': 'Marmot Library Network', 'xml:lang': 'en'},
'p': {'type': 'uri', 'value': 'http://www.w3.org/2000/01/rdf-schema#label'},
's': {'type': 'uri', 'value': 'https://marmot.org/'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Organization'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://mesacountylibraries.org/'}},
{'o': {'type': 'uri', 'value': 'http://schema.org/Library'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://mesacountylibraries.org/'}},
{'o': {'type': 'literal', 'value': 'Mesa County Libraries', 'xml:lang': 'en'},
'p': {'type': 'uri', 'value': 'http://www.w3.org/2000/01/rdf-schema#label'},
's': {'type': 'uri', 'value': 'https://mesacountylibraries.org/'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Agent'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/mesa-county-libraries'}},
{'o': {'type': 'literal', 'value': 'Mesa County Libraries'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/agent/mesa-county-libraries'}},
{'o': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/Agent'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/ralph-dorn'}},
{'o': {'type': 'literal', 'value': 'Ralph Dorn'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#value'},
's': {'type': 'uri', 'value': 'https://plains2peaks.org/agent/ralph-dorn'}},
{'o': {'type': 'bnode', 'value': 't3194298'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/generationProcess'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3196122'},
'p': {'type': 'uri', 'value': 'http://id.loc.gov/ontologies/bibframe/title'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3200840'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/coverArt'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3192025'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/provisionActivity'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3194722'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/provisionActivity'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3199929'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/provisionActivity'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3202252'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/provisionActivity'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3196572'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/carrier'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'bnode', 'value': 't3190361'},
'p': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/identifiedBy'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}},
{'o': {'type': 'uri',
'value': 'http://id.loc.gov/ontologies/bibframe/Instance'},
'p': {'type': 'uri',
'value': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'},
's': {'type': 'uri',
'value': 'https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008'}}]
if __name__ == '__main__':
    # Benchmark the in-memory sample DATA twice each way; the first pair of
    # runs also absorbs any interpreter/process warm-up cost.
    time_test(DATA)
    time_test(DATA, multiprocessing=True)
    time_test(DATA)
    time_test(DATA, multiprocessing=True)
    # Repeat the timing against live data pulled from a Blazegraph triplestore.
    # Imports are deferred to here so merely importing this module does not
    # require a running triplestore.
    from rdfframework.sparql import get_all_item_data
    from rdfframework.connections import Blazegraph
    from rdfframework.datatypes import RdfNsManager
    # Register the bibframe namespace prefix used by the fetched triples.
    RdfNsManager({"bf": "http://id.loc.gov/ontologies/bibframe/"})
    data_iri = "<https://plains2peaks.org/d573941e-82c6-11e7-b159-005056c00008>"
    conn = Blazegraph(namespace="plain2peak")
    data = get_all_item_data(data_iri, conn)
    print("data count: ", len(data))
    time_test(data)
    time_test(data, multiprocessing=True)
| 1,471 | 30 | 69 |
92d17c52525f5bc7c7b74005ec00f4d28c45876d | 10,429 | py | Python | mmseg/datasets/custom.py | ancientmooner/mmsegmentation | 1af2ad6a9f0a69e848c3cf8c307f75120c7f99b6 | [
"Apache-2.0"
] | 1 | 2021-03-14T06:18:25.000Z | 2021-03-14T06:18:25.000Z | mmseg/datasets/custom.py | dun933/mmsegmentation | 1af2ad6a9f0a69e848c3cf8c307f75120c7f99b6 | [
"Apache-2.0"
] | 1 | 2020-08-17T13:09:41.000Z | 2020-08-17T13:09:41.000Z | mmseg/datasets/custom.py | dun933/mmsegmentation | 1af2ad6a9f0a69e848c3cf8c307f75120c7f99b6 | [
"Apache-2.0"
] | 1 | 2021-05-21T10:37:03.000Z | 2021-05-21T10:37:03.000Z | import os.path as osp
from functools import reduce
import mmcv
import numpy as np
from mmcv.utils import print_log
from torch.utils.data import Dataset
from mmseg.core import mean_iou
from mmseg.utils import get_root_logger
from .builder import DATASETS
from .pipelines import Compose
@DATASETS.register_module()
class CustomDataset(Dataset):
    """Custom dataset for semantic segmentation.

    An example of file structure is as followed.

    .. code-block:: none

        ├── data
        │   ├── my_dataset
        │   │   ├── img_dir
        │   │   │   ├── train
        │   │   │   │   ├── xxx{img_suffix}
        │   │   │   │   ├── yyy{img_suffix}
        │   │   │   │   ├── zzz{img_suffix}
        │   │   │   ├── val
        │   │   ├── ann_dir
        │   │   │   ├── train
        │   │   │   │   ├── xxx{seg_map_suffix}
        │   │   │   │   ├── yyy{seg_map_suffix}
        │   │   │   │   ├── zzz{seg_map_suffix}
        │   │   │   ├── val

    The img/gt_semantic_seg pair of CustomDataset should be of the same
    except suffix. A valid img/gt_semantic_seg filename pair should be like
    ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included
    in the suffix). If split is given, then ``xxx`` is specified in txt file.
    Otherwise, all files in ``img_dir/``and ``ann_dir`` will be loaded.
    Please refer to ``docs/tutorials/new_dataset.md`` for more details.

    Args:
        pipeline (list[dict]): Processing pipeline
        img_dir (str): Path to image directory
        img_suffix (str): Suffix of images. Default: '.jpg'
        ann_dir (str, optional): Path to annotation directory. Default: None
        seg_map_suffix (str): Suffix of segmentation maps. Default: '.png'
        split (str, optional): Split txt file. If split is specified, only
            file with suffix in the splits will be loaded. Otherwise, all
            images in img_dir/ann_dir will be loaded. Default: None
        data_root (str, optional): Data root for img_dir/ann_dir. Default:
            None.
        test_mode (bool): If test_mode=True, gt wouldn't be loaded.
        ignore_index (int): The label index to be ignored. Default: 255
        reduce_zero_label (bool): Whether to mark label zero as ignored.
            Default: False
    """

    CLASSES = None

    PALETTE = None

    # NOTE: __init__ was missing from this copy of the class even though every
    # method reads attributes it sets (self.img_infos, self.pipeline, ...);
    # restored so instances are actually constructible.
    def __init__(self,
                 pipeline,
                 img_dir,
                 img_suffix='.jpg',
                 ann_dir=None,
                 seg_map_suffix='.png',
                 split=None,
                 data_root=None,
                 test_mode=False,
                 ignore_index=255,
                 reduce_zero_label=False):
        self.pipeline = Compose(pipeline)
        self.img_dir = img_dir
        self.img_suffix = img_suffix
        self.ann_dir = ann_dir
        self.seg_map_suffix = seg_map_suffix
        self.split = split
        self.data_root = data_root
        self.test_mode = test_mode
        self.ignore_index = ignore_index
        self.reduce_zero_label = reduce_zero_label

        # join paths if data_root is specified
        if self.data_root is not None:
            if not osp.isabs(self.img_dir):
                self.img_dir = osp.join(self.data_root, self.img_dir)
            if not (self.ann_dir is None or osp.isabs(self.ann_dir)):
                self.ann_dir = osp.join(self.data_root, self.ann_dir)
            if not (self.split is None or osp.isabs(self.split)):
                self.split = osp.join(self.data_root, self.split)

        # load annotations
        self.img_infos = self.load_annotations(self.img_dir, self.img_suffix,
                                               self.ann_dir,
                                               self.seg_map_suffix, self.split)

    def __len__(self):
        """Total number of samples of data."""
        return len(self.img_infos)

    def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix,
                         split):
        """Load annotation from directory.

        Args:
            img_dir (str): Path to image directory
            img_suffix (str): Suffix of images.
            ann_dir (str|None): Path to annotation directory.
            seg_map_suffix (str|None): Suffix of segmentation maps.
            split (str|None): Split txt file. If split is specified, only file
                with suffix in the splits will be loaded. Otherwise, all images
                in img_dir/ann_dir will be loaded. Default: None

        Returns:
            list[dict]: All image info of dataset.
        """
        img_infos = []
        if split is not None:
            with open(split) as f:
                for line in f:
                    img_name = line.strip()
                    img_file = osp.join(img_dir, img_name + img_suffix)
                    img_info = dict(filename=img_file)
                    if ann_dir is not None:
                        seg_map = osp.join(ann_dir, img_name + seg_map_suffix)
                        img_info['ann'] = dict(seg_map=seg_map)
                    img_infos.append(img_info)
        else:
            for img in mmcv.scandir(img_dir, img_suffix, recursive=True):
                img_file = osp.join(img_dir, img)
                img_info = dict(filename=img_file)
                if ann_dir is not None:
                    seg_map = osp.join(ann_dir,
                                       img.replace(img_suffix, seg_map_suffix))
                    img_info['ann'] = dict(seg_map=seg_map)
                img_infos.append(img_info)

        print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger())
        return img_infos

    def get_ann_info(self, idx):
        """Get annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        return self.img_infos[idx]['ann']

    def pre_pipeline(self, results):
        """Prepare results dict for pipeline."""
        results['seg_fields'] = []

    def __getitem__(self, idx):
        """Get training/test data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training/test data (with annotation if `test_mode` is set
                False).
        """
        if self.test_mode:
            return self.prepare_test_img(idx)
        else:
            return self.prepare_train_img(idx)

    def prepare_train_img(self, idx):
        """Get training data and annotations after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training data and annotation after pipeline with new keys
                introduced by pipeline.
        """
        img_info = self.img_infos[idx]
        ann_info = self.get_ann_info(idx)
        results = dict(img_info=img_info, ann_info=ann_info)
        self.pre_pipeline(results)
        return self.pipeline(results)

    def prepare_test_img(self, idx):
        """Get testing data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Testing data after pipeline with new keys intorduced by
                piepline.
        """
        img_info = self.img_infos[idx]
        results = dict(img_info=img_info)
        self.pre_pipeline(results)
        return self.pipeline(results)

    def format_results(self, results, **kwargs):
        """Place holder to format result to dataset specific output."""
        pass

    def get_gt_seg_maps(self):
        """Get ground truth segmentation maps for evaluation."""
        gt_seg_maps = []
        for img_info in self.img_infos:
            gt_seg_map = mmcv.imread(
                img_info['ann']['seg_map'], flag='unchanged', backend='pillow')
            if self.reduce_zero_label:
                # avoid using underflow conversion
                gt_seg_map[gt_seg_map == 0] = 255
                gt_seg_map = gt_seg_map - 1
                gt_seg_map[gt_seg_map == 254] = 255
            gt_seg_maps.append(gt_seg_map)

        return gt_seg_maps

    def evaluate(self, results, metric='mIoU', logger=None, **kwargs):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str, float]: Default metrics.
        """
        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mIoU']
        if metric not in allowed_metrics:
            raise KeyError('metric {} is not supported'.format(metric))

        eval_results = {}
        gt_seg_maps = self.get_gt_seg_maps()
        if self.CLASSES is None:
            num_classes = len(
                reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps]))
        else:
            num_classes = len(self.CLASSES)

        all_acc, acc, iou = mean_iou(
            results, gt_seg_maps, num_classes, ignore_index=self.ignore_index)
        summary_str = ''
        summary_str += 'per class results:\n'

        line_format = '{:<15} {:>10} {:>10}\n'
        summary_str += line_format.format('Class', 'IoU', 'Acc')
        if self.CLASSES is None:
            class_names = tuple(range(num_classes))
        else:
            class_names = self.CLASSES
        for i in range(num_classes):
            iou_str = '{:.2f}'.format(iou[i] * 100)
            acc_str = '{:.2f}'.format(acc[i] * 100)
            summary_str += line_format.format(class_names[i], iou_str, acc_str)
        summary_str += 'Summary:\n'
        line_format = '{:<15} {:>10} {:>10} {:>10}\n'
        summary_str += line_format.format('Scope', 'mIoU', 'mAcc', 'aAcc')

        iou_str = '{:.2f}'.format(np.nanmean(iou) * 100)
        acc_str = '{:.2f}'.format(np.nanmean(acc) * 100)
        all_acc_str = '{:.2f}'.format(all_acc * 100)
        summary_str += line_format.format('global', iou_str, acc_str,
                                          all_acc_str)
        print_log(summary_str, logger)

        eval_results['mIoU'] = np.nanmean(iou)
        eval_results['mAcc'] = np.nanmean(acc)
        eval_results['aAcc'] = all_acc

        return eval_results
| 35.715753 | 79 | 0.565443 | import os.path as osp
from functools import reduce
import mmcv
import numpy as np
from mmcv.utils import print_log
from torch.utils.data import Dataset
from mmseg.core import mean_iou
from mmseg.utils import get_root_logger
from .builder import DATASETS
from .pipelines import Compose
@DATASETS.register_module()
class CustomDataset(Dataset):
"""Custom dataset for semantic segmentation.
An example of file structure is as followed.
.. code-block:: none
├── data
│ ├── my_dataset
│ │ ├── img_dir
│ │ │ ├── train
│ │ │ │ ├── xxx{img_suffix}
│ │ │ │ ├── yyy{img_suffix}
│ │ │ │ ├── zzz{img_suffix}
│ │ │ ├── val
│ │ ├── ann_dir
│ │ │ ├── train
│ │ │ │ ├── xxx{seg_map_suffix}
│ │ │ │ ├── yyy{seg_map_suffix}
│ │ │ │ ├── zzz{seg_map_suffix}
│ │ │ ├── val
The img/gt_semantic_seg pair of CustomDataset should be of the same
except suffix. A valid img/gt_semantic_seg filename pair should be like
``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included
in the suffix). If split is given, then ``xxx`` is specified in txt file.
Otherwise, all files in ``img_dir/``and ``ann_dir`` will be loaded.
Please refer to ``docs/tutorials/new_dataset.md`` for more details.
Args:
pipeline (list[dict]): Processing pipeline
img_dir (str): Path to image directory
img_suffix (str): Suffix of images. Default: '.jpg'
ann_dir (str, optional): Path to annotation directory. Default: None
seg_map_suffix (str): Suffix of segmentation maps. Default: '.png'
split (str, optional): Split txt file. If split is specified, only
file with suffix in the splits will be loaded. Otherwise, all
images in img_dir/ann_dir will be loaded. Default: None
data_root (str, optional): Data root for img_dir/ann_dir. Default:
None.
test_mode (bool): If test_mode=True, gt wouldn't be loaded.
ignore_index (int): The label index to be ignored. Default: 255
reduce_zero_label (bool): Whether to mark label zero as ignored.
Default: False
"""
CLASSES = None
PALETTE = None
def __init__(self,
pipeline,
img_dir,
img_suffix='.jpg',
ann_dir=None,
seg_map_suffix='.png',
split=None,
data_root=None,
test_mode=False,
ignore_index=255,
reduce_zero_label=False):
self.pipeline = Compose(pipeline)
self.img_dir = img_dir
self.img_suffix = img_suffix
self.ann_dir = ann_dir
self.seg_map_suffix = seg_map_suffix
self.split = split
self.data_root = data_root
self.test_mode = test_mode
self.ignore_index = ignore_index
self.reduce_zero_label = reduce_zero_label
# join paths if data_root is specified
if self.data_root is not None:
if not osp.isabs(self.img_dir):
self.img_dir = osp.join(self.data_root, self.img_dir)
if not (self.ann_dir is None or osp.isabs(self.ann_dir)):
self.ann_dir = osp.join(self.data_root, self.ann_dir)
if not (self.split is None or osp.isabs(self.split)):
self.split = osp.join(self.data_root, self.split)
# load annotations
self.img_infos = self.load_annotations(self.img_dir, self.img_suffix,
self.ann_dir,
self.seg_map_suffix, self.split)
def __len__(self):
"""Total number of samples of data."""
return len(self.img_infos)
def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix,
split):
"""Load annotation from directory.
Args:
img_dir (str): Path to image directory
img_suffix (str): Suffix of images.
ann_dir (str|None): Path to annotation directory.
seg_map_suffix (str|None): Suffix of segmentation maps.
split (str|None): Split txt file. If split is specified, only file
with suffix in the splits will be loaded. Otherwise, all images
in img_dir/ann_dir will be loaded. Default: None
Returns:
list[dict]: All image info of dataset.
"""
img_infos = []
if split is not None:
with open(split) as f:
for line in f:
img_name = line.strip()
img_file = osp.join(img_dir, img_name + img_suffix)
img_info = dict(filename=img_file)
if ann_dir is not None:
seg_map = osp.join(ann_dir, img_name + seg_map_suffix)
img_info['ann'] = dict(seg_map=seg_map)
img_infos.append(img_info)
else:
for img in mmcv.scandir(img_dir, img_suffix, recursive=True):
img_file = osp.join(img_dir, img)
img_info = dict(filename=img_file)
if ann_dir is not None:
seg_map = osp.join(ann_dir,
img.replace(img_suffix, seg_map_suffix))
img_info['ann'] = dict(seg_map=seg_map)
img_infos.append(img_info)
print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger())
return img_infos
def get_ann_info(self, idx):
"""Get annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
return self.img_infos[idx]['ann']
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
results['seg_fields'] = []
def __getitem__(self, idx):
"""Get training/test data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training/test data (with annotation if `test_mode` is set
False).
"""
if self.test_mode:
return self.prepare_test_img(idx)
else:
return self.prepare_train_img(idx)
def prepare_train_img(self, idx):
"""Get training data and annotations after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Training data and annotation after pipeline with new keys
introduced by pipeline.
"""
img_info = self.img_infos[idx]
ann_info = self.get_ann_info(idx)
results = dict(img_info=img_info, ann_info=ann_info)
self.pre_pipeline(results)
return self.pipeline(results)
def prepare_test_img(self, idx):
"""Get testing data after pipeline.
Args:
idx (int): Index of data.
Returns:
dict: Testing data after pipeline with new keys intorduced by
piepline.
"""
img_info = self.img_infos[idx]
results = dict(img_info=img_info)
self.pre_pipeline(results)
return self.pipeline(results)
    def format_results(self, results, **kwargs):
        """Place holder to format result to dataset specific output."""
        # Intentionally a no-op here; dataset subclasses override this to
        # write results in their own submission format.
        pass
def get_gt_seg_maps(self):
"""Get ground truth segmentation maps for evaluation."""
gt_seg_maps = []
for img_info in self.img_infos:
gt_seg_map = mmcv.imread(
img_info['ann']['seg_map'], flag='unchanged', backend='pillow')
if self.reduce_zero_label:
# avoid using underflow conversion
gt_seg_map[gt_seg_map == 0] = 255
gt_seg_map = gt_seg_map - 1
gt_seg_map[gt_seg_map == 254] = 255
gt_seg_maps.append(gt_seg_map)
return gt_seg_maps
    def evaluate(self, results, metric='mIoU', logger=None, **kwargs):
        """Evaluate the dataset.

        Computes per-class IoU/accuracy against the ground-truth maps,
        prints a formatted summary table, and returns the aggregate
        metrics.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Only
                ``'mIoU'`` is supported; a single-element list is also
                accepted.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str, float]: ``mIoU``, ``mAcc`` and ``aAcc`` values.

        Raises:
            KeyError: If ``metric`` is not a supported metric name.
        """
        # Accept a single-element list as a convenience.
        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mIoU']
        if metric not in allowed_metrics:
            raise KeyError('metric {} is not supported'.format(metric))
        eval_results = {}
        gt_seg_maps = self.get_gt_seg_maps()
        # Without an explicit class list, infer the class count from the
        # union of labels actually present in the ground truth.
        if self.CLASSES is None:
            num_classes = len(
                reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps]))
        else:
            num_classes = len(self.CLASSES)
        all_acc, acc, iou = mean_iou(
            results, gt_seg_maps, num_classes, ignore_index=self.ignore_index)
        # Per-class table: one row of IoU/Acc per class.
        summary_str = ''
        summary_str += 'per class results:\n'
        line_format = '{:<15} {:>10} {:>10}\n'
        summary_str += line_format.format('Class', 'IoU', 'Acc')
        if self.CLASSES is None:
            class_names = tuple(range(num_classes))
        else:
            class_names = self.CLASSES
        for i in range(num_classes):
            iou_str = '{:.2f}'.format(iou[i] * 100)
            acc_str = '{:.2f}'.format(acc[i] * 100)
            summary_str += line_format.format(class_names[i], iou_str, acc_str)
        # Global summary row (nanmean skips classes absent from the GT).
        summary_str += 'Summary:\n'
        line_format = '{:<15} {:>10} {:>10} {:>10}\n'
        summary_str += line_format.format('Scope', 'mIoU', 'mAcc', 'aAcc')
        iou_str = '{:.2f}'.format(np.nanmean(iou) * 100)
        acc_str = '{:.2f}'.format(np.nanmean(acc) * 100)
        all_acc_str = '{:.2f}'.format(all_acc * 100)
        summary_str += line_format.format('global', iou_str, acc_str,
                                          all_acc_str)
        print_log(summary_str, logger)
        eval_results['mIoU'] = np.nanmean(iou)
        eval_results['mAcc'] = np.nanmean(acc)
        eval_results['aAcc'] = all_acc
        return eval_results
| 1,426 | 0 | 27 |
9b112d956c3ebbd98b04eaed6459100590a7c391 | 7,426 | py | Python | Project0/test_Ship.py | EricCharnesky/CIS2001-Winter2022 | 9c74433ade96e4c7bf9029543285f1ded1fe91e5 | [
"MIT"
] | 3 | 2022-01-20T21:55:17.000Z | 2022-02-02T23:10:45.000Z | Project0/test_Ship.py | EricCharnesky/CIS2001-Winter2022 | 9c74433ade96e4c7bf9029543285f1ded1fe91e5 | [
"MIT"
] | null | null | null | Project0/test_Ship.py | EricCharnesky/CIS2001-Winter2022 | 9c74433ade96e4c7bf9029543285f1ded1fe91e5 | [
"MIT"
] | 2 | 2022-02-06T02:59:32.000Z | 2022-02-23T02:34:13.000Z | from unittest import TestCase
from Ship import Ship, Repair, Battleship, Corvette, Alignment
| 31.201681 | 86 | 0.640587 | from unittest import TestCase
from Ship import Ship, Repair, Battleship, Corvette, Alignment
class TestShip(TestCase):
    """Unit tests for the Ship hierarchy (Ship, Battleship, Corvette, Repair).

    Each test follows the arrange / act / assert pattern. Expected values
    for derived-class behaviour (torpedo damage, repair amounts, movement
    cost) mirror constants defined in the Ship module under test.
    """

    def test_ship_init(self):
        """The constructor stores every field exposed by the getters."""
        # arrange
        expected_name = "ship"
        expected_type = "Ship"
        expected_alignment = Alignment.US
        expected_x = 1
        expected_y = 2
        expected_max_health = 10
        expected_current_health = 10
        expected_attack_power = 10
        expected_attack_range = 10
        # act
        ship = Ship(expected_name, expected_x, expected_y, expected_alignment,
                    expected_max_health, expected_attack_range, expected_attack_power)
        actual_name = ship.get_name()
        actual_type = ship.get_type()
        actual_x = ship.get_x()
        actual_y = ship.get_y()
        actual_alignment = ship.get_alignment()
        actual_max_health = ship.get_max_health()
        actual_current_health = ship.get_current_health()
        actual_attack_power = ship.get_attack_power()
        actual_attack_range = ship.get_attack_range()
        # assert
        self.assertEqual(expected_name, actual_name )
        self.assertEqual(expected_type, actual_type)
        self.assertEqual(expected_x, actual_x)
        self.assertEqual(expected_y, actual_y)
        self.assertEqual(expected_alignment, actual_alignment)
        self.assertEqual(expected_max_health, actual_max_health)
        self.assertEqual(expected_current_health, actual_current_health)
        self.assertEqual(expected_attack_power, actual_attack_power)
        self.assertEqual(expected_attack_range, actual_attack_range)

    def test_ship_attack_within_range(self):
        """An in-range enemy target takes full attack-power damage."""
        # arrange
        ship = Ship("", 0, 0, Alignment.US, 10, 10, 10)
        target = Ship("", 0, 0, Alignment.THEM, 10, 10, 10)
        expected_current_health = 0
        # act
        ship.attack(target)
        actual_current_health = target.get_current_health()
        # assert
        self.assertEqual(expected_current_health, actual_current_health)

    def test_ship_attack_not_within_range(self):
        """An out-of-range target takes no damage."""
        # arrange
        ship = Ship("", 0, 0, Alignment.US, 10, 10, 10)
        target = Ship("", 10, 10, Alignment.THEM, 10, 10, 10)
        expected_current_health = 10
        # act
        ship.attack(target)
        actual_current_health = target.get_current_health()
        # assert
        self.assertEqual(expected_current_health, actual_current_health)

    def test_ship_attack_same_team(self):
        """A same-alignment target is never damaged."""
        # arrange
        ship = Ship("", 0, 0, Alignment.US, 10, 10, 10)
        target = Ship("", 0, 0, Alignment.US, 10, 10, 10)
        expected_current_health = 10
        # act
        ship.attack(target)
        actual_current_health = target.get_current_health()
        # assert
        self.assertEqual(expected_current_health, actual_current_health)

    def test_ship_status(self):
        """status() reports name, type, health and location in a fixed layout."""
        # arrange
        ship = Ship("test", 0, 0, Alignment.US, 10, 10, 10)
        expected_status = """test
type: Ship
health: 10
location: 0, 0"""
        # act
        actual_status = ship.status()
        # assert
        self.assertEqual(expected_status, actual_status)

    def test_ship_move(self):
        """move() applies the velocity to x/y and adjusts health."""
        # NOTE(review): after assess_damage(2) and one move() the expected
        # health is 9, which implies move() restores 1 health (or damage is
        # mitigated) — confirm against the Ship implementation.
        # arrange
        ship = Ship("test", 0, 0, Alignment.US, 10, 10, 10, 1, 1)
        ship.assess_damage(2)
        expected_current_health = 9
        expected_x = 1
        expected_y = 1
        # act
        ship.move()
        actual_current_health = ship.get_current_health()
        actual_x = ship.get_x()
        actual_y = ship.get_y()
        # assert
        self.assertEqual(expected_y, actual_y)
        self.assertEqual(expected_x, actual_x)
        self.assertEqual(expected_current_health, actual_current_health)

    def test_ship_assess_damage_does_not_go_below_0(self):
        """Health is clamped at 0 even for over-kill damage."""
        # arrange
        ship = Ship("test", 0, 0, Alignment.US, 10, 10, 10)
        expected_current_health = 0
        # act
        ship.assess_damage(20)
        actual_current_health = ship.get_current_health()
        # assert
        self.assertEqual(expected_current_health, actual_current_health)

    def test_ship_assess_damage_does_not_go_above_max(self):
        """Negative damage (healing) never pushes health past the maximum."""
        # arrange
        ship = Ship("test", 0, 0, Alignment.US, 10, 10, 10)
        expected_current_health = 10
        # act
        ship.assess_damage(-1)
        actual_current_health = ship.get_current_health()
        # assert
        self.assertEqual(expected_current_health, actual_current_health)

    def test_ship_change_alignment_us_to_them(self):
        """change_alignment() flips a US ship to THEM."""
        # arrange
        ship = Ship("test", 0, 0, Alignment.US, 10, 10, 10)
        expected_alignment = Alignment.THEM
        # act
        ship.change_alignment()
        actual_alignment = ship.get_alignment()
        # assert
        self.assertEqual(expected_alignment, actual_alignment)

    def test_ship_change_alignment_them_to_us(self):
        """change_alignment() flips a THEM ship to US."""
        # arrange
        ship = Ship("test", 0, 0, Alignment.THEM, 10, 10, 10)
        expected_alignment = Alignment.US
        # act
        ship.change_alignment()
        actual_alignment = ship.get_alignment()
        # assert
        self.assertEqual(expected_alignment, actual_alignment)

    def test_battleship_attack(self):
        """A battleship attack consumes one torpedo and deals extra damage."""
        # NOTE(review): 50 -> 30 health implies 20 damage per torpedo attack;
        # confirm against the Battleship implementation.
        # arrange
        ship = Battleship("", 0, 0, Alignment.US)
        target = Ship("", 0, 0, Alignment.THEM, 50, 10, 10)
        expected_current_health = 30
        expected_torpedoes = 9
        # act
        ship.attack(target)
        actual_current_health = target.get_current_health()
        actual_torpedoes = ship.get_torpedoes()
        # assert
        self.assertEqual(expected_current_health, actual_current_health)
        self.assertEqual(expected_torpedoes, actual_torpedoes)

    def test_battleship_attack_out_of_torpedoes(self):
        """Once the 10 torpedoes are spent, further attacks deal less damage."""
        # arrange
        ship = Battleship("", 0, 0, Alignment.US)
        target = Ship("", 0, 0, Alignment.THEM, 300, 10, 10)
        expected_current_health = 90
        expected_torpedoes = 0
        # act
        for attack in range(11):
            ship.attack(target)
        actual_current_health = target.get_current_health()
        actual_torpedoes = ship.get_torpedoes()
        # assert
        self.assertEqual(expected_current_health, actual_current_health)
        self.assertEqual(expected_torpedoes, actual_torpedoes)

    def test_battleship_status(self):
        """Battleship status() adds a torpedoes line to the base layout."""
        # arrange
        ship = Battleship("test", 0, 0, Alignment.US)
        expected_status = """test
type: Battleship
health: 100
location: 0, 0
torpedoes: 10"""
        # act
        actual_status = ship.status()
        # assert
        self.assertEqual(expected_status, actual_status)

    def test_corvette_attack(self):
        """A corvette attack converts the target to the attacker's side."""
        # arrange
        ship = Corvette("", 0, 0, Alignment.US)
        target = Ship("", 0, 0, Alignment.THEM, 50, 10, 10)
        expected_alignment = Alignment.US
        # act
        ship.attack(target)
        actual_alignment = target.get_alignment()
        # assert
        self.assertEqual(expected_alignment, actual_alignment)

    def test_repair(self):
        """A repair ship 'attack' restores a damaged ally to full health."""
        # arrange
        ship = Repair("", 0, 0, Alignment.US)
        target = Ship("", 0, 0, Alignment.US, 50, 10, 10)
        expected_current_health = 50
        target.assess_damage(10)
        # act
        ship.attack(target)
        actual_current_health = target.get_current_health()
        # assert
        self.assertEqual(expected_current_health, actual_current_health)
4b2f7907cc7d03c1a77c8f65454fe70bb7de69cf | 313 | py | Python | ramda/nth.py | jakobkolb/ramda.py | 982b2172f4bb95b9a5b09eff8077362d6f2f0920 | [
"MIT"
] | 56 | 2018-08-06T08:44:58.000Z | 2022-03-17T09:49:03.000Z | ramda/nth.py | jakobkolb/ramda.py | 982b2172f4bb95b9a5b09eff8077362d6f2f0920 | [
"MIT"
] | 28 | 2019-06-17T11:09:52.000Z | 2022-02-18T16:59:21.000Z | ramda/nth.py | jakobkolb/ramda.py | 982b2172f4bb95b9a5b09eff8077362d6f2f0920 | [
"MIT"
] | 5 | 2019-09-18T09:24:38.000Z | 2021-07-21T08:40:23.000Z | from toolz import curry
@curry
def nth(n, xs):
    """Fetch element ``n`` from a sequence or string.

    Negative ``n`` indexes from the end (index ``len(xs) + n``). When the
    index is out of bounds or ``xs`` is not indexable, an empty string is
    returned for strings and ``None`` for everything else.
    """
    try:
        return xs[n]
    except (IndexError, TypeError):
        return "" if type(xs) is str else None
| 22.357143 | 80 | 0.603834 | from toolz import curry
@curry
def nth(n, xs):
"""Returns the nth element of the given list or string. If n is negative the
element at index length + n is returned"""
try:
return xs[n]
except (IndexError, TypeError):
if type(xs) is str:
return ""
return None
| 0 | 0 | 0 |
a897a163b5047e9dc7df25355568da4762f01b4b | 4,835 | py | Python | duel_dqn.py | zachary2wave/UAV | df9f46a54b842504f13a4bb257f249b094addc0b | [
"MIT"
] | null | null | null | duel_dqn.py | zachary2wave/UAV | df9f46a54b842504f13a4bb257f249b094addc0b | [
"MIT"
] | null | null | null | duel_dqn.py | zachary2wave/UAV | df9f46a54b842504f13a4bb257f249b094addc0b | [
"MIT"
] | null | null | null | import scipy.io as sio
import time
import os
import numpy as np
import gym
from keras import regularizers
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, Subtract, Concatenate, BatchNormalization
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
import tensorflow as tf
# Timestamp used to name this run's output directory.
nowtime = time.strftime("%y_%m_%d_%H",time.localtime())
ENV_NAME = 'uav-D2Ddy-v0'
# ENV_NAME = 'discrete-action-uav-stable-2d-v0'
# Create a per-run output directory "<env>-<timestamp>" for .mat logs
# and saved weights.
if not os.path.exists(ENV_NAME+'-'+nowtime):
    os.mkdir(ENV_NAME+'-'+nowtime)
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
# Fixed seeds for reproducibility.
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
# Names of the scripted heuristics passed to dqn.learning below.
policy_list = ['maxG', 'minSNR', 'cline']
# Next, we build a very simple model regardless of the dueling architecture
observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
flattened_observation = Flatten()(observation_input)
# NOTE(review): every Dense layer below except the final 128-unit one is
# applied to `flattened_observation`, so each assignment throws away the
# previous `x`; the effective network is only
# Dense(256, l2=0.1) -> Dense(128) -> Dense(nb_actions). This looks like a
# copy-paste slip — confirm whether the layers were meant to be chained.
x = Dense(1024,
          kernel_regularizer=regularizers.l2(0.01),
          bias_regularizer=regularizers.l2(0.01))(flattened_observation)
x = Activation('relu')(x)
x = Dense(512,
          kernel_regularizer=regularizers.l2(0.01),
          bias_regularizer=regularizers.l2(0.01))(flattened_observation)
x = Activation('relu')(x)
x = Dense(512,
          kernel_regularizer=regularizers.l2(0.01),
          bias_regularizer=regularizers.l2(0.01))(flattened_observation)
x = Activation('relu')(x)
x = Dense(512,
          kernel_regularizer=regularizers.l2(0.01),
          bias_regularizer=regularizers.l2(0.01))(flattened_observation)
x = Activation('relu')(x)
x = Dense(256,
          kernel_regularizer=regularizers.l2(0.01),
          bias_regularizer=regularizers.l2(0.01))(flattened_observation)
x = Activation('relu')(x)
# NOTE(review): this layer's l2 weight (0.1) differs from the 0.01 used
# elsewhere — confirm whether that is intentional.
x = Dense(256,
          kernel_regularizer=regularizers.l2(0.1),
          bias_regularizer=regularizers.l2(0.1))(flattened_observation)
x = Activation('relu')(x)
x = Dense(128,
          kernel_regularizer=regularizers.l2(0.01),
          bias_regularizer=regularizers.l2(0.01))(x)
x = Activation('relu')(x)
# Linear Q-value head: one output per discrete action.
x = Dense(nb_actions)(x)
x = Activation('linear')(x)
model = Model(inputs=[observation_input], outputs=[x])
# Replay buffer and Boltzmann exploration policy for the DQN agent.
memory = SequentialMemory(limit=50000, window_length=1)
policy = BoltzmannQPolicy()
# enable the dueling networ
# you can specify the dueling_type to one of {'avg','max','naive'}
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=100,
               enable_dueling_network=True, dueling_type='avg', target_model_update=1e-3, policy=policy)
dqn.compile(Adam(lr=1e-4), metrics=['mae'])
# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
# NOTE(review): `Given_policy` is referenced here but not defined in this
# excerpt; it is expected to supply scripted actions for imitation learning.
history = dqn.learning(env, Given_policy, policy_list, nb_steps=5e6, visualize=False, log_interval=1000, verbose=2,
                       nb_max_episode_steps=1000, imitation_leaning_time=0, reinforcement_learning_time=1e10)
# Persist the training history for offline analysis.
sio.savemat(ENV_NAME+'-'+nowtime+'/fit.mat', history.history)
# After training is done, we save the final weights.
dqn.save_weights(ENV_NAME+'-'+nowtime+'/fit-weights.h5f', overwrite=True)
# Finally, evaluate our algorithm for 5 episodes.
# NOTE(review): the comment above says 5 episodes but nb_episodes=10.
history = dqn.test(env, nb_episodes=10, visualize=True, nb_max_episode_steps=5000)
sio.savemat(ENV_NAME+'-'+nowtime+'/test.mat', history.history)
import time
import os
import numpy as np
import gym
from keras import regularizers
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, Subtract, Concatenate, BatchNormalization
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
import tensorflow as tf
# Timestamp used to name this run's output directory.
nowtime = time.strftime("%y_%m_%d_%H",time.localtime())
ENV_NAME = 'uav-D2Ddy-v0'
# ENV_NAME = 'discrete-action-uav-stable-2d-v0'
# Create a per-run output directory "<env>-<timestamp>" for .mat logs
# and saved weights.
if not os.path.exists(ENV_NAME+'-'+nowtime):
    os.mkdir(ENV_NAME+'-'+nowtime)
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
# Fixed seeds for reproducibility.
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
# Names of the scripted heuristics passed to dqn.learning below.
policy_list = ['maxG', 'minSNR', 'cline']
def Given_policy(env, policy, now):
    """Pick a scripted action for the UAV according to a named heuristic.

    Args:
        env: Environment exposing service-point positions (``SPplacex`` /
            ``SPplacey``), demand ``G``, ``SNR``, the current target index
            ``cline``, the UAV position ``placex``/``placey``, velocity
            ``v``, motion limits ``delta``/``amax``/``Vmax`` and the
            discrete action table ``a_sp``.
        policy (str): One of ``'maxG'``, ``'minSNR'``, ``'random'`` or
            ``'cline'``.
        now (int): Index of the currently targeted service point.

    Returns:
        tuple: ``(action, num)`` — index into ``env.a_sp`` and the chosen
        service-point index.
    """
    dx = env.SPplacex
    dy = env.SPplacey
    active = np.where(env.G != 0)[0]
    # Choose the target service point according to the heuristic; every
    # branch only differs in how `num` is selected.
    if policy == 'maxG':
        num = np.argmax(env.G)
    elif policy == 'minSNR':
        num = now
        if env.G[num] == 0:
            # NOTE(review): the "+ 10000" offset does not affect argmin;
            # presumably left over from masking — confirm intent.
            num = active[np.argmin(env.SNR[active] + 10000)]
    elif policy == 'random':
        num = now
        if env.G[env.cline] == 0:
            num = np.random.choice(active)
    elif policy == 'cline':
        num = env.cline
        if env.G[env.cline] == 0:
            num = np.random.choice(active)
    # Unit direction from the UAV towards the chosen service point.
    aimx = dx[num] - env.placex
    aimy = dy[num] - env.placey
    norm = np.sqrt(aimx ** 2 + aimy ** 2)
    aimx = aimx / norm
    aimy = aimy / norm
    # Zero out any component whose acceleration would break the speed cap.
    if np.abs(env.v[0] + aimx * env.delta * env.amax) > env.Vmax:
        aimx = 0
    if np.abs(env.v[1] + aimy * env.delta * env.amax) > env.Vmax:
        aimy = 0
    # Quantise each component to multiples of 5 within [-20, 20].
    aimx = np.around(np.abs(aimx * 20) / 5) * np.sign(aimx) * 5
    aimy = np.around(np.abs(aimy * 20) / 5) * np.sign(aimy) * 5
    return env.a_sp.index([aimx, aimy]), num
# Next, we build a very simple model regardless of the dueling architecture
observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
flattened_observation = Flatten()(observation_input)
# Fully-connected trunk. Each Dense layer is applied to the previous
# layer's output; the original applied every layer (except the last two)
# to `flattened_observation`, which silently discarded most of the stack.
x = Dense(1024,
          kernel_regularizer=regularizers.l2(0.01),
          bias_regularizer=regularizers.l2(0.01))(flattened_observation)
x = Activation('relu')(x)
x = Dense(512,
          kernel_regularizer=regularizers.l2(0.01),
          bias_regularizer=regularizers.l2(0.01))(x)
x = Activation('relu')(x)
x = Dense(512,
          kernel_regularizer=regularizers.l2(0.01),
          bias_regularizer=regularizers.l2(0.01))(x)
x = Activation('relu')(x)
x = Dense(512,
          kernel_regularizer=regularizers.l2(0.01),
          bias_regularizer=regularizers.l2(0.01))(x)
x = Activation('relu')(x)
x = Dense(256,
          kernel_regularizer=regularizers.l2(0.01),
          bias_regularizer=regularizers.l2(0.01))(x)
x = Activation('relu')(x)
# NOTE(review): this layer's l2 weight (0.1) differs from the 0.01 used
# elsewhere in the original; kept as-is pending confirmation.
x = Dense(256,
          kernel_regularizer=regularizers.l2(0.1),
          bias_regularizer=regularizers.l2(0.1))(x)
x = Activation('relu')(x)
x = Dense(128,
          kernel_regularizer=regularizers.l2(0.01),
          bias_regularizer=regularizers.l2(0.01))(x)
x = Activation('relu')(x)
# Linear Q-value head: one output per discrete action.
x = Dense(nb_actions)(x)
x = Activation('linear')(x)
model = Model(inputs=[observation_input], outputs=[x])
# Replay buffer and Boltzmann exploration policy for the DQN agent.
memory = SequentialMemory(limit=50000, window_length=1)
policy = BoltzmannQPolicy()
# enable the dueling network
# you can specify the dueling_type to one of {'avg','max','naive'}
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=100,
               enable_dueling_network=True, dueling_type='avg', target_model_update=1e-3, policy=policy)
dqn.compile(Adam(lr=1e-4), metrics=['mae'])
# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
history = dqn.learning(env, Given_policy, policy_list, nb_steps=5e6, visualize=False, log_interval=1000, verbose=2,
                       nb_max_episode_steps=1000, imitation_leaning_time=0, reinforcement_learning_time=1e10)
# Persist the training history for offline analysis.
sio.savemat(ENV_NAME+'-'+nowtime+'/fit.mat', history.history)
# After training is done, we save the final weights.
dqn.save_weights(ENV_NAME+'-'+nowtime+'/fit-weights.h5f', overwrite=True)
# Finally, evaluate our algorithm for 5 episodes.
# NOTE(review): the comment above says 5 episodes but nb_episodes=10.
history = dqn.test(env, nb_episodes=10, visualize=True, nb_max_episode_steps=5000)
sio.savemat(ENV_NAME+'-'+nowtime+'/test.mat', history.history)
e831e91e03091f462d6e9c6bc4af36897b8dca5b | 1,825 | py | Python | src/artifice/scraper/utils/general.py | artifice-project/artifice-scraper | f224a0da22162fd479d6b9f9095ff5cae4723716 | [
"MIT"
] | null | null | null | src/artifice/scraper/utils/general.py | artifice-project/artifice-scraper | f224a0da22162fd479d6b9f9095ff5cae4723716 | [
"MIT"
] | 5 | 2019-09-18T19:17:14.000Z | 2021-03-20T01:46:06.000Z | src/artifice/scraper/utils/general.py | artifice-project/artifice-scraper | f224a0da22162fd479d6b9f9095ff5cae4723716 | [
"MIT"
] | null | null | null |
def headify(arg):
'''
Eliminates frustration resulting from the automatic formatting
of request header keys.
>>> headify('AUTH_TOKEN')
'Auth-Token'
# TODO::
# >>> headify({'AUTH_TOKEN': 'Unchanged_Value'})
# {'Auth-Token': 'Unchanged_Value'}
'''
func = lambda x: '-'.join([_.title() for _ in x.split('_')])
return func(arg)
| 23.701299 | 109 | 0.61589 |
def headify(arg):
'''
Eliminates frustration resulting from the automatic formatting
of request header keys.
>>> headify('AUTH_TOKEN')
'Auth-Token'
# TODO::
# >>> headify({'AUTH_TOKEN': 'Unchanged_Value'})
# {'Auth-Token': 'Unchanged_Value'}
'''
func = lambda x: '-'.join([_.title() for _ in x.split('_')])
return func(arg)
def cmp_dict(before, after):
reply = {}
for key, val in after.items():
if before.get(key) != val:
reply.update({key: after.get(key)})
return reply
def validate_auth(request):
import logging
from flask import request, current_app
log = logging.getLogger(__name__)
key = 'AUTH_TOKEN'
if current_app.config.get('DEBUG'):
return True
client_token = request.headers.get(headify(key))
if not client_token:
return False
server_token = current_app.config.get(key)
if client_token != server_token:
log.info(' * VALIDATION: MISMATCH (client:`{0}` != server:`{1}`)'.format(client_token, server_token))
return False
return True
def _side_load(data):
reply = []
for key, val in data.items():
if isinstance(val, list):
for each in val:
reply.append({key:each})
else:
reply.append({key:val})
return reply
def side_load(key, data):
return _side_load({key: data.get(key)})
def setattrs(obj, **kwargs):
for k, v in kwargs.items():
setattr(obj, k, v)
return obj
def force_json(obj):
import json
raw_json = json.dumps(obj, indent=4, sort_keys=True, default=str)
safe_json = json.loads(raw_json)
return safe_json
def git_sha():
import git
repo = git.Repo(search_parent_directories=True)
sha = repo.head.object.hexsha
return sha
| 1,277 | 0 | 161 |
fe67205bcc15927cf1eaff84f93077b22d98de76 | 5,094 | py | Python | src/controller/tunneling/l2_tunnel.py | SSICLOPS/interconnection-agent | 98bd23337de2cc27e0c84d7d1f1c2e060110a4ad | [
"BSD-3-Clause"
] | null | null | null | src/controller/tunneling/l2_tunnel.py | SSICLOPS/interconnection-agent | 98bd23337de2cc27e0c84d7d1f1c2e060110a4ad | [
"BSD-3-Clause"
] | null | null | null | src/controller/tunneling/l2_tunnel.py | SSICLOPS/interconnection-agent | 98bd23337de2cc27e0c84d7d1f1c2e060110a4ad | [
"BSD-3-Clause"
] | null | null | null | """
BSD 3-Clause License
Copyright (c) 2018, Maël Kimmerlin, Aalto University, Finland
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from aiohttp import web
import uuid
from marshmallow import Schema, fields, post_load, ValidationError, validate
import logging
import utils
from helpers_n_wrappers import container3, utils3
| 37.182482 | 84 | 0.704554 | """
BSD 3-Clause License
Copyright (c) 2018, Maël Kimmerlin, Aalto University, Finland
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from aiohttp import web
import uuid
from marshmallow import Schema, fields, post_load, ValidationError, validate
import logging
import utils
from helpers_n_wrappers import container3, utils3
class L2_tunnel_schema(Schema):
name = fields.Str()
node_id = fields.Str(validate=utils.validate_uuid)
self_ip = fields.Str(validate=utils.validate_ip_address)
peer_id = fields.Str(validate=utils.validate_uuid)
peer_ip = fields.Str(validate=utils.validate_ip_address)
peer_public_ip = fields.Str(validate=utils.validate_ip_address)
type = fields.Str(validate=validate.OneOf(["gre","vxlan"]))
enabled = fields.Boolean()
peer_vni = fields.Integer(validate=lambda n: 2<= n <= 4095)
status = fields.Str(validate=validate.OneOf(
["Pending", "Ok", "Deleting", "Failed"]
))
deleting = fields.Boolean()
@post_load
def load_node(self, data):
return L2_tunnel(**data)
class L2_tunnel(container3.ContainerNode):
def __init__(self, **kwargs):
self.node_id = str(uuid.uuid4())
utils3.set_attributes(self, override = True, **kwargs)
super().__init__(name="L2_tunnel")
utils3.set_attributes(self, override = False, status="Pending",
deleting = False)
def lookupkeys(self):
""" Return the lookup keys of the node """
keys = []
keys.append((utils.KEY_L2_TUNNEL, False))
keys.append((self.node_id, True))
keys.append(((utils.KEY_L2_TUNNEL, self.node_id), True))
keys.append(((utils.KEY_L2_TUNNEL, utils.KEY_L2_TUNNEL_IP, self.self_ip),
False
))
return keys
async def get_l2_tunnels(data_store, amqp, node_id=None):
ret = utils.get_objects(data_store, amqp, L2_tunnel_schema,
utils.KEY_L2_TUNNEL, node_id=None
)
raise web.HTTPOk(content_type="application/json", text = ret)
async def create_l2_tunnel(data_store, amqp, **kwargs):
ret, l2_tunnel = utils.create_object(data_store, amqp, L2_tunnel_schema, kwargs)
await send_create_tunnel(data_store, amqp, l2_tunnel)
raise web.HTTPAccepted(content_type="application/json",
text = ret
)
async def delete_l2_tunnel(data_store, amqp, node_id):
l2_tunnel = utils.delete_object(data_store, amqp, node_id, utils.KEY_L2_TUNNEL)
l2_tunnel.status = "Deleting"
l2_tunnel.deleting = True
data_store.save(l2_tunnel)
await send_delete_tunnel(data_store, amqp, l2_tunnel)
raise web.HTTPAccepted()
async def send_create_tunnel(data_store, amqp, tunnel, no_wait = False):
await _send_action_tunnel(data_store, amqp, utils.ACTION_ADD_TUNNEL, tunnel,
no_wait
)
async def send_delete_tunnel(data_store, amqp, tunnel, no_wait = False):
await _send_action_tunnel(data_store, amqp, utils.ACTION_DEL_TUNNEL, tunnel,
no_wait
)
async def _send_action_tunnel(data_store, amqp, action, tunnel, no_wait):
if not data_store.has((utils.KEY_AGENT, utils.KEY_AGENT_IP, tunnel.self_ip)):
return
agent_amqp = data_store.get((utils.KEY_AGENT, utils.KEY_AGENT_IP,
tunnel.self_ip
))
payload = {"operation":action,
"kwargs": L2_tunnel_schema().dump(tunnel).data
}
await amqp.publish_action(payload=payload,
node = agent_amqp, callback = utils.ack_callback, no_wait = no_wait
)
| 1,879 | 1,214 | 209 |
d22047a0fb9cfc42ab64bf8122197a1f3d12d798 | 1,269 | py | Python | rpi/manual_tests/move_servos.py | danielmundi/laser-tracking | 7637b04b2cfe71efa265a7e956dd80f99aafd4db | [
"MIT"
] | 2 | 2018-12-13T18:30:17.000Z | 2018-12-13T18:30:31.000Z | rpi/manual_tests/move_servos.py | danielmundi/laser-tracking | 7637b04b2cfe71efa265a7e956dd80f99aafd4db | [
"MIT"
] | null | null | null | rpi/manual_tests/move_servos.py | danielmundi/laser-tracking | 7637b04b2cfe71efa265a7e956dd80f99aafd4db | [
"MIT"
] | null | null | null | import pigpio
import time
if __name__ == "__main__":
main()
| 27 | 73 | 0.541371 | import pigpio
import time
def main():
laser_pin = 16
motorx_pin = 12
motory_pin = 13
pi = pigpio.pi()
pi.set_mode(motorx_pin, pigpio.OUTPUT) # motor x
pi.set_mode(motory_pin, pigpio.OUTPUT) # motor x
pi.set_mode(laser_pin, pigpio.OUTPUT) # laser
pi.set_pull_up_down(laser_pin, pigpio.PUD_DOWN)
pi.write(laser_pin, 1)
n = 0
#time.sleep(2)
try:
while n >= 0:
s = input("DC: ")
n = float(s)
#i, n = s.split(' ')
#i, n = int(i), float(n)
i = 1
if i == 1:
pi.hardware_PWM(motorx_pin, 50, int(angle2duty(n)*10000))
#pi.hardware_PWM(motorx_pin, 50, int(n*10000))
else:
pi.hardware_PWM(motory_pin, 50, int(angle2duty(n)*10000))
#pi.hardware_PWM(motory_pin, 50, int(n*10000))
#print(str((n)))
print(str(angle2duty(n)))
except (KeyboardInterrupt):
pass
finally:
pi.hardware_PWM(motorx_pin, 0, 0)
pi.hardware_PWM(motory_pin, 0, 0)
pi.write(laser_pin, 0)
pi.stop()
def angle2duty(angle):
duty = angle*(11.1-3)/(180) + 3
return duty #int(duty*10)/10.0
if __name__ == "__main__":
main()
| 1,158 | 0 | 46 |
237f7086121854e2159d90eb2b9dc8fa069e5708 | 324 | py | Python | python/Dictionaries/Sports_Data.py | Aditya-Narayan-Nayak/Nxt-wave | 1e0b7daa23ce5c3bdd4f6127c055f053db699573 | [
"MIT"
] | 1 | 2021-11-21T05:31:10.000Z | 2021-11-21T05:31:10.000Z | python/Dictionaries/Sports_Data.py | Aditya-Narayan-Nayak/Bootcamp | 1e0b7daa23ce5c3bdd4f6127c055f053db699573 | [
"MIT"
] | null | null | null | python/Dictionaries/Sports_Data.py | Aditya-Narayan-Nayak/Bootcamp | 1e0b7daa23ce5c3bdd4f6127c055f053db699573 | [
"MIT"
] | 1 | 2021-10-30T16:30:41.000Z | 2021-10-30T16:30:41.000Z | students_dict = {
"Ram": "Cricket",
"Naresh": "Football",
"Vani": "Tennis",
"Rahim": "Cricket"
}
# Write your code here
n = int(input())
for i in range(n):
key_value_pair = input().split()
key, value = key_value_pair[0], key_value_pair[1]
students_dict[key]= value
print(students_dict)
| 20.25 | 53 | 0.611111 | students_dict = {
"Ram": "Cricket",
"Naresh": "Football",
"Vani": "Tennis",
"Rahim": "Cricket"
}
# Write your code here
n = int(input())
for i in range(n):
key_value_pair = input().split()
key, value = key_value_pair[0], key_value_pair[1]
students_dict[key]= value
print(students_dict)
| 0 | 0 | 0 |
bd8f8bad4fda35e37f23f1431eb3a4af6609864f | 2,977 | py | Python | learn/train.py | girving/pentago-learn | b28399aed8a77152786e97e62ee3e3a8c384af2a | [
"BSD-3-Clause"
] | null | null | null | learn/train.py | girving/pentago-learn | b28399aed8a77152786e97e62ee3e3a8c384af2a | [
"BSD-3-Clause"
] | null | null | null | learn/train.py | girving/pentago-learn | b28399aed8a77152786e97e62ee3e3a8c384af2a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
"""Pentago neural net training"""
import argparse
import datasets
import equivariant as ev
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import numbers
import optax
import timeit
if __name__ == '__main__':
main()
| 26.345133 | 91 | 0.650655 | #!/usr/bin/env python3
"""Pentago neural net training"""
import argparse
import datasets
import equivariant as ev
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import numbers
import optax
import timeit
def pretty_info(info):
def f(v):
return v if isinstance(v, numbers.Integral) else f'{v:.3}'
return ', '.join(f'{k} {f(v)}' for k, v in info.items())
def print_info(info):
print(pretty_info(info))
def logits_fn(quads):
layers = 4
width = 128
mid = 128
layer_scale = 1
return ev.nf_net(quads, layers=layers, width=width, mid=mid, layer_scale=layer_scale)
def train(*,
logits_fn,
dataset,
batch=64,
lr=1e-3,
slog=print_info,
log_every=100,
key=jax.random.PRNGKey(7)):
# Define network
@hk.transform
def loss_fn(data):
quads = data['quads']
values = data['value']
assert quads.shape == (batch, 4, 9)
assert values.shape == (batch,)
labels = jax.nn.one_hot(values + 1, num_classes=3)
logits, metrics = logits_fn(quads)
loss = jnp.sum(labels * jax.nn.log_softmax(logits)) / -batch
accuracy = (jnp.argmax(logits, axis=-1) == values + 1).astype(np.float32).mean()
return loss, dict(loss=loss, accuracy=accuracy, **metrics)
# Initialize
key, key_ = jax.random.split(key)
params = loss_fn.init(key_, next(dataset.forever(batch=batch)))
print(f'params = {sum(p.size for p in jax.tree_leaves(params)):,}')
# Optimizer
opt = optax.adam(lr)
opt_state = opt.init(params)
# Update step
@jax.jit
def update(params, opt_state, data):
grads, metrics = jax.grad(lambda p: loss_fn.apply(p, None, data), has_aux=True)(params)
updates, opt_state = opt.update(grads, opt_state)
params = optax.apply_updates(params, updates)
return params, opt_state, metrics
# Train
metrics = dict(sps=[], loss=[], accuracy=[])
for step, data in enumerate(dataset.forever(batch=batch)):
start = timeit.default_timer()
params, opt_state, ms = update(params, opt_state, data)
ms['sps'] = batch / (timeit.default_timer() - start)
for s in metrics:
metrics[s].append(ms[s])
if step % log_every == 0:
e = dataset.step_to_epoch(step, batch=batch)
info = dict(step=step, epochs=e, samples=step*batch)
for s, xs in metrics.items():
info[s] = np.mean(xs)
xs.clear()
slog(info)
def main():
# Parse arguments
parser = argparse.ArgumentParser(description='Pentago train')
parser.add_argument('--log', type=str, help='log file')
options = parser.parse_args()
# Logging
if options.log:
log_file = open(options.log, 'w')
def slog(info):
s = pretty_info(info)
print(s)
print(s, file=log_file)
log_file.flush()
else:
slog = print_info
# Train
dataset = datasets.SparseData(seed=7, counts=(16,17,18))
train(logits_fn=logits_fn, dataset=dataset, slog=slog)
if __name__ == '__main__':
main()
| 2,589 | 0 | 115 |
ed7aa6d853c43cc85bd1a66e061a6b14adbc480a | 1,835 | py | Python | selia/views/create_views/collection_devices/select_device.py | IslasGECI/selia | 9863c32cd45db13053a1d2add67f5bdc1871b791 | [
"BSD-4-Clause"
] | null | null | null | selia/views/create_views/collection_devices/select_device.py | IslasGECI/selia | 9863c32cd45db13053a1d2add67f5bdc1871b791 | [
"BSD-4-Clause"
] | 13 | 2020-01-07T21:53:50.000Z | 2022-01-13T01:53:50.000Z | selia/views/create_views/collection_devices/select_device.py | IslasGECI/selia | 9863c32cd45db13053a1d2add67f5bdc1871b791 | [
"BSD-4-Clause"
] | 1 | 2021-05-06T19:38:09.000Z | 2021-05-06T19:38:09.000Z | from irekua_database.models import Collection
from irekua_filters.devices import physical_devices as device_utils
from irekua_permissions.data_collections import (
devices as device_permissions)
from selia.views.utils import SeliaList
from selia.views.create_views import SeliaSelectView
| 35.980392 | 80 | 0.719891 | from irekua_database.models import Collection
from irekua_filters.devices import physical_devices as device_utils
from irekua_permissions.data_collections import (
devices as device_permissions)
from selia.views.utils import SeliaList
from selia.views.create_views import SeliaSelectView
class SelectCollectionDevicePhysicalDeviceView(SeliaSelectView):
template_name = 'selia/create/collection_devices/select_device.html'
prefix = 'physical_device'
create_url = 'selia:create_collection_device'
def has_view_permission(self):
user = self.request.user
return device_permissions.create(user, collection=self.collection)
def get_objects(self):
if not hasattr(self, 'collection'):
self.collection = Collection.objects.get(
name=self.request.GET['collection'])
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, *kwargs)
context['collection'] = self.collection
return context
def get_list_class(self):
devices = self.request.user.physicaldevice_created_by.exclude(
collectiondevice__collection__name=self.request.GET['collection'])
collection_type = self.collection.collection_type
if collection_type.restrict_device_types:
devices = devices.filter(
device__device_type__in=collection_type.device_types.all())
class DeviceList(SeliaList):
filter_class = device_utils.Filter
search_fields = device_utils.search_fields
ordering_fields = device_utils.ordering_fields
queryset = devices
list_item_template = 'selia/select_list_items/physical_devices.html'
filter_form_template = 'selia/filters/physical_device.html'
return DeviceList
| 1,212 | 305 | 23 |
e2dfdf4e284c9277e94a21b459afeef7ae4e11be | 53,701 | py | Python | app/test/test_vesting.py | noob77777/ergopad-api | 50e822b4e54a210688664ca209afcdd09c1bfc2d | [
"MIT"
] | null | null | null | app/test/test_vesting.py | noob77777/ergopad-api | 50e822b4e54a210688664ca209afcdd09c1bfc2d | [
"MIT"
] | 23 | 2022-03-09T11:31:32.000Z | 2022-03-31T08:53:27.000Z | app/test/test_vesting.py | noob77777/ergopad-api | 50e822b4e54a210688664ca209afcdd09c1bfc2d | [
"MIT"
] | 2 | 2022-02-16T03:40:05.000Z | 2022-02-16T22:40:15.000Z | from hashlib import blake2b
import logging
import pytest
from config import Config, Network
from ergo_python_appkit.appkit import ErgoAppKit, ErgoValueT
from sigmastate.lang.exceptions import InterpreterException
from org.ergoplatform.appkit import Address, CoveringBoxes, ErgoToken
import java
CFG = Config[Network]
DEBUG = True # CFG.DEBUG
| 43.517828 | 626 | 0.571777 | from hashlib import blake2b
import logging
import pytest
from config import Config, Network
from ergo_python_appkit.appkit import ErgoAppKit, ErgoValueT
from sigmastate.lang.exceptions import InterpreterException
from org.ergoplatform.appkit import Address, CoveringBoxes, ErgoToken
import java
CFG = Config[Network]
DEBUG = True # CFG.DEBUG
class TestProxyNFTLockedVesting:
appKit = ErgoAppKit(CFG.node, Network, CFG.explorer)
sigusd = "03faf2cb329f2e90d6d23b58d91bbb6c046aa143261cc21f52fbe2824bfcbf04"
ergusdoracle = "011d3364de07e5a26f0c4eef0852cddb387039a921b7154ef3cab22c6eda887f"
proxyNFT = "021d3364de07e5a26f0c4eef0852cddb387039a921b7154ef3cab22c6eda887f"
vestedTokenId = "031d3364de07e5a26f0c4eef0852cddb387039a921b7154ef3cab22c6eda887f"
sellerAddress = Address.create("9h7L7sUHZk43VQC3PHtSp5ujAWcZtYmWATBH746wi75C5XHi68b")
sellerProp = sellerAddress.toErgoContract().getErgoTree()
whitelistTokenId = "041d3364de07e5a26f0c4eef0852cddb387039a921b7154ef3cab22c6eda887f"
nErgPrice = 3.19e-7 #
vestedTokenPrice = 0.001 #2 decimals
with open(f'contracts/NFTLockedVesting.es') as f:
script = f.read()
nftLockedVestingContractTree = appKit.compileErgoScript(script)
with open(f'contracts/proxyNFTLockedVesting.es') as f:
script = f.read()
proxyNftLockedVestingTree = appKit.compileErgoScript(
script,
{
"_NFTLockedVestingContract": ErgoAppKit.ergoValue(blake2b(bytes.fromhex(nftLockedVestingContractTree.bytesHex()), digest_size=32).digest(), ErgoValueT.ByteArray).getValue(),
"_ErgUSDOracleNFT": ErgoAppKit.ergoValue(ergusdoracle, ErgoValueT.ByteArrayFromHex).getValue(),
"_SigUSDTokenId": ErgoAppKit.ergoValue(sigusd, ErgoValueT.ByteArrayFromHex).getValue()
}
)
proxyVestingBox = appKit.buildInputBox(
value=int(1e6),
tokens={proxyNFT: 1, vestedTokenId: 1000000},
registers = [
ErgoAppKit.ergoValue([
int(1000*60*60*24), #redeemPeriod
int(365), #numberOfPeriods
int(1648771200000), #vestingStart
int(1), #priceNum
int(1000) #priceDenom
],ErgoValueT.LongArray),
ErgoAppKit.ergoValue(vestedTokenId,ErgoValueT.ByteArrayFromHex), #vestedTokenId
ErgoAppKit.ergoValue(sellerProp.bytes(),ErgoValueT.ByteArray), #Seller address
ErgoAppKit.ergoValue(whitelistTokenId, ErgoValueT.ByteArrayFromHex) #Whitelist tokenid
],
contract = appKit.contractFromTree(proxyNftLockedVestingTree))
oracleBox = appKit.buildInputBox(
value=int(1e6),
tokens={ergusdoracle: 1},
registers = [ErgoAppKit.ergoValue(313479623,ErgoValueT.Long)],
contract = Address.create("NTkuk55NdwCXkF1e2nCABxq7bHjtinX3wH13zYPZ6qYT71dCoZBe1gZkh9FAr7GeHo2EpFoibzpNQmoi89atUjKRrhZEYrTapdtXrWU4kq319oY7BEWmtmRU9cMohX69XMuxJjJP5hRM8WQLfFnffbjshhEP3ck9CKVEkFRw1JDYkqVke2JVqoMED5yxLVkScbBUiJJLWq9BSbE1JJmmreNVskmWNxWE6V7ksKPxFMoqh1SVePh3UWAaBgGQRZ7TWf4dTBF5KMVHmRXzmQqEu2Fz2yeSLy23sM3pfqa78VuvoFHnTFXYFFxn3DNttxwq3EU3Zv25SmgrWjLKiZjFcEcqGgH6DJ9FZ1DfucVtTXwyDJutY3ksUBaEStRxoUQyRu4EhDobixL3PUWRcxaRJ8JKA9b64ALErGepRHkAoVmS8DaE6VbroskyMuhkTo7LbrzhTyJbqKurEzoEfhYxus7bMpLTePgKcktgRRyB7MjVxjSpxWzZedvzbjzZaHLZLkWZESk1WtdM25My33wtVLNXiTvficEUbjA23sNd24pv1YQ72nY1aqUHa2").toErgoContract())
def test_vesting_pure_erg(self):
userInputBox = self.appKit.buildInputBox(
value=int(10e9),
tokens={self.whitelistTokenId: 100000},
registers=None,
contract=self.appKit.dummyContract()
)
proxyVestingOutput = self.appKit.buildOutBox(
value=int(1e6),
tokens={self.proxyNFT: 1, self.vestedTokenId: 900000},
registers = [
ErgoAppKit.ergoValue([
int(1000*60*60*24), #redeemPeriod
int(365), #numberOfPeriods
int(1648771200000), #vestingStart
int(1), #priceNum
int(1000) #priceDenom
],ErgoValueT.LongArray),
ErgoAppKit.ergoValue(self.vestedTokenId,ErgoValueT.ByteArrayFromHex), #vestedTokenId
ErgoAppKit.ergoValue(self.sellerProp.bytes(),ErgoValueT.ByteArray), #Seller address
ErgoAppKit.ergoValue(self.whitelistTokenId, ErgoValueT.ByteArrayFromHex) #Whitelist tokenid
],
contract = self.appKit.contractFromTree(self.proxyNftLockedVestingTree)
)
vestingOutput = self.appKit.buildOutBox(
value=int(1e6),
tokens={self.vestedTokenId: 100000},
registers=[
ErgoAppKit.ergoValue([
int(1000*60*60*24), #Redeem period
int(365), #Number of periods
int(1648771200000), #Start vesting april 1st
int(100000) #Initial vesting amount
], ErgoValueT.LongArray),
#Vesting key
ErgoAppKit.ergoValue(self.proxyVestingBox.getId().toString(), ErgoValueT.ByteArrayFromHex)
],
contract=self.appKit.contractFromTree(self.nftLockedVestingContractTree)
)
userOutput = self.appKit.mintToken(
value=int(1e6),
tokenId=self.proxyVestingBox.getId().toString(),
tokenName="Vesting Key",
tokenDesc="Vested",
mintAmount=1,
decimals=0,
contract=self.appKit.dummyContract()
)
nergamount = int(100000*self.vestedTokenPrice/self.nErgPrice)
sellerOutput = self.appKit.buildOutBox(
value=int(100000*self.vestedTokenPrice/self.nErgPrice+1),
tokens={self.whitelistTokenId: 100000},
registers=None,
contract=self.sellerAddress.toErgoContract()
)
unsignedTx = self.appKit.buildUnsignedTransaction(
inputs = [self.proxyVestingBox, userInputBox],
dataInputs= [self.oracleBox],
outputs = [proxyVestingOutput,vestingOutput,userOutput,sellerOutput],
fee = int(1e6),
sendChangeTo = self.appKit.dummyContract().toAddress().getErgoAddress(),
preHeader = self.appKit.preHeader()
)
signed = False
try:
self.appKit.signTransaction(unsignedTx)
signed = True
except Exception as e:
print(f"Error: {e}")
signed = False
assert signed
def test_vesting_pure_sigusd(self):
userInputBox = self.appKit.buildInputBox(
value=int(10e9),
tokens={self.whitelistTokenId: 100000, self.sigusd: 100},
registers=None,
contract=self.appKit.dummyContract()
)
proxyVestingOutput = self.appKit.buildOutBox(
value=int(1e6),
tokens={self.proxyNFT: 1, self.vestedTokenId: 900000},
registers = [
ErgoAppKit.ergoValue([
int(1000*60*60*24), #redeemPeriod
int(365), #numberOfPeriods
int(1648771200000), #vestingStart
int(1), #priceNum
int(1000) #priceDenom
],ErgoValueT.LongArray),
ErgoAppKit.ergoValue(self.vestedTokenId,ErgoValueT.ByteArrayFromHex), #vestedTokenId
ErgoAppKit.ergoValue(self.sellerProp.bytes(),ErgoValueT.ByteArray), #Seller address
ErgoAppKit.ergoValue(self.whitelistTokenId, ErgoValueT.ByteArrayFromHex) #Whitelist tokenid
],
contract = self.appKit.contractFromTree(self.proxyNftLockedVestingTree)
)
vestingOutput = self.appKit.buildOutBox(
value=int(1e6),
tokens={self.vestedTokenId: 100000},
registers=[
ErgoAppKit.ergoValue([
int(1000*60*60*24), #Redeem period
int(365), #Redeem amount per period
int(1648771200000), #Start vesting april 1st
int(100000) #Initial vesting amount
], ErgoValueT.LongArray),
#Vesting key
ErgoAppKit.ergoValue(self.proxyVestingBox.getId().toString(), ErgoValueT.ByteArrayFromHex)
],
contract=self.appKit.contractFromTree(self.nftLockedVestingContractTree)
)
userOutput = self.appKit.mintToken(
value=int(1e6),
tokenId=self.proxyVestingBox.getId().toString(),
tokenName="Vesting Key",
tokenDesc="Vested",
mintAmount=1,
decimals=0,
contract=self.appKit.dummyContract()
)
sellerOutput = self.appKit.buildOutBox(
value=int(1e6),
tokens={self.whitelistTokenId: 100000, self.sigusd: 100},
registers=None,
contract=self.sellerAddress.toErgoContract()
)
unsignedTx = self.appKit.buildUnsignedTransaction(
inputs = [self.proxyVestingBox, userInputBox],
dataInputs= [self.oracleBox],
outputs = [proxyVestingOutput,vestingOutput,userOutput,sellerOutput],
fee = int(1e6),
sendChangeTo = self.appKit.dummyContract().toAddress().getErgoAddress(),
preHeader = self.appKit.preHeader()
)
signed = False
try:
self.appKit.signTransaction(unsignedTx)
signed = True
except Exception as e:
print(f"Error: {e}")
signed = False
assert signed
def test_vesting_erg_sigusd(self):
userInputBox = self.appKit.buildInputBox(
value=int(10e9),
tokens={self.whitelistTokenId: 100000, self.sigusd: 50},
registers=None,
contract=self.appKit.dummyContract()
)
proxyVestingOutput = self.appKit.buildOutBox(
value=int(1e6),
tokens={self.proxyNFT: 1, self.vestedTokenId: 900000},
registers = [
ErgoAppKit.ergoValue([
int(1000*60*60*24), #redeemPeriod
int(365), #numberOfPeriods
int(1648771200000), #vestingStart
int(1), #priceNum
int(1000) #priceDenom
],ErgoValueT.LongArray),
ErgoAppKit.ergoValue(self.vestedTokenId,ErgoValueT.ByteArrayFromHex), #vestedTokenId
ErgoAppKit.ergoValue(self.sellerProp.bytes(),ErgoValueT.ByteArray), #Seller address
ErgoAppKit.ergoValue(self.whitelistTokenId, ErgoValueT.ByteArrayFromHex) #Whitelist tokenid
],
contract = self.appKit.contractFromTree(self.proxyNftLockedVestingTree)
)
vestingOutput = self.appKit.buildOutBox(
value=int(1e6),
tokens={self.vestedTokenId: 100000},
registers=[
ErgoAppKit.ergoValue([
int(1000*60*60*24), #Redeem period
int(365), #Redeem amount per period
int(1648771200000), #Start vesting april 1st
int(100000) #Initial vesting amount
], ErgoValueT.LongArray),
#Vesting key
ErgoAppKit.ergoValue(self.proxyVestingBox.getId().toString(), ErgoValueT.ByteArrayFromHex)
],
contract=self.appKit.contractFromTree(self.nftLockedVestingContractTree)
)
userOutput = self.appKit.mintToken(
value=int(1e6),
tokenId=self.proxyVestingBox.getId().toString(),
tokenName="Vesting Key",
tokenDesc="Vested",
mintAmount=1,
decimals=0,
contract=self.appKit.dummyContract()
)
sellerOutput = self.appKit.buildOutBox(
value=int(50000*self.vestedTokenPrice/self.nErgPrice+1),
tokens={self.whitelistTokenId: 100000, self.sigusd: 50},
registers=None,
contract=self.sellerAddress.toErgoContract()
)
unsignedTx = self.appKit.buildUnsignedTransaction(
inputs = [self.proxyVestingBox, userInputBox],
dataInputs= [self.oracleBox],
outputs = [proxyVestingOutput,vestingOutput,userOutput,sellerOutput],
fee = int(1e6),
sendChangeTo = self.appKit.dummyContract().toAddress().getErgoAddress(),
preHeader = self.appKit.preHeader()
)
signed = False
try:
self.appKit.signTransaction(unsignedTx)
signed = True
except Exception as e:
print(f"Error: {e}")
signed = False
assert signed
class TestProxyNFTLockedVestingV2:
appKit = ErgoAppKit(CFG.node, Network, CFG.explorer)
sigusd = "03faf2cb329f2e90d6d23b58d91bbb6c046aa143261cc21f52fbe2824bfcbf04"
ergusdoracle = "011d3364de07e5a26f0c4eef0852cddb387039a921b7154ef3cab22c6eda887f"
proxyNFT = "021d3364de07e5a26f0c4eef0852cddb387039a921b7154ef3cab22c6eda887f"
vestedTokenId = "031d3364de07e5a26f0c4eef0852cddb387039a921b7154ef3cab22c6eda887f"
sellerAddress = Address.create("9h7L7sUHZk43VQC3PHtSp5ujAWcZtYmWATBH746wi75C5XHi68b")
botOpAddress = Address.create("9h7L7sUHZk43VQC3PHtSp5ujAWcZtYmWATBH746wi75C5XHi68b")
sellerProp = sellerAddress.toErgoContract().getErgoTree()
whitelistTokenId = "041d3364de07e5a26f0c4eef0852cddb387039a921b7154ef3cab22c6eda887f"
nErgPrice = 3.19e-7 #
vestedTokenPrice = 0.001 #2 decimals
with open(f'contracts/NFTLockedVestingV2.es') as f:
script = f.read()
nftLockedVestingContractTree = appKit.compileErgoScript(script)
with open(f'contracts/proxyNFTLockedVestingV2.es') as f:
script = f.read()
proxyNftLockedVestingTree = appKit.compileErgoScript(
script,
{
"_NFTLockedVestingContract": ErgoAppKit.ergoValue(blake2b(bytes.fromhex(nftLockedVestingContractTree.bytesHex()), digest_size=32).digest(), ErgoValueT.ByteArray).getValue(),
"_ErgUSDOracleNFT": ErgoAppKit.ergoValue(ergusdoracle, ErgoValueT.ByteArrayFromHex).getValue(),
"_SigUSDTokenId": ErgoAppKit.ergoValue(sigusd, ErgoValueT.ByteArrayFromHex).getValue()
}
)
with open(f'contracts/userProxyNFTLockedVesting.es') as f:
script = f.read()
userProxyNftLockedVestingContractTree = appKit.compileErgoScript(script,{"_ErgUSDOracleNFT": ErgoAppKit.ergoValue(ergusdoracle, ErgoValueT.ByteArrayFromHex).getValue()})
proxyVestingBox = appKit.buildInputBox(
value=int(1e6),
tokens={proxyNFT: 1, vestedTokenId: 1000000},
registers = [
ErgoAppKit.ergoValue([
int(1000*60*60*24), #redeemPeriod
int(365), #numberOfPeriods
int(1648771200000), #vestingStart
int(1), #priceNum
int(1000), #priceDenom,
int(0),
int(0),
int(100),
int(0)
],ErgoValueT.LongArray),
ErgoAppKit.ergoValue(vestedTokenId,ErgoValueT.ByteArrayFromHex), #vestedTokenId
ErgoAppKit.ergoValue(sellerProp.bytes(),ErgoValueT.ByteArray), #Seller address
ErgoAppKit.ergoValue(whitelistTokenId, ErgoValueT.ByteArrayFromHex) #Whitelist tokenid
],
contract = appKit.contractFromTree(proxyNftLockedVestingTree))
oracleBox = appKit.buildInputBox(
value=int(1e6),
tokens={ergusdoracle: 1},
registers = [ErgoAppKit.ergoValue(313479623,ErgoValueT.Long)],
contract = Address.create("NTkuk55NdwCXkF1e2nCABxq7bHjtinX3wH13zYPZ6qYT71dCoZBe1gZkh9FAr7GeHo2EpFoibzpNQmoi89atUjKRrhZEYrTapdtXrWU4kq319oY7BEWmtmRU9cMohX69XMuxJjJP5hRM8WQLfFnffbjshhEP3ck9CKVEkFRw1JDYkqVke2JVqoMED5yxLVkScbBUiJJLWq9BSbE1JJmmreNVskmWNxWE6V7ksKPxFMoqh1SVePh3UWAaBgGQRZ7TWf4dTBF5KMVHmRXzmQqEu2Fz2yeSLy23sM3pfqa78VuvoFHnTFXYFFxn3DNttxwq3EU3Zv25SmgrWjLKiZjFcEcqGgH6DJ9FZ1DfucVtTXwyDJutY3ksUBaEStRxoUQyRu4EhDobixL3PUWRcxaRJ8JKA9b64ALErGepRHkAoVmS8DaE6VbroskyMuhkTo7LbrzhTyJbqKurEzoEfhYxus7bMpLTePgKcktgRRyB7MjVxjSpxWzZedvzbjzZaHLZLkWZESk1WtdM25My33wtVLNXiTvficEUbjA23sNd24pv1YQ72nY1aqUHa2").toErgoContract())
def test_covering_boxes(self):
covBoxes: CoveringBoxes = CoveringBoxes(int(1e6),java.util.ArrayList([self.proxyVestingBox]),java.util.ArrayList([ErgoToken(self.vestedTokenId,10000)]),False)
assert covBoxes.isCovered()
def test_covering_boxes_notEnoughErg(self):
covBoxes: CoveringBoxes = CoveringBoxes(int(2e6),java.util.ArrayList([self.proxyVestingBox]),java.util.ArrayList([ErgoToken(self.vestedTokenId,10000)]),False)
assert not covBoxes.isCovered()
def test_covering_boxes_notEnoughTokens(self):
assert not ErgoAppKit.boxesCovered([self.proxyVestingBox],int(1e6),{self.vestedTokenId:100000000})
def test_vesting_pure_erg(self):
nergAmount = int(100000*self.vestedTokenPrice/self.nErgPrice+1)
userProxyInputBox = self.appKit.buildInputBox(
value=int(22e6)+nergAmount,
tokens={self.whitelistTokenId: 100000},
registers=[
ErgoAppKit.ergoValue(int(313479623),ErgoValueT.Long),
ErgoAppKit.ergoValue(self.appKit.dummyContract().getErgoTree().bytes(), ErgoValueT.ByteArray),
ErgoAppKit.ergoValue(self.proxyNFT,ErgoValueT.ByteArrayFromHex)
],
contract=self.appKit.contractFromTree(self.userProxyNftLockedVestingContractTree)
)
proxyVestingOutput = self.appKit.buildOutBox(
value=int(1e6),
tokens={self.proxyNFT: 1, self.vestedTokenId: 900000},
registers = [
ErgoAppKit.ergoValue([
int(1000*60*60*24), #redeemPeriod
int(365), #numberOfPeriods
int(1648771200000), #vestingStart
int(1), #priceNum
int(1000), #priceDenom
int(0),
int(0),
int(100),
int(0)
],ErgoValueT.LongArray),
ErgoAppKit.ergoValue(self.vestedTokenId,ErgoValueT.ByteArrayFromHex), #vestedTokenId
ErgoAppKit.ergoValue(self.sellerProp.bytes(),ErgoValueT.ByteArray), #Seller address
ErgoAppKit.ergoValue(self.whitelistTokenId, ErgoValueT.ByteArrayFromHex) #Whitelist tokenid
],
contract = self.appKit.contractFromTree(self.proxyNftLockedVestingTree)
)
vestingOutput = self.appKit.buildOutBox(
value=int(1e6),
tokens={self.vestedTokenId: 100000},
registers=[
ErgoAppKit.ergoValue([
int(1000*60*60*24), #Redeem period
int(365), #Number of periods
int(1648771200000), #Start vesting april 1st
int(100000), #Initial vesting amount
int(0),
int(100),
int(0)
], ErgoValueT.LongArray),
#Vesting key
ErgoAppKit.ergoValue(self.proxyVestingBox.getId().toString(), ErgoValueT.ByteArrayFromHex)
],
contract=self.appKit.contractFromTree(self.nftLockedVestingContractTree)
)
userOutput = self.appKit.mintToken(
value=int(1e6),
tokenId=self.proxyVestingBox.getId().toString(),
tokenName="Vesting Key",
tokenDesc="Vested",
mintAmount=1,
decimals=0,
contract=self.appKit.dummyContract()
)
sellerOutput = self.appKit.buildOutBox(
value=nergAmount,
tokens={self.whitelistTokenId: 100000},
registers=None,
contract=self.sellerAddress.toErgoContract()
)
botOperatorOutput = self.appKit.buildOutBox(
value=int(1e7),
tokens=None,
registers=None,
contract=self.botOpAddress.toErgoContract()
)
unsignedTx = self.appKit.buildUnsignedTransaction(
inputs = [self.proxyVestingBox, userProxyInputBox],
dataInputs= [self.oracleBox],
outputs = [proxyVestingOutput,vestingOutput,userOutput,sellerOutput,botOperatorOutput],
fee = int(1e7),
sendChangeTo = self.appKit.dummyContract().toAddress().getErgoAddress(),
preHeader = self.appKit.preHeader()
)
signed = False
try:
self.appKit.signTransaction(unsignedTx)
signed = True
except Exception as e:
print(f"Error: {e}")
signed = False
assert signed
def test_vesting_pure_sigusd(self):
nergAmount = int(1e6)
userProxyInputBox = self.appKit.buildInputBox(
value=int(22e6)+nergAmount,
tokens={self.whitelistTokenId: 100000, self.sigusd: 100},
registers=[
ErgoAppKit.ergoValue(int(313479623),ErgoValueT.Long),
ErgoAppKit.ergoValue(self.appKit.dummyContract().getErgoTree().bytes(), ErgoValueT.ByteArray),
ErgoAppKit.ergoValue(self.proxyNFT,ErgoValueT.ByteArrayFromHex)
],
contract=self.appKit.contractFromTree(self.userProxyNftLockedVestingContractTree)
)
proxyVestingOutput = self.appKit.buildOutBox(
value=int(1e6),
tokens={self.proxyNFT: 1, self.vestedTokenId: 900000},
registers = [
ErgoAppKit.ergoValue([
int(1000*60*60*24), #redeemPeriod
int(365), #numberOfPeriods
int(1648771200000), #vestingStart
int(1), #priceNum
int(1000), #priceDenom
int(0),
int(0),
int(100),
int(0)
],ErgoValueT.LongArray),
ErgoAppKit.ergoValue(self.vestedTokenId,ErgoValueT.ByteArrayFromHex), #vestedTokenId
ErgoAppKit.ergoValue(self.sellerProp.bytes(),ErgoValueT.ByteArray), #Seller address
ErgoAppKit.ergoValue(self.whitelistTokenId, ErgoValueT.ByteArrayFromHex) #Whitelist tokenid
],
contract = self.appKit.contractFromTree(self.proxyNftLockedVestingTree)
)
vestingOutput = self.appKit.buildOutBox(
value=int(1e6),
tokens={self.vestedTokenId: 100000},
registers=[
ErgoAppKit.ergoValue([
int(1000*60*60*24), #Redeem period
int(365), #Redeem amount per period
int(1648771200000), #Start vesting april 1st
int(100000), #Initial vesting amount
int(0),
int(100),
int(0)
], ErgoValueT.LongArray),
#Vesting key
ErgoAppKit.ergoValue(self.proxyVestingBox.getId().toString(), ErgoValueT.ByteArrayFromHex)
],
contract=self.appKit.contractFromTree(self.nftLockedVestingContractTree)
)
userOutput = self.appKit.mintToken(
value=int(1e6),
tokenId=self.proxyVestingBox.getId().toString(),
tokenName="Vesting Key",
tokenDesc="Vested",
mintAmount=1,
decimals=0,
contract=self.appKit.dummyContract()
)
sellerOutput = self.appKit.buildOutBox(
value=int(1e6),
tokens={self.whitelistTokenId: 100000, self.sigusd: 100},
registers=None,
contract=self.sellerAddress.toErgoContract()
)
botOperatorOutput = self.appKit.buildOutBox(
value=int(1e7),
tokens=None,
registers=None,
contract=self.botOpAddress.toErgoContract()
)
unsignedTx = self.appKit.buildUnsignedTransaction(
inputs = [self.proxyVestingBox, userProxyInputBox],
dataInputs= [self.oracleBox],
outputs = [proxyVestingOutput,vestingOutput,userOutput,sellerOutput,botOperatorOutput],
fee = int(1e7),
sendChangeTo = self.appKit.dummyContract().toAddress().getErgoAddress(),
preHeader = self.appKit.preHeader()
)
signed = False
try:
self.appKit.signTransaction(unsignedTx)
signed = True
except Exception as e:
print(f"Error: {e}")
signed = False
assert signed
def test_vesting_erg_sigusd(self):
nergAmount = int(50000*self.vestedTokenPrice/self.nErgPrice+1)
userProxyInputBox = self.appKit.buildInputBox(
value=int(22e6)+nergAmount,
tokens={self.whitelistTokenId: 100000, self.sigusd: 50},
registers=[
ErgoAppKit.ergoValue(int(313479623),ErgoValueT.Long),
ErgoAppKit.ergoValue(self.appKit.dummyContract().getErgoTree().bytes(), ErgoValueT.ByteArray),
ErgoAppKit.ergoValue(self.proxyNFT,ErgoValueT.ByteArrayFromHex)
],
contract=self.appKit.contractFromTree(self.userProxyNftLockedVestingContractTree)
)
proxyVestingOutput = self.appKit.buildOutBox(
value=int(1e6),
tokens={self.proxyNFT: 1, self.vestedTokenId: 900000},
registers = [
ErgoAppKit.ergoValue([
int(1000*60*60*24), #redeemPeriod
int(365), #numberOfPeriods
int(1648771200000), #vestingStart
int(1), #priceNum
int(1000), #priceDenom
int(0),
int(0),
int(100),
int(0)
],ErgoValueT.LongArray),
ErgoAppKit.ergoValue(self.vestedTokenId,ErgoValueT.ByteArrayFromHex), #vestedTokenId
ErgoAppKit.ergoValue(self.sellerProp.bytes(),ErgoValueT.ByteArray), #Seller address
ErgoAppKit.ergoValue(self.whitelistTokenId, ErgoValueT.ByteArrayFromHex) #Whitelist tokenid
],
contract = self.appKit.contractFromTree(self.proxyNftLockedVestingTree)
)
vestingOutput = self.appKit.buildOutBox(
value=int(1e6),
tokens={self.vestedTokenId: 100000},
registers=[
ErgoAppKit.ergoValue([
int(1000*60*60*24), #Redeem period
int(365), #Redeem amount per period
int(1648771200000), #Start vesting april 1st
int(100000), #Initial vesting amount
int(0),
int(100),
int(0)
], ErgoValueT.LongArray),
#Vesting key
ErgoAppKit.ergoValue(self.proxyVestingBox.getId().toString(), ErgoValueT.ByteArrayFromHex)
],
contract=self.appKit.contractFromTree(self.nftLockedVestingContractTree)
)
userOutput = self.appKit.mintToken(
value=int(1e6),
tokenId=self.proxyVestingBox.getId().toString(),
tokenName="Vesting Key",
tokenDesc="Vested",
mintAmount=1,
decimals=0,
contract=self.appKit.dummyContract()
)
sellerOutput = self.appKit.buildOutBox(
value=int(50000*self.vestedTokenPrice/self.nErgPrice+1),
tokens={self.whitelistTokenId: 100000, self.sigusd: 50},
registers=None,
contract=self.sellerAddress.toErgoContract()
)
botOperatorOutput = self.appKit.buildOutBox(
value=int(1e7),
tokens=None,
registers=None,
contract=self.botOpAddress.toErgoContract()
)
unsignedTx = self.appKit.buildUnsignedTransaction(
inputs = [self.proxyVestingBox,userProxyInputBox],
dataInputs= [self.oracleBox],
outputs = [proxyVestingOutput,vestingOutput,userOutput,sellerOutput,botOperatorOutput],
fee = int(1e7),
sendChangeTo = self.appKit.dummyContract().toAddress().getErgoAddress(),
preHeader = self.appKit.preHeader()
)
signed = False
try:
self.appKit.signTransaction(unsignedTx)
signed = True
except Exception as e:
print(f"Error: {e}")
signed = False
assert signed
def test_vesting_refund_ergslippage(self):
nergAmount = int(100000*self.vestedTokenPrice/self.nErgPrice+1)
userProxyInputBox = self.appKit.buildInputBox(
value=int(22e6)+nergAmount,
tokens={self.whitelistTokenId: 100000},
registers=[
ErgoAppKit.ergoValue(int(313479622),ErgoValueT.Long),
ErgoAppKit.ergoValue(self.appKit.dummyContract().getErgoTree().bytes(), ErgoValueT.ByteArray),
ErgoAppKit.ergoValue(self.proxyNFT,ErgoValueT.ByteArrayFromHex)
],
contract=self.appKit.contractFromTree(self.userProxyNftLockedVestingContractTree)
)
userOutput = self.appKit.buildOutBox(
value=int(22e6)+nergAmount-int(7e6),
tokens={self.whitelistTokenId: 100000},
registers=None,
contract=self.appKit.dummyContract()
)
botOperatorOutput = self.appKit.buildOutBox(
value=int(5e6),
tokens=None,
registers=None,
contract=self.botOpAddress.toErgoContract()
)
unsignedTx = self.appKit.buildUnsignedTransaction(
inputs = [userProxyInputBox],
dataInputs= [self.oracleBox],
outputs = [userOutput,botOperatorOutput],
fee = int(2e6),
sendChangeTo = self.appKit.dummyContract().toAddress().getErgoAddress(),
preHeader = self.appKit.preHeader()
)
signed = False
try:
self.appKit.signTransaction(unsignedTx)
signed = True
except Exception as e:
print(f"Error: {e}")
signed = False
assert signed
def test_vesting_refund_soldout(self):
nergAmount = int(100000*self.vestedTokenPrice/self.nErgPrice+1)
userProxyInputBox = self.appKit.buildInputBox(
value=int(22e6)+nergAmount,
tokens={self.whitelistTokenId: 10000000000000},
registers=[
ErgoAppKit.ergoValue(int(313479623),ErgoValueT.Long),
ErgoAppKit.ergoValue(self.appKit.dummyContract().getErgoTree().bytes(), ErgoValueT.ByteArray),
ErgoAppKit.ergoValue(self.proxyNFT,ErgoValueT.ByteArrayFromHex)
],
contract=self.appKit.contractFromTree(self.userProxyNftLockedVestingContractTree)
)
userOutput = self.appKit.buildOutBox(
value=int(22e6)+nergAmount-int(7e6),
tokens={self.whitelistTokenId: 10000000000000},
registers=None,
contract=self.appKit.dummyContract()
)
botOperatorOutput = self.appKit.buildOutBox(
value=int(5e6),
tokens=None,
registers=None,
contract=self.botOpAddress.toErgoContract()
)
unsignedTx = self.appKit.buildUnsignedTransaction(
inputs = [userProxyInputBox],
dataInputs= [self.proxyVestingBox],
outputs = [userOutput,botOperatorOutput],
fee = int(2e6),
sendChangeTo = self.appKit.dummyContract().toAddress().getErgoAddress(),
preHeader = self.appKit.preHeader()
)
signed = False
try:
self.appKit.signTransaction(unsignedTx)
signed = True
except Exception as e:
print(f"Error: {e}")
signed = False
assert signed
    def test_vesting_vest_too_many(self):
        """The contract must reject a vest attempt that takes out more vested
        tokens than were paid for: payment covers 100000 tokens, but the
        outputs below move 200000 into the new vesting box (the proxy keeps
        only 800000 instead of 900000 as in the sibling happy-path test)."""
        # ERG required to pay for exactly 100000 vested tokens at the
        # oracle-derived prices (+1 nanoERG for rounding).
        nergAmount = int(100000*self.vestedTokenPrice/self.nErgPrice+1)
        # User proxy box: carries the whitelist tokens plus the ERG payment.
        userProxyInputBox = self.appKit.buildInputBox(
            value=int(22e6)+nergAmount,
            tokens={self.whitelistTokenId: 100000},
            registers=[
                ErgoAppKit.ergoValue(int(313479623),ErgoValueT.Long),
                # Refund/receiver ergo tree of the user (dummy contract in tests).
                ErgoAppKit.ergoValue(self.appKit.dummyContract().getErgoTree().bytes(), ErgoValueT.ByteArray),
                ErgoAppKit.ergoValue(self.proxyNFT,ErgoValueT.ByteArrayFromHex)
            ],
            contract=self.appKit.contractFromTree(self.userProxyNftLockedVestingContractTree)
        )
        # Recreated proxy vesting box.
        # NOTE(review): this LongArray has nine entries while
        # test_vesting_vest_bot_op_stealing_erg writes eight -- confirm which
        # register layout the proxy contract actually expects.
        proxyVestingOutput = self.appKit.buildOutBox(
            value=int(1e6),
            # 800000 left in the proxy => 200000 taken, double the paid amount.
            tokens={self.proxyNFT: 1, self.vestedTokenId: 800000},
            registers = [
                ErgoAppKit.ergoValue([
                    int(1000*60*60*24), #redeemPeriod
                    int(365), #numberOfPeriods
                    int(1648771200000), #vestingStart
                    int(1), #priceNum
                    int(1000), #priceDenom
                    int(0),
                    int(0),
                    int(100),
                    int(0)
                ],ErgoValueT.LongArray),
                ErgoAppKit.ergoValue(self.vestedTokenId,ErgoValueT.ByteArrayFromHex), #vestedTokenId
                ErgoAppKit.ergoValue(self.sellerProp.bytes(),ErgoValueT.ByteArray), #Seller address
                ErgoAppKit.ergoValue(self.whitelistTokenId, ErgoValueT.ByteArrayFromHex) #Whitelist tokenid
            ],
            contract = self.appKit.contractFromTree(self.proxyNftLockedVestingTree)
        )
        # New user vesting box holding the oversized 200000-token allocation,
        # though R4 still claims an initial vesting amount of 100000.
        vestingOutput = self.appKit.buildOutBox(
            value=int(1e6),
            tokens={self.vestedTokenId: 200000},
            registers=[
                ErgoAppKit.ergoValue([
                    int(1000*60*60*24), #Redeem period
                    int(365), #Number of periods
                    int(1648771200000), #Start vesting april 1st
                    int(100000), #Initial vesting amount
                    int(0),
                    int(100),
                    int(0)
                ], ErgoValueT.LongArray),
                #Vesting key
                ErgoAppKit.ergoValue(self.proxyVestingBox.getId().toString(), ErgoValueT.ByteArrayFromHex)
            ],
            contract=self.appKit.contractFromTree(self.nftLockedVestingContractTree)
        )
        # The vesting-key NFT minted to the user; its id is the proxy box id.
        userOutput = self.appKit.mintToken(
            value=int(1e6),
            tokenId=self.proxyVestingBox.getId().toString(),
            tokenName="Vesting Key",
            tokenDesc="Vested",
            mintAmount=1,
            decimals=0,
            contract=self.appKit.dummyContract()
        )
        # Seller receives the ERG payment and the spent whitelist tokens.
        sellerOutput = self.appKit.buildOutBox(
            value=nergAmount,
            tokens={self.whitelistTokenId: 100000},
            registers=None,
            contract=self.sellerAddress.toErgoContract()
        )
        # Bot operator compensation for assembling the transaction.
        botOperatorOutput = self.appKit.buildOutBox(
            value=int(1e7),
            tokens=None,
            registers=None,
            contract=self.botOpAddress.toErgoContract()
        )
        unsignedTx = self.appKit.buildUnsignedTransaction(
            inputs = [self.proxyVestingBox, userProxyInputBox],
            dataInputs= [self.oracleBox],
            outputs = [proxyVestingOutput,vestingOutput,userOutput,sellerOutput,botOperatorOutput],
            fee = int(1e7),
            sendChangeTo = self.appKit.dummyContract().toAddress().getErgoAddress(),
            preHeader = self.appKit.preHeader()
        )
        # Signing must fail: taking 200000 tokens while paying for 100000
        # violates the proxy vesting contract.
        with pytest.raises(InterpreterException):
            self.appKit.signTransaction(unsignedTx)
    def test_vesting_vest_bot_op_stealing_erg(self):
        """The contract must reject a transaction where the bot operator pays
        itself 19e6 nanoERG and shrinks the miner fee to 1e6, instead of the
        1e7/1e7 split used in the valid vest tests (total ERG spent is the
        same, so only the contract can catch the skew)."""
        # ERG required to pay for exactly 100000 vested tokens at the
        # oracle-derived prices (+1 nanoERG for rounding).
        nergAmount = int(100000*self.vestedTokenPrice/self.nErgPrice+1)
        # User proxy box: carries the whitelist tokens plus the ERG payment.
        userProxyInputBox = self.appKit.buildInputBox(
            value=int(22e6)+nergAmount,
            tokens={self.whitelistTokenId: 100000},
            registers=[
                ErgoAppKit.ergoValue(int(313479623),ErgoValueT.Long),
                # Refund/receiver ergo tree of the user (dummy contract in tests).
                ErgoAppKit.ergoValue(self.appKit.dummyContract().getErgoTree().bytes(), ErgoValueT.ByteArray),
                ErgoAppKit.ergoValue(self.proxyNFT,ErgoValueT.ByteArrayFromHex)
            ],
            contract=self.appKit.contractFromTree(self.userProxyNftLockedVestingContractTree)
        )
        # Recreated proxy vesting box with the correct 900000 tokens left
        # (100000 vested, matching the payment -- token flow here is honest).
        proxyVestingOutput = self.appKit.buildOutBox(
            value=int(1e6),
            tokens={self.proxyNFT: 1, self.vestedTokenId: 900000},
            registers = [
                ErgoAppKit.ergoValue([
                    int(1000*60*60*24), #redeemPeriod
                    int(365), #numberOfPeriods
                    int(1648771200000), #vestingStart
                    int(1), #priceNum
                    int(1000), #priceDenom
                    int(0),
                    int(100),
                    int(0)
                ],ErgoValueT.LongArray),
                ErgoAppKit.ergoValue(self.vestedTokenId,ErgoValueT.ByteArrayFromHex), #vestedTokenId
                ErgoAppKit.ergoValue(self.sellerProp.bytes(),ErgoValueT.ByteArray), #Seller address
                ErgoAppKit.ergoValue(self.whitelistTokenId, ErgoValueT.ByteArrayFromHex) #Whitelist tokenid
            ],
            contract = self.appKit.contractFromTree(self.proxyNftLockedVestingTree)
        )
        # New user vesting box with the correctly sized 100000-token allocation.
        vestingOutput = self.appKit.buildOutBox(
            value=int(1e6),
            tokens={self.vestedTokenId: 100000},
            registers=[
                ErgoAppKit.ergoValue([
                    int(1000*60*60*24), #Redeem period
                    int(365), #Number of periods
                    int(1648771200000), #Start vesting april 1st
                    int(100000), #Initial vesting amount
                    int(0),
                    int(100),
                    int(0)
                ], ErgoValueT.LongArray),
                #Vesting key
                ErgoAppKit.ergoValue(self.proxyVestingBox.getId().toString(), ErgoValueT.ByteArrayFromHex)
            ],
            contract=self.appKit.contractFromTree(self.nftLockedVestingContractTree)
        )
        # The vesting-key NFT minted to the user; its id is the proxy box id.
        userOutput = self.appKit.mintToken(
            value=int(1e6),
            tokenId=self.proxyVestingBox.getId().toString(),
            tokenName="Vesting Key",
            tokenDesc="Vested",
            mintAmount=1,
            decimals=0,
            contract=self.appKit.dummyContract()
        )
        # Seller receives the ERG payment and the spent whitelist tokens.
        sellerOutput = self.appKit.buildOutBox(
            value=nergAmount,
            tokens={self.whitelistTokenId: 100000},
            registers=None,
            contract=self.sellerAddress.toErgoContract()
        )
        # The attack: bot operator inflates its own payout to 19e6...
        botOperatorOutput = self.appKit.buildOutBox(
            value=int(19e6),
            tokens=None,
            registers=None,
            contract=self.botOpAddress.toErgoContract()
        )
        unsignedTx = self.appKit.buildUnsignedTransaction(
            inputs = [self.proxyVestingBox, userProxyInputBox],
            dataInputs= [self.oracleBox],
            outputs = [proxyVestingOutput,vestingOutput,userOutput,sellerOutput,botOperatorOutput],
            # ...while cutting the miner fee to 1e6 to keep totals balanced.
            fee = int(1e6),
            sendChangeTo = self.appKit.dummyContract().toAddress().getErgoAddress(),
            preHeader = self.appKit.preHeader()
        )
        # Signing must fail: the contract must cap the bot operator payout.
        with pytest.raises(InterpreterException):
            self.appKit.signTransaction(unsignedTx)
def test_vesting_remove_funds(self):
sellerInput = self.appKit.buildInputBox(
value=int(3e6),
tokens=None,
registers=None,
contract=self.appKit.dummyContract()
)
proxyVestingOutput = self.appKit.buildOutBox(
value=int(1e6),
tokens={self.proxyNFT: 1},
registers = [
ErgoAppKit.ergoValue([
int(1000*60*60*24), #redeemPeriod
int(365), #numberOfPeriods
int(1648771200000), #vestingStart
int(1), #priceNum
int(1000), #priceDenom
int(0),
int(100),
int(0)
],ErgoValueT.LongArray),
ErgoAppKit.ergoValue(self.vestedTokenId,ErgoValueT.ByteArrayFromHex), #vestedTokenId
ErgoAppKit.ergoValue(self.sellerProp.bytes(),ErgoValueT.ByteArray), #Seller address
ErgoAppKit.ergoValue(self.whitelistTokenId, ErgoValueT.ByteArrayFromHex) #Whitelist tokenid
],
contract = self.appKit.contractFromTree(self.proxyNftLockedVestingTree)
)
sellerOutput = self.appKit.buildOutBox(
value=int(1e6),
tokens={self.vestedTokenId: 1000000},
registers=None,
contract=self.sellerAddress.toErgoContract()
)
unsignedTx = self.appKit.buildUnsignedTransaction(
inputs = [self.proxyVestingBox, sellerInput],
#dataInputs= [self.oracleBox],
outputs = [proxyVestingOutput,sellerOutput],
fee = int(1e6),
sendChangeTo = self.sellerAddress.toErgoContract().toAddress().getErgoAddress(),
preHeader = self.appKit.preHeader()
)
signed = False
try:
self.appKit.signTransaction(unsignedTx)
signed = True
except Exception as e:
print(f"Error: {e}")
signed = False
assert signed
class TestNFTLockedVesting:
    """Tests for the plain NFT-locked vesting contract: a fixed token amount
    vests linearly over 365 periods of one day each, and can only be redeemed
    by whoever holds the matching vesting-key NFT.

    NOTE(review): this class body performs file and network I/O at import
    time (contract compilation against a node); consider moving it into a
    fixture so collection does not require a live node.
    """
    appKit = ErgoAppKit(CFG.node, Network, CFG.explorer)
    with open(f'contracts/NFTLockedVesting.es') as f:
        script = f.read()
    tree = appKit.compileErgoScript(script)
    contract = appKit.contractFromTree(tree)
    vestedTokenId = '81ba2a45d4539045995ad6ceeecf9f14b942f944a1c9771430a89c3f88ee898a'
    # fakeVestingKey differs from vestingKey only in the last byte and must
    # never unlock the vesting box.
    vestingKey = 'f9e5ce5aa0d95f5d54a7bc89c46730d9662397067250aa18a0039631c0f5b809'
    fakeVestingKey = 'f9e5ce5aa0d95f5d54a7bc89c46730d9662397067250aa18a0039631c0f5b808'
    # One redeem period = one day, in milliseconds.
    duration = int(1000*60*60*24)
    # The vesting box under test: 999999 tokens vesting from April 1st 2022.
    vestingInputBox = appKit.buildInputBox(int(1e6),
                                           {
                                               vestedTokenId: 999999
                                           },
                                           [
                                               ErgoAppKit.ergoValue([
                                                   duration, #Redeem period
                                                   int(365), #Number of periods (redeem amount/period = 999999/365)
                                                   int(1648771200000), #Start vesting april 1st
                                                   int(999999) #Initial vesting amount
                                               ], ErgoValueT.LongArray),
                                               #Vesting key
                                               ErgoAppKit.ergoValue(vestingKey, ErgoValueT.ByteArrayFromHex)
                                           ],
                                           contract)

    def test_normal_redeem(self):
        """Holder of the vesting key redeems exactly the vested amount part
        way through the schedule."""
        # User box proving key ownership.
        userInputBox = self.appKit.buildInputBox(int(2e6),
                                                 {
                                                     self.vestingKey: 1
                                                 },
                                                 registers=None, contract=self.appKit.dummyContract())
        #Set the preheader to 2.5 days after vesting start, so 2*redeem amount should be free to claim
        preHeader = self.appKit.preHeader(timestamp=int(1648771200000+self.duration*2.5))
        # Vesting box continues with two periods' worth removed...
        newVestingBox = self.appKit.buildOutBox(self.vestingInputBox.getValue(), {
            self.vestedTokenId: int(999999-int(2*999999/365))
        },
        [
            ErgoAppKit.ergoValue([
                self.duration, #Redeem period
                int(365), #Number of periods
                int(1648771200000), #Start vesting april 1st
                int(999999) #Initial vesting amount
            ], ErgoValueT.LongArray),
            #Vesting key
            ErgoAppKit.ergoValue(self.vestingKey, ErgoValueT.ByteArrayFromHex)
        ], self.contract)
        # ...and the user receives those tokens plus the key back.
        newUserBox = self.appKit.buildOutBox(int(1e6), {
            self.vestingKey: 1,
            self.vestedTokenId: int(2*999999/365)
        }, registers=None, contract=self.appKit.dummyContract())
        unsignedTx = self.appKit.buildUnsignedTransaction(
            inputs = [self.vestingInputBox, userInputBox],
            outputs = [newVestingBox,newUserBox],
            fee = int(1e6),
            sendChangeTo = self.appKit.dummyContract().toAddress().getErgoAddress(),
            preHeader = preHeader
        )
        # Signing must succeed; let any exception propagate so pytest shows
        # the real traceback instead of a swallowed `signed` flag.
        self.appKit.signTransaction(unsignedTx)

    def test_final_redeem(self):
        """After the full schedule has elapsed the vesting box can be spent
        completely, with no successor vesting box."""
        userInputBox = self.appKit.buildInputBox(int(2e6),
                                                 {
                                                     self.vestingKey: 1
                                                 },
                                                 registers=None, contract=self.appKit.dummyContract())
        # Preheader is 366 periods after vesting start, so the entire initial
        # amount is redeemable (the original comment wrongly said 2.5 days).
        preHeader = self.appKit.preHeader(timestamp=int(1648771200000+self.duration*366))
        # All 999999 tokens and the key go to the user; the vesting box ends.
        newUserBox = self.appKit.buildOutBox(int(1e6), {
            self.vestingKey: 1,
            self.vestedTokenId: int(999999)
        }, registers=None, contract=self.appKit.dummyContract())
        unsignedTx = self.appKit.buildUnsignedTransaction(
            inputs = [self.vestingInputBox, userInputBox],
            outputs = [newUserBox],
            fee = int(1e6),
            sendChangeTo = self.appKit.dummyContract().toAddress().getErgoAddress(),
            preHeader = preHeader
        )
        # Signing must succeed; let any exception propagate so pytest shows
        # the real traceback instead of a swallowed `signed` flag.
        self.appKit.signTransaction(unsignedTx)

    def test_redeem_too_much(self):
        """Redeeming three periods' worth when only two have vested must be
        rejected by the contract."""
        userInputBox = self.appKit.buildInputBox(int(2e6),
                                                 {
                                                     self.vestingKey: 1
                                                 },
                                                 registers=None, contract=self.appKit.dummyContract())
        #Set the preheader to 2.5 days after vesting start, so 2*redeem amount should be free to claim
        preHeader = self.appKit.preHeader(timestamp=int(1648771200000+self.duration*2.5))
        # Outputs move 3 periods' worth -- one more than is vested.
        newVestingBox = self.appKit.buildOutBox(self.vestingInputBox.getValue(), {
            self.vestedTokenId: int(999999-int(3*999999/365))
        },
        [
            ErgoAppKit.ergoValue([
                self.duration, #Redeem period
                int(365), #Number of periods
                int(1648771200000), #Start vesting april 1st
                int(999999) #Initial vesting amount
            ], ErgoValueT.LongArray),
            #Vesting key
            ErgoAppKit.ergoValue(self.vestingKey, ErgoValueT.ByteArrayFromHex)
        ], self.contract)
        newUserBox = self.appKit.buildOutBox(int(1e6), {
            self.vestingKey: 1,
            self.vestedTokenId: int(3*999999/365)
        }, registers=None, contract=self.appKit.dummyContract())
        unsignedTx = self.appKit.buildUnsignedTransaction(
            inputs = [self.vestingInputBox, userInputBox],
            outputs = [newVestingBox,newUserBox],
            fee = int(1e6),
            sendChangeTo = self.appKit.dummyContract().toAddress().getErgoAddress(),
            preHeader = preHeader
        )
        with pytest.raises(InterpreterException):
            self.appKit.signTransaction(unsignedTx)

    def test_wrong_key_redeem(self):
        """A redeem attempt using a token id that differs from the vesting
        key (by a single byte) must be rejected."""
        userInputBox = self.appKit.buildInputBox(int(2e6),
                                                 {
                                                     self.fakeVestingKey: 1
                                                 },
                                                 registers=None, contract=self.appKit.dummyContract())
        #Set the preheader to 2.5 days after vesting start, so 2*redeem amount should be free to claim
        preHeader = self.appKit.preHeader(timestamp=int(1648771200000+self.duration*2.5))
        newVestingBox = self.appKit.buildOutBox(self.vestingInputBox.getValue(), {
            self.vestedTokenId: int(999999-int(2*999999/365))
        },
        [
            ErgoAppKit.ergoValue([
                self.duration, #Redeem period
                int(365), #Number of periods
                int(1648771200000), #Start vesting april 1st
                int(999999) #Initial vesting amount
            ], ErgoValueT.LongArray),
            #Vesting key
            ErgoAppKit.ergoValue(self.vestingKey, ErgoValueT.ByteArrayFromHex)
        ], self.contract)
        newUserBox = self.appKit.buildOutBox(int(1e6), {
            self.fakeVestingKey: 1,
            self.vestedTokenId: int(2*999999/365)
        }, registers=None, contract=self.appKit.dummyContract())
        unsignedTx = self.appKit.buildUnsignedTransaction(
            inputs = [self.vestingInputBox, userInputBox],
            outputs = [newVestingBox,newUserBox],
            fee = int(1e6),
            sendChangeTo = self.appKit.dummyContract().toAddress().getErgoAddress(),
            preHeader = preHeader
        )
        with pytest.raises(InterpreterException):
            self.appKit.signTransaction(unsignedTx)

    def test_redeem_not_enough(self):
        """Redeeming only one period's worth when two have vested is rejected
        -- the contract apparently requires claiming the exact redeemable
        amount (TODO confirm against NFTLockedVesting.es)."""
        userInputBox = self.appKit.buildInputBox(int(2e6),
                                                 {
                                                     self.vestingKey: 1
                                                 },
                                                 registers=None, contract=self.appKit.dummyContract())
        #Set the preheader to 2.5 days after vesting start, so 2*redeem amount should be free to claim
        preHeader = self.appKit.preHeader(timestamp=int(1648771200000+self.duration*2.5))
        # Outputs move only 1 period's worth although 2 are redeemable.
        newVestingBox = self.appKit.buildOutBox(self.vestingInputBox.getValue(), {
            self.vestedTokenId: int(999999-int(999999/365))
        },
        [
            ErgoAppKit.ergoValue([
                self.duration, #Redeem period
                int(365), #Number of periods
                int(1648771200000), #Start vesting april 1st
                int(999999) #Initial vesting amount
            ], ErgoValueT.LongArray),
            #Vesting key
            ErgoAppKit.ergoValue(self.vestingKey, ErgoValueT.ByteArrayFromHex)
        ], self.contract)
        newUserBox = self.appKit.buildOutBox(int(1e6), {
            self.vestingKey: 1,
            self.vestedTokenId: int(999999/365)
        }, registers=None, contract=self.appKit.dummyContract())
        unsignedTx = self.appKit.buildUnsignedTransaction(
            inputs = [self.vestingInputBox, userInputBox],
            outputs = [newVestingBox,newUserBox],
            fee = int(1e6),
            sendChangeTo = self.appKit.dummyContract().toAddress().getErgoAddress(),
            preHeader = preHeader
        )
        with pytest.raises(InterpreterException):
            self.appKit.signTransaction(unsignedTx)
| 44,750 | 8,521 | 72 |
5958a2e26d88dc8ace625edd92519b5d66733ed3 | 2,065 | py | Python | plots/LIGO_psds.py | damonge/SNELL | 4bb276225fce8f535619d0f2133a19f3c42aa44f | [
"BSD-3-Clause"
] | 2 | 2020-05-07T03:22:37.000Z | 2021-02-19T14:34:42.000Z | plots/LIGO_psds.py | damonge/SNELL | 4bb276225fce8f535619d0f2133a19f3c42aa44f | [
"BSD-3-Clause"
] | 2 | 2020-04-28T11:13:10.000Z | 2021-06-08T12:20:25.000Z | plots/LIGO_psds.py | damonge/GWSN | 4bb276225fce8f535619d0f2133a19f3c42aa44f | [
"BSD-3-Clause"
] | 2 | 2020-05-07T03:22:43.000Z | 2021-12-05T15:41:05.000Z | import numpy as np
import schnell as snl
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', **{'family': 'sans-serif',
'sans-serif': ['Helvetica']})
rc('text', usetex=True)
freqs = np.geomspace(8., 1010., 2048)
dets = [snl.GroundDetector('Hanford', 46.4, -119.4, 171.8,
'data/aLIGO.txt'),
snl.GroundDetector('Livingstone', 30.7, -90.8, 243.0,
'data/aLIGO.txt'),
snl.GroundDetector('Virgo', 43.6, 10.5, 116.5,
'data/Virgo.txt'),
snl.GroundDetector('KAGRA', 36.3, 137.2, 225.0,
'data/KAGRA.txt'),
snl.GroundDetector('Cosmic Explorer', 37.24804, -115.800155, 0.,
'data/CE1_strain.txt')]
et = snl.GroundDetectorTriangle(name='ET0', lat=40.1, lon=9.0,
fname_psd='data/ET.txt', detector_id=0)
plt.figure()
plt.plot(freqs, dets[0].psd(freqs), 'k-', label='LIGO')
plt.plot(freqs, dets[2].psd(freqs), 'k--', label='Virgo')
plt.plot(freqs, dets[3].psd(freqs), 'k:', label='KAGRA')
plt.loglog()
plt.xlim([10, 1000])
plt.ylim([2E-48, 2E-43])
plt.xlabel(r'$f\,\,[{\rm Hz}]$', fontsize=16)
plt.ylabel(r'$N_f\,\,[{\rm Hz}^{-1}]$', fontsize=16)
plt.gca().tick_params(labelsize="large")
plt.legend(loc='upper right', fontsize=14, frameon=False)
plt.savefig("psd_LIGO.pdf", bbox_inches='tight')
freqsa = np.geomspace(6, 5000., 3072)
freqsb = np.geomspace(1., 10010., 3072)
plt.figure()
plt.plot(freqsb, et.psd(freqsb), 'k-', label='ET-D')
plt.plot(freqsb, dets[4].psd(freqsb), 'k--', label='CE-S1')
plt.plot(freqsa, dets[0].psd(freqsa), 'k:', label='LIGO A+')
plt.xlim([1.5, 1E4])
plt.ylim([5E-50, 9E-42])
plt.loglog()
plt.xlabel(r'$f\,\,[{\rm Hz}]$', fontsize=16)
plt.ylabel(r'$N_f\,\,[{\rm Hz}^{-1}]$', fontsize=16)
plt.gca().tick_params(labelsize="large")
plt.gca().set_yticks([1E-48, 1E-46, 1E-44, 1E-42])
plt.legend(loc='upper right', fontsize=14, frameon=False)
plt.savefig("psd_ET.pdf", bbox_inches='tight')
plt.show()
| 37.545455 | 72 | 0.589831 | import numpy as np
import schnell as snl
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', **{'family': 'sans-serif',
'sans-serif': ['Helvetica']})
rc('text', usetex=True)
freqs = np.geomspace(8., 1010., 2048)
dets = [snl.GroundDetector('Hanford', 46.4, -119.4, 171.8,
'data/aLIGO.txt'),
snl.GroundDetector('Livingstone', 30.7, -90.8, 243.0,
'data/aLIGO.txt'),
snl.GroundDetector('Virgo', 43.6, 10.5, 116.5,
'data/Virgo.txt'),
snl.GroundDetector('KAGRA', 36.3, 137.2, 225.0,
'data/KAGRA.txt'),
snl.GroundDetector('Cosmic Explorer', 37.24804, -115.800155, 0.,
'data/CE1_strain.txt')]
et = snl.GroundDetectorTriangle(name='ET0', lat=40.1, lon=9.0,
fname_psd='data/ET.txt', detector_id=0)
plt.figure()
plt.plot(freqs, dets[0].psd(freqs), 'k-', label='LIGO')
plt.plot(freqs, dets[2].psd(freqs), 'k--', label='Virgo')
plt.plot(freqs, dets[3].psd(freqs), 'k:', label='KAGRA')
plt.loglog()
plt.xlim([10, 1000])
plt.ylim([2E-48, 2E-43])
plt.xlabel(r'$f\,\,[{\rm Hz}]$', fontsize=16)
plt.ylabel(r'$N_f\,\,[{\rm Hz}^{-1}]$', fontsize=16)
plt.gca().tick_params(labelsize="large")
plt.legend(loc='upper right', fontsize=14, frameon=False)
plt.savefig("psd_LIGO.pdf", bbox_inches='tight')
freqsa = np.geomspace(6, 5000., 3072)
freqsb = np.geomspace(1., 10010., 3072)
plt.figure()
plt.plot(freqsb, et.psd(freqsb), 'k-', label='ET-D')
plt.plot(freqsb, dets[4].psd(freqsb), 'k--', label='CE-S1')
plt.plot(freqsa, dets[0].psd(freqsa), 'k:', label='LIGO A+')
plt.xlim([1.5, 1E4])
plt.ylim([5E-50, 9E-42])
plt.loglog()
plt.xlabel(r'$f\,\,[{\rm Hz}]$', fontsize=16)
plt.ylabel(r'$N_f\,\,[{\rm Hz}^{-1}]$', fontsize=16)
plt.gca().tick_params(labelsize="large")
plt.gca().set_yticks([1E-48, 1E-46, 1E-44, 1E-42])
plt.legend(loc='upper right', fontsize=14, frameon=False)
plt.savefig("psd_ET.pdf", bbox_inches='tight')
plt.show()
| 0 | 0 | 0 |
43e5dac36dc6fb3b7053aa83cc101915fd72f719 | 2,930 | py | Python | lively-lions/MUD/items/test/models/test_items.py | Zorakinezear/summer-code-jam-2020 | 33b4158bf89f46eed0a6bc8d37e72d904695a15e | [
"MIT"
] | 40 | 2020-08-02T07:38:22.000Z | 2021-07-26T01:46:50.000Z | lively-lions/MUD/items/test/models/test_items.py | Zorakinezear/summer-code-jam-2020 | 33b4158bf89f46eed0a6bc8d37e72d904695a15e | [
"MIT"
] | 134 | 2020-07-31T12:15:45.000Z | 2020-12-13T04:42:19.000Z | lively-lions/MUD/items/test/models/test_items.py | Zorakinezear/summer-code-jam-2020 | 33b4158bf89f46eed0a6bc8d37e72d904695a15e | [
"MIT"
] | 101 | 2020-07-31T12:00:47.000Z | 2021-11-01T09:06:58.000Z | from django.test import TestCase
import pytest
from items.models import Item_Category, Small_Item, Large_Item
# Create your tests here.
@pytest.mark.django_db
| 52.321429 | 110 | 0.594539 | from django.test import TestCase
import pytest
from items.models import Item_Category, Small_Item, Large_Item
# Create your tests here.
@pytest.mark.django_db
class CreateItem(TestCase):
@classmethod
def setUp(self):
# sets up the categories
self.armor_physical = Item_Category.objects.create(
item_title='Armor',
type='Physical'
)
self.small_buckler = Small_Item.objects.create(
item_category=self.armor_physical,
item_name='Small Buckeler',
effect=5,
durability=7,
weight=1,
description='Small shield for defending.'
)
self.large_shield = Large_Item.objects.create(
item_category=self.armor_physical,
item_name='Large Shield',
effect=7,
durability=12,
weight=2,
description='Large Shield for defending.'
)
def test_create_item_category(self):
assert Item_Category.objects.count() == 1, 'Should be equal'
assert Item_Category.objects.get(pk=1).item_title == self.armor_physical.item_title, "Should be equal"
def test_create_small_item(self):
assert Small_Item.objects.count() == 1, 'Should be equal'
assert Small_Item.objects.get(pk=1) == self.small_buckler, "Should be equal"
assert Small_Item.objects.get(pk=1).item_name == self.small_buckler.item_name, "Should be equal"
assert Small_Item.objects.get(pk=1).effect == self.small_buckler.effect, "Should be equal"
assert Small_Item.objects.get(pk=1).durability == self.small_buckler.durability, "Should be equal"
assert Small_Item.objects.get(pk=1).weight == self.small_buckler.weight, "Should be equal"
assert Small_Item.objects.get(pk=1).description == self.small_buckler.description, "Should be equal"
def test_create_large_item(self):
assert Large_Item.objects.count() == 1, 'Should be equal'
assert Large_Item.objects.get(pk=1) == self.large_shield, "Should be equal"
assert Large_Item.objects.get(pk=1).item_name == self.large_shield.item_name, "Should be equal"
assert Large_Item.objects.get(pk=1).effect == self.large_shield.effect, "Should be equal"
assert Large_Item.objects.get(pk=1).durability == self.large_shield.durability, "Should be equal"
assert Large_Item.objects.get(pk=1).weight == self.large_shield.weight, "Should be equal"
assert Large_Item.objects.get(pk=1).description == self.large_shield.description, "Should be equal"
| 2,615 | 131 | 22 |
ba641889131fadc1d79f357714df4408ae2397cf | 7,517 | py | Python | minecraft/networking/packets/clientbound/play/chunk_data_packet.py | kisate/mPyCraft | adc986d0035f6c3cfd038482ef90273e0e5ffc3b | [
"Apache-2.0"
] | null | null | null | minecraft/networking/packets/clientbound/play/chunk_data_packet.py | kisate/mPyCraft | adc986d0035f6c3cfd038482ef90273e0e5ffc3b | [
"Apache-2.0"
] | null | null | null | minecraft/networking/packets/clientbound/play/chunk_data_packet.py | kisate/mPyCraft | adc986d0035f6c3cfd038482ef90273e0e5ffc3b | [
"Apache-2.0"
] | null | null | null | from minecraft.networking.packets import (
Packet, AbstractKeepAlivePacket, AbstractPluginMessagePacket, PacketBuffer
)
from minecraft.networking.types import (
Integer, FixedPointInteger, Angle, UnsignedByte, Byte, Boolean, UUID,
Short, VarInt, Double, Float, String, Enum, Difficulty, Dimension,
GameMode, Vector, Direction, PositionAndLook, multi_attribute_alias,
VarIntPrefixedByteArray, MutableRecord, Long
)
from minecraft.networking.types import mynbt
import numpy
GLOBAL_BITS_PER_BLOCK = 14 #TODO
| 42.954286 | 155 | 0.508847 | from minecraft.networking.packets import (
Packet, AbstractKeepAlivePacket, AbstractPluginMessagePacket, PacketBuffer
)
from minecraft.networking.types import (
Integer, FixedPointInteger, Angle, UnsignedByte, Byte, Boolean, UUID,
Short, VarInt, Double, Float, String, Enum, Difficulty, Dimension,
GameMode, Vector, Direction, PositionAndLook, multi_attribute_alias,
VarIntPrefixedByteArray, MutableRecord, Long
)
from minecraft.networking.types import mynbt
import numpy
GLOBAL_BITS_PER_BLOCK = 14 #TODO
class ChunkDataPacket(Packet):
@staticmethod
def get_id(context):
return 0x20
packet_name = 'chunk_data'
class ChunkSection(MutableRecord):
__slots__ = 'blocks', 'light', 'sky_light', 'palette', 'data', 'block_metas'
def __init__(self):
self.blocks = numpy.full([16, 16, 16], 0).tolist()
self.light = []
self.sky_light = []
self.data = None
def update_block(self, x, y, z, data):
self.blocks[x][y][z] = data
class Chunk(MutableRecord):
__slots__ = 'x', 'z', 'gu_continuous', 'bitmask', 'sections', 'entities', 'blocks_in_chunk'
def __init__(self, x, z, gu_continuous, bitmask):
self.x = x
self.z = z
self.gu_continuous = gu_continuous
self.bitmask = bitmask
self.sections = [ChunkDataPacket.ChunkSection()]*16
self.entities = []
self.blocks_in_chunk = [] #IDs that are in chunk
def get_block(self, x, y, z, relative=False):
section_number = y // 16
section = self.sections[section_number]
if relative:
return section.blocks[x][y % 16][z] >> 4
else:
return section.blocks[x - self.x*16][y % 16][z - self.z*16] >> 4
def get_block_with_meta(self, x, y, z, relative=False):
section_number = y // 16
section = self.sections[section_number]
if relative:
return (section.blocks[x][y % 16][z] >> 4, section.blocks[x][y % 16][z] & 15)
else:
return (section.blocks[x - self.x*16][y % 16][z - self.z*16] >> 4, section.blocks[x - self.x*16][y % 16][z - self.z*16] & 15)
def update_block(self, x, y, z, data, relative=True):
section_number = y // 16
section = self.sections[section_number]
if relative:
section.update_block(x, y % 16, z, data)
else:
section.update_block(x - self.x*16, y % 16, z - self.z*16, data)
self.sections[section_number] = section
self.update_blocks()
def update_block_multi(self, records):
for record in records:
section_number = record.y // 16
section = self.sections[section_number]
section.update_block(record.x, record.y % 16, record.z, record.block_state_id)
self.sections[section_number] = section
self.update_blocks()
def update_blocks(self):
self.blocks_in_chunk = []
for section in self.sections:
if section is not None:
blocks_in_section = []
for x in section.blocks:
for z in x:
for y in z:
if y not in blocks_in_section:
blocks_in_section.append(y)
self.blocks_in_chunk = list(set(self.blocks_in_chunk) | set(blocks_in_section))
# print(self.blocks_in_chunk)
def read_data(self, data, dimension):
file_object = PacketBuffer()
file_object.send(data)
file_object.reset_cursor()
for i in range(16):
if self.bitmask & (1 << i):
bits_per_block = UnsignedByte.read(file_object)
palette = None
if bits_per_block < GLOBAL_BITS_PER_BLOCK:
palette_length = VarInt.read(file_object)
palette = []
for _ in range(palette_length):
palette.append(VarInt.read(file_object))
section = ChunkDataPacket.ChunkSection()
data_length = VarInt.read(file_object)
data = []
for _ in range(data_length):
part = file_object.read(8)
data.append(int.from_bytes(part, 'big'))
section.data = data
section.palette = palette
block_mask = (1 << bits_per_block) - 1
# print(i)
for y in range(16):
for z in range(16):
for x in range(16):
block_mask = (1 << bits_per_block) - 1
number = (((y << 4) + z) << 4) + x
long_number = (number*bits_per_block) >> 6
bit_in_long_number = (number*bits_per_block) & 63
block = (data[long_number] >> bit_in_long_number) & (block_mask)
if bit_in_long_number + bits_per_block > 64:
block |= (data[long_number + 1] & ((1 << (bit_in_long_number + bits_per_block - 64)) - 1)) << (64 - bit_in_long_number)
if palette:
# if block > 0:
# print(palette)
# print(len(palette))
# print(block)
# print(bits_per_block)
# print((x, y, z, self.x, self.z))
block = palette[block]
if type(block) is float:
print(block)
section.blocks[x][y][z] = block
section.light = file_object.read(2048)
if dimension == 0:
section.sky_light = file_object.read(2048)
self.sections[i] = section
self.update_blocks()
def __repr__(self):
return 'chunk_x={}, chunk_z={}, blocks_in_chunk={}'.format(self.x, self.z, self.blocks_in_chunk)
def read(self, file_object):
# print('Reading chunk packet...')
self.x = Integer.read(file_object)
self.z = Integer.read(file_object)
self.gu_continuous = Boolean.read(file_object)
self.bitmask = VarInt.read(file_object)
self.data_length = VarInt.read(file_object)
self.data = file_object.read(self.data_length)
self.chunk = ChunkDataPacket.Chunk(self.x, self.z, self.gu_continuous, self.bitmask)
self.number_of_entities = VarInt.read(file_object)
for _ in range(self.number_of_entities):
self.chunk.entities.append(mynbt.parse_bytes(file_object))
# print(self.chunk.blocks_in_chunk)
# print(s.data)
# if len(self.chunk.blocks_in_chunk) > 1 : print(self.chunk.blocks_in_chunk)
# print('Reading chunk packet... Done')
| 6,270 | 693 | 23 |
133afe34151f5232a9e2d51d19c83f6dc9a56c53 | 464 | py | Python | setup.py | davidmnoriega/fast2phy | b86e2d3665f9a64ff9707c2fb3adacedf9a3a6f4 | [
"MIT"
] | 1 | 2017-11-07T20:05:05.000Z | 2017-11-07T20:05:05.000Z | setup.py | davidmnoriega/fast2phy | b86e2d3665f9a64ff9707c2fb3adacedf9a3a6f4 | [
"MIT"
] | null | null | null | setup.py | davidmnoriega/fast2phy | b86e2d3665f9a64ff9707c2fb3adacedf9a3a6f4 | [
"MIT"
] | null | null | null | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='fast2phy',
description="Convert aligned FASTA format to interleaved PHYLIP format",
license='MIT',
author='David M Noriega',
author_email='davidmnoriega@gmail.com',
version='1.0',
install_requires=['numpy', 'pyfasta'],
packages=['fast2phy'],
entry_points={
'console_scripts': ['fast2phy=fast2phy:main']}
)
| 24.421053 | 76 | 0.674569 | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='fast2phy',
description="Convert aligned FASTA format to interleaved PHYLIP format",
license='MIT',
author='David M Noriega',
author_email='davidmnoriega@gmail.com',
version='1.0',
install_requires=['numpy', 'pyfasta'],
packages=['fast2phy'],
entry_points={
'console_scripts': ['fast2phy=fast2phy:main']}
)
| 0 | 0 | 0 |
c79ca3f1ca56b1e1d7abce7da3c93490c44d36fd | 3,602 | py | Python | data/lib/module.py | Ayantaker/pythonc2 | 9083333def91b0181c8a2c91950d40155e376605 | [
"Apache-2.0"
] | null | null | null | data/lib/module.py | Ayantaker/pythonc2 | 9083333def91b0181c8a2c91950d40155e376605 | [
"Apache-2.0"
] | null | null | null | data/lib/module.py | Ayantaker/pythonc2 | 9083333def91b0181c8a2c91950d40155e376605 | [
"Apache-2.0"
] | null | null | null | import pdb
import os
import pathlib
import sys
from termcolor import colored
from lib.style import Style
from rich import print as pprint
from rich.panel import Panel
import readline
import re
| 30.268908 | 276 | 0.692115 | import pdb
import os
import pathlib
import sys
from termcolor import colored
from lib.style import Style
from rich import print as pprint
from rich.panel import Panel
import readline
import re
class Module:
## Has the task ID to module object mapping
module_task_id = {}
def __init__(self,name,description,utility,language,script):
self.name = name
self.description = description
self.utility = utility
self.language = language
self.script = script
@classmethod
def module_help_menu(cls):
pass
## takes in the option hash defined in each modules and returns the required ones
@classmethod
def get_required_options(cls,module_options):
required_options = []
for option_name in module_options.keys():
if module_options[option_name]['required'] == True:
required_options.append(option_name)
return required_options
@classmethod
def module_menu(cls,module,utility):
module_options,description = cls.get_options(module,utility)
required_options = cls.get_required_options(module_options)
option_hash = {}
while True:
cmd = str(input(colored(f"\n(SpyderC2: Victim: Module) {colored(module,'cyan')} > ",'red')))
if re.match(r'^set ([^ ]+) ([^ ]+)$',cmd):
info = re.findall(r'^set ([^ ]+) ([^ ]+)$',cmd)
## TODO - error handling
option = info[0][0]
value = info[0][1]
if option not in module_options.keys():
print(colored(f"Invalid option - {option} for module {module}",'yellow'))
else:
option_hash[option] = value
print(colored(f"{option} set to {value}",'green'))
elif cmd == 'options':
cls.show_options(module,utility)
elif cmd == 'execute' or cmd == 'run':
unfilled_options = [item for item in required_options if item not in list(option_hash.keys())]
if len(unfilled_options) > 0 :
## User needs to set values for these options as they are required
print(colored(f"Set some values for these options as they are required : {', '.join(unfilled_options)}. Use : set <option_name> <option_value>",'yellow'))
continue
else:
return option_hash
elif cmd == 'help':
cls.module_help_menu()
elif cmd == 'back' or cmd == 'exit':
## TODO handle this
return False
elif cmd == '':
print()
pass
else:
print(f"Not supported. Type {colored('help','cyan')} to see commands supported.")
@classmethod
def get_options(cls,module,utility):
module_folder = os.path.join(str(pathlib.Path(__file__).parent.resolve()), "../modules",utility)
sys.path.append(module_folder)
mod = __import__(module)
## capitalize the first letter
module_name = module.title()
module_options = getattr(mod,module_name).module_options()
description = getattr(mod,module_name).description
return module_options,description
@classmethod
def show_options(cls,module,utility):
module_options,description = cls.get_options(module,utility)
cmd = f"Interacting with [cyan]{module}[/cyan].You can configure the options below by '[cyan]set <option_name> <option_value>[/cyan]'. Once done configuring module, press '[cyan]run[/cyan]' to run it on vicitim.\n\n[cyan]{module.upper()}[/cyan] : [cyan]{description}[/cyan]"
print("\n\n")
pprint(Panel(cmd, title="[red bold blink]INFO!"))
row = []
for key in module_options.keys():
row.append([key,str(module_options[key]['required']),module_options[key]['desc']])
column = {
"Name" : {'style':"gold3"},
"Required":{'style':"cyan"},
"Description":{'justify':"left", 'no_wrap':False}
}
s = Style()
s.create_table("MODULE CONFIGURABLE OPTIONS",column,row,'center')
| 3,024 | 355 | 23 |
1ae6ecabfb9c4df38957f271c527ea3eb353b8ec | 1,427 | py | Python | setup.py | floralist/botaxon | 979bb525eac87eee07593332aaac1bfd96fe44e5 | [
"MIT"
] | null | null | null | setup.py | floralist/botaxon | 979bb525eac87eee07593332aaac1bfd96fe44e5 | [
"MIT"
] | 1 | 2019-04-11T22:46:34.000Z | 2019-04-28T22:23:57.000Z | setup.py | ggueret/botaxon | 979bb525eac87eee07593332aaac1bfd96fe44e5 | [
"MIT"
] | 1 | 2020-05-11T17:21:47.000Z | 2020-05-11T17:21:47.000Z | import os
from setuptools import setup
BASEDIR_PATH = os.path.abspath(os.path.dirname(__file__))
def get_version():
    """Return the package version string from ``botaxon.__VERSION__``.

    Defined here because the original helper was missing from this copy,
    leaving ``setup()`` below with an unresolved ``get_version`` name.
    """
    # Imported lazily so setup.py can be read without the package on sys.path.
    from botaxon import __VERSION__
    return __VERSION__


setup(
    name="botaxon",
    version=get_version(),
    author="Geoffrey GUERET",
    author_email="geoffrey@gueret.tech",
    description="Taxonomic parser for (sub)species botanical names.",
    long_description=open(os.path.join(BASEDIR_PATH, "README.md"), "r").read(),
    long_description_content_type="text/markdown",
    url="https://github.com/ggueret/botaxon",
    license="MIT",
    packages=["botaxon"],
    include_package_data=True,
    python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
    tests_require=["pytest==4.4.1", "pytest-cov==2.6.1"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: Libraries :: Python Modules"
    ]
)
| 33.186047 | 79 | 0.62719 | import os
from setuptools import setup
def get_version():
    """Return the package version string from ``botaxon.__VERSION__``."""
    # Imported lazily so setup.py can be read without the package on sys.path.
    from botaxon import __VERSION__
    return __VERSION__
BASEDIR_PATH = os.path.abspath(os.path.dirname(__file__))
setup(
name="botaxon",
version=get_version(),
author="Geoffrey GUERET",
author_email="geoffrey@gueret.tech",
description="Taxonomic parser for (sub)species botanical names.",
long_description=open(os.path.join(BASEDIR_PATH, "README.md"), "r").read(),
long_description_content_type="text/markdown",
url="https://github.com/ggueret/botaxon",
license="MIT",
packages=["botaxon"],
include_package_data=True,
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
tests_require=["pytest==4.4.1", "pytest-cov==2.6.1"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
| 56 | 0 | 23 |
5cd00a8b23b6b97dbe37221986cd0283648cc251 | 3,999 | py | Python | analyze_player.py | rwbogl/voul | 4300b8d14f6e31c2d32e44eac203e9a30e9b8cd1 | [
"MIT"
] | null | null | null | analyze_player.py | rwbogl/voul | 4300b8d14f6e31c2d32e44eac203e9a30e9b8cd1 | [
"MIT"
] | null | null | null | analyze_player.py | rwbogl/voul | 4300b8d14f6e31c2d32e44eac203e9a30e9b8cd1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from glob import glob
import os.path as path
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
"""
analyze_player.py
This program implements functions to analyze (and assist in analyzing) player
stats.
"""
def join_years(player_dir):
    """Join the per-year stat CSVs for a player into one pandas DataFrame.

    :player_dir: directory holding one CSV per season; files are read in
        sorted filename order (filenames are assumed to start with the year).
    :returns: a single DataFrame with every year concatenated in order;
        empty DataFrame if the directory has no files.
    """
    # Sort the files by year.
    year_csvs = sorted(glob(path.join(player_dir, "*")))
    frames = [pd.read_csv(csv, parse_dates=True, index_col=0)
              for csv in year_csvs]
    if not frames:
        return pd.DataFrame()
    # pd.concat replaces the per-file DataFrame.append loop: append was
    # deprecated and removed in pandas 2.0, and concat is O(n) overall.
    return pd.concat(frames)
def get_player_dfs(player_dir):
    """
    Return the player dataframes for every player in `player_dir/*`, using a
    dictionary with player name as the key.

    :player_dir: Path to player stat directory (one sub-folder per player).
    :returns: Dictionary of (name, player_df) key-value pairs.
    """
    # Folder basename doubles as the player name.
    return {
        path.basename(path.normpath(player_folder)): join_years(player_folder)
        for player_folder in glob(path.join(player_dir, "*"))
    }
def get_team_df(directory, ignore_index=False):
    """Return one DataFrame containing every player on the team.

    Player frames are date-indexed (see the 2017-09-09 note in the module:
    pandas treats gaps in dates as gaps in data, which makes naive plots look
    bad — call ``df.reset_index()`` to recover a 0..n index for plotting).
    For multi-player analysis the date index is what keeps games aligned.

    :directory: path to the player stat directory (one folder per player).
    :ignore_index: forwarded to pandas; when True the rows are renumbered 0..n.
    :returns: concatenation of every player's DataFrame (empty if no players).
    """
    df_dict = get_player_dfs(directory)
    frames = list(df_dict.values())
    if not frames:
        return pd.DataFrame()
    # pd.concat replaces the per-player DataFrame.append loop: append was
    # deprecated and removed in pandas 2.0, and concat is O(n) overall.
    return pd.concat(frames, ignore_index=ignore_index)
def team_date_mean(team_df):
    """Average every stat column over all players, per game date.

    :team_df: team DataFrame whose index holds the game date
        (as produced by get_team_df).
    :returns: DataFrame of per-date column means, indexed by "Date".
    """
    # Move the date index into a regular "Date" column before grouping.
    flat = team_df.reset_index()
    return flat.groupby("Date").mean()
def team_scatter_plot(team_df_dict, x, y, filter=None):
    """Scatter-plot stat *y* against stat *x* for each player.

    :team_df_dict: dict of player name -> player DataFrame.
    :x: column name plotted on the x axis.
    :y: column name plotted on the y axis.
    :filter: optional predicate ``f(df) -> bool``; only players whose frame
        passes are drawn. NOTE(review): with filter=None the ``if filter``
        guard fails for every player, so nothing is plotted -- confirm that
        is intended.
    """
    # NOTE(review): `num` and `ax` are unused; plt.plot draws on the current
    # axes of the figure just created.
    num = plt.figure()
    ax = plt.gca()
    for name, df in team_df_dict.items():
        if filter and filter(df):
            plt.plot(df[x], df[y], "o", label=name)
if __name__ == "__main__":
    # Example analysis.
    # Split players at the 60th percentile of mean hitting percentage and
    # plot kills vs. total attempts for each group, with a y = x reference line.
    plt.style.use("ggplot")
    team_df = get_team_df("./player_stats/")
    team_df_dict = get_player_dfs("./player_stats/")
    top_percentile = team_df["pct"].quantile(.6)
    filter_high = lambda df: df["pct"].mean() >= top_percentile
    # "low" additionally requires at least one non-NaN pct value, so players
    # with no recorded percentage are excluded from both plots.
    filter_low = lambda df: len(df["pct"].dropna()) > 0 and not filter_high(df)
    team_scatter_plot(team_df_dict, "ta", "k", filter_high)
    xs = np.linspace(0, 50)
    plt.plot(xs, xs, c="k")  # reference line y = x
    plt.xlabel("Total Attempts")
    plt.ylabel("Kills")
    plt.title("Players with mean PCT above 60th percentile")
    plt.legend()
    team_scatter_plot(team_df_dict, "ta", "k", filter_low)
    xs = np.linspace(0, 45)
    plt.plot(xs, xs, c="k")  # reference line y = x
    plt.xlabel("Total Attempts")
    plt.ylabel("Kills")
    plt.title("Players with mean PCT below 60th percentile")
    plt.legend()
    plt.show()
| 28.769784 | 79 | 0.671918 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from glob import glob
import os.path as path
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
"""
analyze_player.py
This program implements functions to analyze (and assist in analyzing) player
stats.
"""
def join_years(player_dir):
    """Join the per-year stat CSVs for a player into one pandas DataFrame.

    :player_dir: directory holding one CSV per season; files are read in
        sorted filename order (filenames are assumed to start with the year).
    :returns: a single DataFrame with every year concatenated in order;
        empty DataFrame if the directory has no files.
    """
    # Sort the files by year.
    year_csvs = sorted(glob(path.join(player_dir, "*")))
    frames = [pd.read_csv(csv, parse_dates=True, index_col=0)
              for csv in year_csvs]
    if not frames:
        return pd.DataFrame()
    # pd.concat replaces the per-file DataFrame.append loop: append was
    # deprecated and removed in pandas 2.0, and concat is O(n) overall.
    return pd.concat(frames)
def get_player_dfs(player_dir):
"""
Return the player dataframes for every player in `player_dir/*`, using a
dictionary with player name as the key.
:player_dir: Path to player stat directory.
:returns: Dictionary of (name, player_df) key-value pairs.
"""
player_dirs = glob(path.join(player_dir, "*"))
df_dict = {}
for directory in player_dirs:
name = path.basename(path.normpath(directory))
df = join_years(directory)
df_dict[name] = df
return df_dict
def get_team_df(directory, ignore_index=False):
    """Return one DataFrame containing every player on the team.

    Player frames are date-indexed (see the 2017-09-09 note in the module:
    pandas treats gaps in dates as gaps in data, which makes naive plots look
    bad — call ``df.reset_index()`` to recover a 0..n index for plotting).
    For multi-player analysis the date index is what keeps games aligned.

    :directory: path to the player stat directory (one folder per player).
    :ignore_index: forwarded to pandas; when True the rows are renumbered 0..n.
    :returns: concatenation of every player's DataFrame (empty if no players).
    """
    df_dict = get_player_dfs(directory)
    frames = list(df_dict.values())
    if not frames:
        return pd.DataFrame()
    # pd.concat replaces the per-player DataFrame.append loop: append was
    # deprecated and removed in pandas 2.0, and concat is O(n) overall.
    return pd.concat(frames, ignore_index=ignore_index)
def team_date_mean(team_df):
"""TODO: Docstring for team_mean.
:team_df: TODO
:stat: TODO
:returns: TODO
"""
return team_df.reset_index().groupby("Date").mean()
def team_scatter_plot(team_df_dict, x, y, filter=None):
"""TODO: Docstring for team_scatter_plot.
:team_df_dict: TODO
:returns: TODO
"""
num = plt.figure()
ax = plt.gca()
for name, df in team_df_dict.items():
if filter and filter(df):
plt.plot(df[x], df[y], "o", label=name)
if __name__ == "__main__":
# Example analysis.
plt.style.use("ggplot")
team_df = get_team_df("./player_stats/")
team_df_dict = get_player_dfs("./player_stats/")
top_percentile = team_df["pct"].quantile(.6)
filter_high = lambda df: df["pct"].mean() >= top_percentile
filter_low = lambda df: len(df["pct"].dropna()) > 0 and not filter_high(df)
team_scatter_plot(team_df_dict, "ta", "k", filter_high)
xs = np.linspace(0, 50)
plt.plot(xs, xs, c="k")
plt.xlabel("Total Attempts")
plt.ylabel("Kills")
plt.title("Players with mean PCT above 60th percentile")
plt.legend()
team_scatter_plot(team_df_dict, "ta", "k", filter_low)
xs = np.linspace(0, 45)
plt.plot(xs, xs, c="k")
plt.xlabel("Total Attempts")
plt.ylabel("Kills")
plt.title("Players with mean PCT below 60th percentile")
plt.legend()
plt.show()
| 0 | 0 | 0 |
4d339d92b9dadaa3174faa95a4d7f84f75429d9c | 1,757 | py | Python | tools/text.py | cdcai/NRC | ae7ec47cc4d6240ded55a3dc6dcc79a40d0f00d0 | [
"Apache-2.0"
] | 11 | 2019-12-27T00:25:59.000Z | 2021-07-05T08:12:12.000Z | tools/text.py | scotthlee/nrc | 6005dd36db05a94677ca2b2a4f05404f400066a4 | [
"Apache-2.0"
] | 1 | 2020-03-05T23:24:29.000Z | 2020-03-16T14:16:11.000Z | tools/text.py | scotthlee/nrc | 6005dd36db05a94677ca2b2a4f05404f400066a4 | [
"Apache-2.0"
] | 3 | 2019-10-08T13:52:42.000Z | 2021-01-26T02:54:11.000Z | '''Objects and methods to support text corpus storage and manipulation'''
import numpy as np
import pandas as pd
import re
import string
from nltk.tokenize import TreebankWordTokenizer
from sklearn.feature_extraction.text import CountVectorizer
# Looks up a dict key up by its values
# Uses get_key to lookup a sequence of words or characters
# Converts a text-type column of a categorical variable to integers
# Converts a list of tokens to an array of integers
# Pads a 1D sequence of integers (representing words)
| 33.788462 | 76 | 0.686397 | '''Objects and methods to support text corpus storage and manipulation'''
import numpy as np
import pandas as pd
import re
import string
from nltk.tokenize import TreebankWordTokenizer
from sklearn.feature_extraction.text import CountVectorizer
# Looks up a dict key up by its values
def get_key(value, dic, add_1=False, pad=0):
    """Reverse lookup: return the key in *dic* whose value equals *value*.

    :value: value to look up.
    :add_1: when True, non-pad values are incremented by one before lookup.
    :pad: sentinel value that maps to the literal string 'pad'.
    :returns: the matching key, or 'pad' for the padding sentinel.
    """
    if add_1 and value != pad:
        value = value + 1
    if value == pad:
        return 'pad'
    keys = list(dic.keys())
    values = list(dic.values())
    return keys[values.index(value)]
# Uses get_key to lookup a sequence of words or characters
def ints_to_text(values, dic, level='word', remove_bookends=True):
    """Decode an integer array back into text via reverse vocab lookup.

    Zero entries are treated as padding and dropped; when *remove_bookends*
    is True the first and last remaining tokens (start/end markers) are
    stripped as well. Tokens are space-joined at the 'word' level and
    concatenated directly otherwise.
    """
    kept = values[np.where(values != 0)[0]]
    if remove_bookends:
        kept = kept[1:-1]
    separator = ' ' if level == 'word' else ''
    return separator.join(get_key(v, dic) for v in kept)
# Converts a text-type column of a categorical variable to integers
def text_to_int(col):
    """Encode a text column of categorical labels as integer ids.

    :col: iterable of strings, one label per row.
    :returns: dict with 'values' (list of int ids in input order; None where
        the raw label is absent from the fitted vocabulary) and 'vocab'
        (label -> id mapping from CountVectorizer).
    """
    # Custom token_pattern keeps single-character tokens, which the
    # CountVectorizer default pattern would drop.
    vec = CountVectorizer(token_pattern="(?u)\\b\\w+\\b")
    vec.fit(col)
    vocab = vec.vocabulary_
    # NOTE(review): vocabulary keys are lowercased tokens, so looking up the
    # raw `code` returns None when casing or tokenisation differs -- assumes
    # inputs are single lowercase tokens; confirm.
    dict_values = [vocab.get(code) for code in col]
    return {'values':dict_values, 'vocab':vocab}
# Converts a list of tokens to an array of integers
def to_integer(tokens, vocab_dict, encode=False,
               subtract_1=False, dtype=np.uint32):
    """Map a sequence of tokens to a numpy array of vocabulary ids.

    :tokens: iterable of tokens (bytes when *encode* is True).
    :vocab_dict: token -> id mapping.
    :encode: when True, decode byte tokens to str first (errors ignored).
    :subtract_1: when True, shift ids down by one (0-based vocabulary).
    :dtype: dtype of the returned array.
    """
    if encode:
        tokens = [str(tok, errors='ignore') for tok in tokens]
    ids = np.array([vocab_dict.get(tok) for tok in tokens], dtype=dtype)
    return ids - 1 if subtract_1 else ids
# Pads a 1D sequence of integers (representing words)
def pad_integers(phrase, max_length, padding=0):
    """Right-pad a 1D integer sequence with *padding* up to *max_length*."""
    fill = np.repeat(padding, max_length - len(phrase))
    return np.concatenate((phrase, fill))
| 1,124 | 0 | 110 |
7adbef290bccd109587b8e35a92abf28a018c2b5 | 1,387 | py | Python | cell_tools/_general_tools/_funcs/_Harmony/_supporting_funcs/_plot_umap.py | mvinyard/cell-tools | 2482ccbe13c7a5cc06d575adefe0158026c8e03b | [
"MIT"
] | 1 | 2022-01-11T20:00:44.000Z | 2022-01-11T20:00:44.000Z | cell_tools/_general_tools/_funcs/_Harmony/_supporting_funcs/_plot_umap.py | mvinyard/cell-tools | 2482ccbe13c7a5cc06d575adefe0158026c8e03b | [
"MIT"
] | null | null | null | cell_tools/_general_tools/_funcs/_Harmony/_supporting_funcs/_plot_umap.py | mvinyard/cell-tools | 2482ccbe13c7a5cc06d575adefe0158026c8e03b | [
"MIT"
] | null | null | null |
# _plot_umap.py
__module_name__ = "_plot_umap.py"
__author__ = ", ".join(["Michael E. Vinyard"])
__email__ = ", ".join(["vinyard@g.harvard.edu",])
# package imports #
# --------------- #
import matplotlib.pyplot as plt
import numpy as np
import vinplots
def _setup_plot():
    """Build a two-panel vinplots figure and return (plot, first axes)."""
    plot = vinplots.Plot()
    plot.construct(nplots=2, ncols=2, figsize_width=2, figsize_height=1.2)
    # Keep only the bottom/left spines, greyed and offset outward.
    plot.style(spines_to_delete=["top", "right"],
               color="grey",
               spines_to_color=['bottom', 'left'],
               spines_positioning_amount=5)
    # Only the first panel is returned for drawing.
    ax = plot.AxesDict[0][0]
    return plot, ax
def _plot_umap(adata, umap_key, plot_by, colors_dict=False):
    """Scatter the 2D embedding stored in ``adata.obsm[umap_key]``, coloured
    by the categories of ``adata.obs[plot_by]``.

    :colors_dict: optional {category: colour}; when falsy the SHAREseq
        palette is used, with colours assigned in enumeration order.
    :returns: the vinplots Plot object from _setup_plot().
    """
    try:
        adata.obs = adata.obs.reset_index()
    # NOTE(review): bare except silently swallows any reset_index failure --
    # presumably guarding an already-reset index; confirm.
    except:
        pass
    umap = adata.obsm[umap_key]
    if not colors_dict:
        c = vinplots.color_palettes.SHAREseq
    plot, ax = _setup_plot()
    for n, i in enumerate(adata.obs[plot_by].unique()):
        if colors_dict:
            c_ = colors_dict[i]
        else:
            c_ = c[n]
        # Positional row indices of category i (hence the index reset above).
        idx = adata.obs.loc[adata.obs[plot_by] == i].index.astype(int)
        ax.scatter(umap[:, 0][idx], umap[:, 1][idx], c=c_, label=i, s=5, alpha=0.8)
    ax.set_title("Harmonized Data")
    ax.legend(bbox_to_anchor=(1.05, 1.05), edgecolor="white", markerscale=2)
    plt.tight_layout()
    return plot
| 22.737705 | 83 | 0.586157 |
# _plot_umap.py
__module_name__ = "_plot_umap.py"
__author__ = ", ".join(["Michael E. Vinyard"])
__email__ = ", ".join(["vinyard@g.harvard.edu",])
# package imports #
# --------------- #
import matplotlib.pyplot as plt
import numpy as np
import vinplots
def _setup_plot():
""""""
plot = vinplots.Plot()
plot.construct(nplots=2, ncols=2, figsize_width=2, figsize_height=1.2)
plot.style(spines_to_delete=["top", "right"],
color="grey",
spines_to_color=['bottom', 'left'],
spines_positioning_amount=5)
ax = plot.AxesDict[0][0]
return plot, ax
def _plot_umap(adata, umap_key, plot_by, colors_dict=False):
""""""
try:
adata.obs = adata.obs.reset_index()
except:
pass
umap = adata.obsm[umap_key]
if not colors_dict:
c = vinplots.color_palettes.SHAREseq
plot, ax = _setup_plot()
for n, i in enumerate(adata.obs[plot_by].unique()):
if colors_dict:
c_ = colors_dict[i]
else:
c_ = c[n]
idx = adata.obs.loc[adata.obs[plot_by] == i].index.astype(int)
ax.scatter(umap[:, 0][idx], umap[:, 1][idx], c=c_, label=i, s=5, alpha=0.8)
ax.set_title("Harmonized Data")
ax.legend(bbox_to_anchor=(1.05, 1.05), edgecolor="white", markerscale=2)
plt.tight_layout()
return plot
| 0 | 0 | 0 |
1e0166b4bc53ca786baa08f4f688ff8c9eeaf090 | 1,703 | py | Python | ginit/__init__.py | Drakkar-Software/init-generator | ffa4d9f53f6626f9f5788c791808d84f83cac6e2 | [
"MIT"
] | 2 | 2020-09-28T22:12:51.000Z | 2021-02-07T03:17:47.000Z | ginit/__init__.py | Drakkar-Software/ginit | ffa4d9f53f6626f9f5788c791808d84f83cac6e2 | [
"MIT"
] | null | null | null | ginit/__init__.py | Drakkar-Software/ginit | ffa4d9f53f6626f9f5788c791808d84f83cac6e2 | [
"MIT"
] | null | null | null | # Drakkar-Software ginit
# Copyright (c) Drakkar-Software, All rights reserved.
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so.
from ginit import util
from ginit.util import (get_python_path_from_path, drop_file_extension, )
from ginit import module
from ginit.module import (ModuleVisitor)
from ginit import visitor
from ginit.visitor import (visit_path)
__project__ = "ginit"
__version__ = "1.1.0"
FILE_TO_IGNORE = ['__init__', '__main__']
PATCHER_FILES_TO_IGNORE = ['__init__.py', '__init__.pxd']
FOLDERS_TO_IGNORE = ['__pycache__']
FUNCTIONS_TO_IGNORE = ['__init__', '__str__', '__repr__', '__del__']
DIRECTORY_MODULES = "."
INIT_SEPARATOR = ", "
IMPORT_MODULE_SEPARATOR = "."
DEFAULT_IMPORT_PATCH_MAX_DEPTH = 2
PYTHON_IMPORT = "import"
PYTHON_INIT = "__init__.py"
PYTHON_EXTS = [".py"]
CYTHON_IMPORT = "cimport"
CYTHON_INIT = "__init__.pxd"
CYTHON_EXTS = [".pxd", ".pyx"]
__all__ = ['__project__', '__version__',
'ModuleVisitor', 'visit_path',
'get_python_path_from_path', 'drop_file_extension',
'DIRECTORY_MODULES', 'FILE_TO_IGNORE', 'FOLDERS_TO_IGNORE', 'PATCHER_FILES_TO_IGNORE',
'DEFAULT_IMPORT_PATCH_MAX_DEPTH', 'IMPORT_MODULE_SEPARATOR',
'PYTHON_INIT', 'PYTHON_IMPORT', 'PYTHON_EXTS',
'CYTHON_INIT', 'CYTHON_IMPORT', 'CYTHON_EXTS']
| 34.755102 | 97 | 0.732824 | # Drakkar-Software ginit
# Copyright (c) Drakkar-Software, All rights reserved.
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so.
from ginit import util
from ginit.util import (get_python_path_from_path, drop_file_extension, )
from ginit import module
from ginit.module import (ModuleVisitor)
from ginit import visitor
from ginit.visitor import (visit_path)
__project__ = "ginit"
__version__ = "1.1.0"
FILE_TO_IGNORE = ['__init__', '__main__']
PATCHER_FILES_TO_IGNORE = ['__init__.py', '__init__.pxd']
FOLDERS_TO_IGNORE = ['__pycache__']
FUNCTIONS_TO_IGNORE = ['__init__', '__str__', '__repr__', '__del__']
DIRECTORY_MODULES = "."
INIT_SEPARATOR = ", "
IMPORT_MODULE_SEPARATOR = "."
DEFAULT_IMPORT_PATCH_MAX_DEPTH = 2
PYTHON_IMPORT = "import"
PYTHON_INIT = "__init__.py"
PYTHON_EXTS = [".py"]
CYTHON_IMPORT = "cimport"
CYTHON_INIT = "__init__.pxd"
CYTHON_EXTS = [".pxd", ".pyx"]
__all__ = ['__project__', '__version__',
'ModuleVisitor', 'visit_path',
'get_python_path_from_path', 'drop_file_extension',
'DIRECTORY_MODULES', 'FILE_TO_IGNORE', 'FOLDERS_TO_IGNORE', 'PATCHER_FILES_TO_IGNORE',
'DEFAULT_IMPORT_PATCH_MAX_DEPTH', 'IMPORT_MODULE_SEPARATOR',
'PYTHON_INIT', 'PYTHON_IMPORT', 'PYTHON_EXTS',
'CYTHON_INIT', 'CYTHON_IMPORT', 'CYTHON_EXTS']
| 0 | 0 | 0 |
63872c55de4b201e80afeea714c46f3a3ea89fdc | 1,443 | py | Python | app/models/games_cached.py | Yorchly/CatchMyGame | c1c9035f7ca98d20c0f6bc95d963fbdd25f1f513 | [
"MIT"
] | null | null | null | app/models/games_cached.py | Yorchly/CatchMyGame | c1c9035f7ca98d20c0f6bc95d963fbdd25f1f513 | [
"MIT"
] | null | null | null | app/models/games_cached.py | Yorchly/CatchMyGame | c1c9035f7ca98d20c0f6bc95d963fbdd25f1f513 | [
"MIT"
] | null | null | null | from datetime import datetime
from app.models.game import Game
| 30.0625 | 91 | 0.631324 | from datetime import datetime
from app.models.game import Game
class GamesCached:
    """In-memory cache of Game objects with a size cap and a cleanup timer.

    NOTE(review): every attribute below is class-level, so all GamesCached
    instances share one cache (mutations through ``self`` hit the shared
    objects) -- confirm this singleton-style behaviour is intended.
    """
    games = []  # cached Game instances (shared across instances, see note)
    last_update = datetime.now()  # timestamp of the last cleanup check reset
    time_to_clean = float(2)  # minutes of age before the cache is wiped
    games_max_len = 250  # hard cap before entries start being overwritten
    last_replaced_position = 0  # next slot to overwrite once the cap is hit
    def search_game_by_name(self, name):
        # Substring match against each cached game's name.
        return list(filter(lambda x: name in x.name, self.games))
    def search_game_by_name_in_search(self, search_name):
        # Exact match against the search term each game was stored under.
        return list(filter(lambda x: search_name == x.search, self.games))
    def clean(self):
        # Empties the (shared) cache in place.
        self.games.clear()
    def check_time_and_clean(self):
        # NOTE(review): timedelta.seconds ignores whole days, so a cache idle
        # for 24h+ may not register as stale -- diff.total_seconds() would be
        # safer; confirm.
        diff = datetime.now() - self.last_update
        if (diff.seconds / 60) >= self.time_to_clean:
            # NOTE(review): assignment via self creates an instance attribute
            # shadowing the class-level last_update -- confirm intended.
            self.last_update = datetime.now()
            self.clean()
    def add_game(self, game: Game):
        """
        Add game to games list. If list has reached the limit (games_max_len) this function
        will overwrite elements in positions specified by last_replaced_position.
        :param game:
        :type Game: Game instance
        :return:
        """
        if len(self.games) == self.games_max_len:
            # Cache full: overwrite round-robin, wrapping back to slot 0.
            if self.last_replaced_position == self.games_max_len:
                self.last_replaced_position = 0
            self.games[self.last_replaced_position] = game
            self.last_replaced_position += 1
        else:
            self.games.append(game)
    def add_games(self, games: list):
        # Bulk insert; each element goes through add_game's overwrite logic.
        for game in games:
            self.add_game(game)
| 466 | 889 | 23 |
7b443300b95b485adf09ba28415ae6d947e8bbc7 | 184 | py | Python | vindauga/constants/colors.py | gabbpuy/vindauga | d4a51a618d60e83d82fd5fee585d08ff288484f3 | [
"BSD-2-Clause"
] | 5 | 2019-07-03T16:01:46.000Z | 2021-12-22T10:01:04.000Z | vindauga/constants/colors.py | gabbpuy/vindauga | d4a51a618d60e83d82fd5fee585d08ff288484f3 | [
"BSD-2-Clause"
] | null | null | null | vindauga/constants/colors.py | gabbpuy/vindauga | d4a51a618d60e83d82fd5fee585d08ff288484f3 | [
"BSD-2-Clause"
] | 1 | 2020-09-22T14:25:13.000Z | 2020-09-22T14:25:13.000Z | # -*- coding: utf-8 -*-
cmColorForegroundChanged = 71
cmColorBackgroundChanged = 72
cmColorSet = 73
cmNewColorItem = 74
cmNewColorIndex = 75
cmSaveColorIndex = 76
cmSetColorIndex = 77
| 20.444444 | 29 | 0.771739 | # -*- coding: utf-8 -*-
cmColorForegroundChanged = 71
cmColorBackgroundChanged = 72
cmColorSet = 73
cmNewColorItem = 74
cmNewColorIndex = 75
cmSaveColorIndex = 76
cmSetColorIndex = 77
| 0 | 0 | 0 |
7636f6c2c55f51c35640db72168f28063cdff74c | 915 | py | Python | src/jose/jwa/sigs.py | hdknr/jose | d872407e9f3b3a0262e6bb1cdb599b5c4c1d9ee4 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/jose/jwa/sigs.py | hdknr/jose | d872407e9f3b3a0262e6bb1cdb599b5c4c1d9ee4 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2015-01-23T09:37:17.000Z | 2015-01-23T09:37:17.000Z | src/jose/jwa/sigs.py | hdknr/jose | d872407e9f3b3a0262e6bb1cdb599b5c4c1d9ee4 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | from jose.jwa import keys
from enum import Enum
__all__ = ['SigEnum', 'SigDict', ]
SigDict = dict((i.name, i.name) for i in SigEnum)
| 19.891304 | 53 | 0.538798 | from jose.jwa import keys
from enum import Enum
__all__ = ['SigEnum', 'SigDict', ]
class SigEnum(Enum):
    """JWS signature algorithm identifiers (JWA ``alg`` values).

    The first letter of each member name selects the implementation family:
    H* = HMAC, R*/P* = RSA, E* = EC, N(one) = unsigned.
    """
    HS256 = 'HS256'
    HS384 = 'HS384'
    HS512 = 'HS512'
    RS256 = 'RS256'
    RS384 = 'RS384'
    RS512 = 'RS512'
    PS256 = 'PS256'
    PS384 = 'PS384'
    PS512 = 'PS512'
    ES256 = 'ES256'
    ES384 = 'ES384'
    ES512 = 'ES512'
    NONE = 'none'
    @property
    def signer(self):
        """Return the signer object named after this algorithm."""
        # NOTE(review): these look like Python-2 implicit relative imports of
        # sibling modules in jose.jwa; under Python 3 `import hmac` would pick
        # up the stdlib module instead -- confirm the target interpreter.
        import rsa
        import ec
        import hmac
        import misc
        # First letter of the member name selects the module; N -> misc
        # handles the 'none' algorithm.
        mod = dict(H=hmac, R=rsa,
                   P=rsa, E=ec, N=misc)[self.name[0]]
        return getattr(mod, self.name)
    @property
    def key_type(self):
        """Map the algorithm family to its JWK key type (oct / RSA / EC)."""
        return dict(
            H=keys.KeyTypeEnum.OCT,
            N=keys.KeyTypeEnum.OCT,
            R=keys.KeyTypeEnum.RSA,
            P=keys.KeyTypeEnum.RSA,
            E=keys.KeyTypeEnum.EC,)[self.name[0]]
SigDict = dict((i.name, i.name) for i in SigEnum)
| 413 | 343 | 23 |
165c3fc8a9da33802c35462637389a3a888c8258 | 8,625 | py | Python | diagnostics/ngtf_graph_viewer.py | sreeni-k/ngraph-tf | 4280a49ecffb92bb1ffa8ea212b22e0db8729f6e | [
"Apache-2.0"
] | null | null | null | diagnostics/ngtf_graph_viewer.py | sreeni-k/ngraph-tf | 4280a49ecffb92bb1ffa8ea212b22e0db8729f6e | [
"Apache-2.0"
] | null | null | null | diagnostics/ngtf_graph_viewer.py | sreeni-k/ngraph-tf | 4280a49ecffb92bb1ffa8ea212b22e0db8729f6e | [
"Apache-2.0"
] | null | null | null | # ==============================================================================
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import print_function
import ngraph_bridge
import tensorflow as tf
import numpy as np
import re
import os
import pdb
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.python.platform import gfile
import argparse
import pickle as pkl
def modify_node_names(graph_def, node_map):
    '''
    Accepts a graphdef and a map of node name to new node name.
    Renames mapped nodes in place and rewires every input reference to use
    the new names, preserving control-dependency markers ("^name") and
    output slots ("name:0"). Returns the modified graphdef.
    '''
    for node in graph_def.node:
        if node.name not in node_map:
            continue
        old_name = node.name
        new_name = node_map[node.name]
        # print("Replacing: ", node.name, " with ", new_name)
        node.name = new_name
        # Rewire the inputs of every node that referenced the renamed one.
        # This must live inside the rename branch: it previously ran for
        # every node, crashing with a NameError on old_name/new_name whenever
        # the first node visited was not in node_map.
        for _node in graph_def.node:
            for idx, inp_name in enumerate(_node.input):
                # removing the part after ':' in the name
                # removing ^ if present (control dependency)
                colon_split = inp_name.split(':')
                assert len(colon_split) <= 2
                control_dependency_part = '^' if inp_name.startswith('^') else ''
                colon_part = '' if len(
                    colon_split) == 1 else ':' + colon_split[1]
                if inp_name.lstrip('^').split(':')[0] == old_name:
                    _node.input[idx] = control_dependency_part + \
                        new_name + colon_part
    # TODO: Do we need to edit this anywhere else other than inputs?
    return graph_def
def sanitize_node_names(graph_def):
    '''
    Strip the leading '_' from node names: a leading underscore marks
    internal ops, which might make TensorBoard complain.
    '''
    underscore_map = {}
    for node in graph_def.node:
        if node.name[0] == "_":
            underscore_map[node.name] = node.name[1:]
    return modify_node_names(graph_def, underscore_map)
def prepend_to_name(graph_def, node_map):
    '''
    Prefix each mapped node's name with the string from *node_map*
    (presumably a scope tag marking an encapsulated cluster).
    '''
    prefixed = {}
    for node in graph_def.node:
        if node.name in node_map:
            prefixed[node.name] = node_map[node.name] + node.name
    return modify_node_names(graph_def, prefixed)
def load_file(graph_file, input_binary, modifier_function_list=None):
    '''
    Load a GraphDef from a .pb (input_binary=True) or .pbtxt file and apply
    each modifier function to it in order.

    :graph_file: path to the serialized graph.
    :input_binary: True for a binary protobuf, False for a text protobuf.
    :modifier_function_list: callables GraphDef -> GraphDef, applied in order.
    '''
    if not gfile.Exists(graph_file):
        raise Exception("Input graph file '" + graph_file + "' does not exist!")
    graphdef = graph_pb2.GraphDef()
    # Binary protobufs must be read as bytes: ParseFromString() rejects str
    # and text-mode reads can corrupt the payload. Text protos stay in "r".
    with open(graph_file, "rb" if input_binary else "r") as f:
        protobuf_str = f.read()
    try:
        if input_binary:
            graphdef.ParseFromString(protobuf_str)
        else:
            text_format.Merge(protobuf_str, graphdef)
    except Exception:
        raise Exception("Failed to read pb or pbtxt. input_binary is " +
                        str(input_binary) + " maybe try flipping it?")
    # `or []` keeps the old default-[] behaviour without a mutable default arg.
    for modifier_function in (modifier_function_list or []):
        graphdef = modifier_function(graphdef)
    return graphdef
visualizations_supported = [protobuf_to_dot, protobuf_to_grouped_tensorboard]
if __name__ == "__main__":
    # Command-line entry point: parse arguments and dispatch to the selected
    # backend in `visualizations_supported` (0 = dot, 1 = tensorboard).
    helptxt = '''
    Convert protobuf to different visualizations (dot, tensorboard).
    Sample usage from command line:
    python ngtf_graph_viewer.py pbtxtfile.pbtxt ./vis # read pbtxt and generate TB
    python ngtf_graph_viewer.py -v 1 pbtxtfile.pbtxt ./vis # read pbtxt and generate dot
    python ngtf_graph_viewer.py -b pbtxtfile.pb ./vis # read pb and generate TB
    python ngtf_graph_viewer.py -b -v 1 pbtxtfile.pb ./vis # read pb and generate dot
    python ngtf_graph_viewer.py -c nodemap.pkl pbtxtfile.pbtxt ./vis # read pbtxt, remap node names and generate TB
    One can also import the file and use its functions
    '''
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter, description=helptxt)
    parser.add_argument("input", help="The input protobuf (pb or pbtxt)")
    parser.add_argument("out", help="The output directory")
    parser.add_argument(
        '-b',
        dest='binary',
        action='store_true',
        help=
        "Add this flag to indicate its a .pb. Else it is assumed to be a .pbtxt"
    )
    parser.add_argument(
        "-v",
        "--visualize",
        type=int,
        default=1,
        help=
        "Enter 0 (protobuf->dot) or 1 (protobuf->Tensorboard). By default it converts to tensorboard"
    )
    parser.add_argument(
        "-c",
        "--cluster",
        help=
        "An file that contains the node-to-cluster map that can be used to group them into clusters"
    )
    args = parser.parse_args()
    # Optional pickled {node_name: cluster_prefix} mapping used to group nodes.
    node_map = {} if args.cluster is None else pkl.load(
        open(args.cluster, 'rb'))
    visualizations_supported[args.visualize](args.input, args.out, args.binary,
                                             node_map)
| 39.383562 | 125 | 0.616696 | # ==============================================================================
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import print_function
import ngraph_bridge
import tensorflow as tf
import numpy as np
import re
import os
import pdb
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.python.platform import gfile
import argparse
import pickle as pkl
def modify_node_names(graph_def, node_map):
    '''
    Accepts a graphdef and a map of node name to new node name.
    Renames mapped nodes in place and rewires every input reference to use
    the new names, preserving control-dependency markers ("^name") and
    output slots ("name:0"). Returns the modified graphdef.
    '''
    for node in graph_def.node:
        if node.name not in node_map:
            continue
        old_name = node.name
        new_name = node_map[node.name]
        # print("Replacing: ", node.name, " with ", new_name)
        node.name = new_name
        # Rewire the inputs of every node that referenced the renamed one.
        # This must live inside the rename branch: it previously ran for
        # every node, crashing with a NameError on old_name/new_name whenever
        # the first node visited was not in node_map.
        for _node in graph_def.node:
            for idx, inp_name in enumerate(_node.input):
                # removing the part after ':' in the name
                # removing ^ if present (control dependency)
                colon_split = inp_name.split(':')
                assert len(colon_split) <= 2
                control_dependency_part = '^' if inp_name.startswith('^') else ''
                colon_part = '' if len(
                    colon_split) == 1 else ':' + colon_split[1]
                if inp_name.lstrip('^').split(':')[0] == old_name:
                    _node.input[idx] = control_dependency_part + \
                        new_name + colon_part
    # TODO: Do we need to edit this anywhere else other than inputs?
    return graph_def
def sanitize_node_names(graph_def):
'''
remove '_' from node names. '_' at the beginning of node names indicate internal ops
which might cause TB to complain
'''
return modify_node_names(graph_def, {
node.name: node.name[1:]
for node in graph_def.node
if node.name[0] == "_"
})
def prepend_to_name(graph_def, node_map):
'''
prepend an extra string to the node name (presumably a scope, to denote encapsulate)
'''
return modify_node_names(
graph_def, {
node.name: node_map[node.name] + node.name
for node in graph_def.node
if node.name in node_map
})
def load_file(graph_file, input_binary, modifier_function_list=None):
    '''
    Load a GraphDef from a .pb (input_binary=True) or .pbtxt file and apply
    each modifier function to it in order.

    :graph_file: path to the serialized graph.
    :input_binary: True for a binary protobuf, False for a text protobuf.
    :modifier_function_list: callables GraphDef -> GraphDef, applied in order.
    '''
    if not gfile.Exists(graph_file):
        raise Exception("Input graph file '" + graph_file + "' does not exist!")
    graphdef = graph_pb2.GraphDef()
    # Binary protobufs must be read as bytes: ParseFromString() rejects str
    # and text-mode reads can corrupt the payload. Text protos stay in "r".
    with open(graph_file, "rb" if input_binary else "r") as f:
        protobuf_str = f.read()
    try:
        if input_binary:
            graphdef.ParseFromString(protobuf_str)
        else:
            text_format.Merge(protobuf_str, graphdef)
    except Exception:
        raise Exception("Failed to read pb or pbtxt. input_binary is " +
                        str(input_binary) + " maybe try flipping it?")
    # `or []` keeps the old default-[] behaviour without a mutable default arg.
    for modifier_function in (modifier_function_list or []):
        graphdef = modifier_function(graphdef)
    return graphdef
def preprocess(input_filename, out_dir, input_binary, node_map):
    '''
    Load a graph file, remap/sanitize node names and ensure out_dir exists.

    Note: node_map must be applied before sanitize_node_names.
    Else sanitize_node_names might change the node names, which might
    become unrecognizable to node_map.
    '''
    modifiers = [
        lambda gdef: prepend_to_name(gdef, node_map),  # param is a GraphDef, not a str
        sanitize_node_names,
    ]
    gdef = load_file(input_filename, input_binary, modifiers)
    os.makedirs(out_dir, exist_ok=True)  # create output dir if it does not exist
    return gdef
def graphdef_to_dot(gdef, dot_output):
    '''
    Write a GraphDef to a Graphviz DOT file.

    Each node becomes a DOT vertex labelled with its op type; for every
    input edge, the ':<output idx>' suffix and any leading '^'
    (control dependency) are stripped from the producer's name.
    '''
    # Bug fix: must be text mode ("w"), not "wb" — print() emits str,
    # which a binary-mode file rejects under Python 3.
    with open(dot_output, "w") as f:
        print("digraph graphname {", file=f)
        for node in gdef.node:
            output_name = node.name
            print(
                " \"" + output_name + "\" [label=\"" + node.op + "\"];",
                file=f)
            for input_full_name in node.input:
                parts = input_full_name.split(":")
                input_name = re.sub(r"^\^", "", parts[0])
                print(
                    " \"" + input_name + "\" -> \"" + output_name + "\";",
                    file=f)
        print("}", file=f)
    print("\n" + ('=-' * 30))
    print("Created DOT file '" + dot_output + "'.")
    print("Can be converted to pdf using: dot -Tpdf " + dot_output + " -o " +
          dot_output + ".pdf")
    print('=-' * 30)
def protobuf_to_dot(input_filename, dot_dir, input_binary=False, node_map=None):
    '''
    Convert a protobuf graph file to a .dot file inside dot_dir.

    node_map optionally maps node names to a prefix string used to
    group them before conversion.
    '''
    if node_map is None:  # avoid a mutable default argument
        node_map = {}
    gdef = preprocess(input_filename, dot_dir, input_binary, node_map)
    graphdef_to_dot(
        gdef,
        dot_dir.rstrip('/') + '/' + os.path.basename(input_filename) + '.dot')
def graphdef_to_tensorboard(gdef, tensorboard_output):
    '''Write a GraphDef as Tensorboard event files under tensorboard_output.'''
    # FileWriter has deprecated graphdefs as inputs and prefers graphs, so
    # import the graphdef into a session graph and hand that over instead.
    with tf.Session() as sess:
        tf.import_graph_def(gdef)
        writer = tf.summary.FileWriter(tensorboard_output, sess.graph)
        # TODO: try with tf master
        # wont work now if we have NGraphVariable, NGraphEncapsulateOp
        # TODO: How about supporting NGraphVariable and NGraphEncapsulateOp by switching their optype with something TB knows
        writer.flush()
        writer.close()
    # It seems NGraphVariable and NGraphEncapsulateOp are registered in C++ but not in python
    banner = '=-' * 30
    print("\n" + banner + "\nTo view Tensorboard:")
    print("1) Run this command: tensorboard --logdir " + tensorboard_output)
    print("2) Go to the URL it provides or http://localhost:6006/\n" +
          banner + "\n")
def protobuf_to_grouped_tensorboard(input_filename,
                                    tensorboard_dir,
                                    input_binary=False,
                                    node_map=None):
    '''
    Convert a protobuf graph file to Tensorboard logs in tensorboard_dir,
    optionally grouping nodes via node_map (name -> prefix string).
    '''
    if node_map is None:  # avoid a mutable default argument
        node_map = {}
    gdef = preprocess(input_filename, tensorboard_dir, input_binary, node_map)
    graphdef_to_tensorboard(gdef, tensorboard_dir)
# Index 0 -> DOT, index 1 -> Tensorboard (must match the -v flag below).
visualizations_supported = [protobuf_to_dot, protobuf_to_grouped_tensorboard]

if __name__ == "__main__":
    # Bug fix in the samples below: -v 1 is Tensorboard, so the dot
    # examples must use -v 0 (matches visualizations_supported ordering).
    helptxt = '''
    Convert protobuf to different visualizations (dot, tensorboard).
    Sample usage from command line:
    python ngtf_graph_viewer.py pbtxtfile.pbtxt ./vis # read pbtxt and generate TB
    python ngtf_graph_viewer.py -v 0 pbtxtfile.pbtxt ./vis # read pbtxt and generate dot
    python ngtf_graph_viewer.py -b pbtxtfile.pb ./vis # read pb and generate TB
    python ngtf_graph_viewer.py -b -v 0 pbtxtfile.pb ./vis # read pb and generate dot
    python ngtf_graph_viewer.py -c nodemap.pkl pbtxtfile.pbtxt ./vis # read pbtxt, remap node names and generate TB
    One can also import the file and use its functions
    '''
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter, description=helptxt)
    parser.add_argument("input", help="The input protobuf (pb or pbtxt)")
    parser.add_argument("out", help="The output directory")
    parser.add_argument(
        '-b',
        dest='binary',
        action='store_true',
        help=
        "Add this flag to indicate its a .pb. Else it is assumed to be a .pbtxt"
    )
    parser.add_argument(
        "-v",
        "--visualize",
        type=int,
        default=1,
        choices=[0, 1],  # anything else would raise IndexError below
        help=
        "Enter 0 (protobuf->dot) or 1 (protobuf->Tensorboard). By default it converts to tensorboard"
    )
    parser.add_argument(
        "-c",
        "--cluster",
        help=
        "An file that contains the node-to-cluster map that can be used to group them into clusters"
    )
    args = parser.parse_args()
    # Unpickle the optional node-name -> cluster-prefix map.
    node_map = {} if args.cluster is None else pkl.load(
        open(args.cluster, 'rb'))
    visualizations_supported[args.visualize](args.input, args.out, args.binary,
                                             node_map)
| 2,836 | 0 | 115 |
c02a52b4b0ef55ecee2c813d47117274a7aa3a60 | 928 | py | Python | jp.atcoder/abc146/abc146_d/8717392.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc146/abc146_d/8717392.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc146/abc146_d/8717392.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import sys
from collections import deque
import numpy as np
if __name__ == '__main__':
main()
| 21.581395 | 75 | 0.43319 | import sys
from collections import deque
import numpy as np
def main():
n = int(sys.stdin.readline().rstrip())
ab = np.array(sys.stdin.read().split(), dtype=np.int64).reshape(-1, 2)
G = [[] for _ in range(n + 1)]
for a, b in ab:
G[a].append(b)
G[b].append(a)
parent = np.zeros(n + 1, dtype=np.int64)
color = np.zeros((n + 1, n + 1), dtype=np.int64)
root = 1
ng = None
q = deque()
q.append((root, ng))
while q:
x, ng = q.popleft()
c = 1
for y in G[x]:
if y == parent[x]:
continue
parent[y] = x
if c == ng:
c += 1
color[x][y] = c
color[y][x] = c
q.append((y, c))
c += 1
print(np.amax(color))
for a, b in ab:
print(color[a][b])
if __name__ == '__main__':
main()
| 795 | 0 | 25 |
c6cf6b79e59cc7249507fefb539ab9804b04cced | 2,590 | py | Python | netlists/oceanv3/test/test_KPIs.py | AlgoveraAI/tokenspice | e64531ac09e51bcb2d34908fd74f809834767408 | [
"MIT"
] | 81 | 2021-08-01T10:00:15.000Z | 2022-03-30T15:05:52.000Z | netlists/oceanv3/test/test_KPIs.py | AlgoveraAI/tokenspice | e64531ac09e51bcb2d34908fd74f809834767408 | [
"MIT"
] | 93 | 2021-08-03T16:21:54.000Z | 2022-03-22T05:36:51.000Z | netlists/oceanv3/test/test_KPIs.py | AlgoveraAI/tokenspice | e64531ac09e51bcb2d34908fd74f809834767408 | [
"MIT"
] | 15 | 2021-08-10T04:02:32.000Z | 2022-03-30T05:51:56.000Z | from pytest import approx
from enforce_typing import enforce_types
from agents.PoolAgent import PoolAgent
from util import globaltokens
from util.base18 import fromBase18
from .. import KPIs
@enforce_types
@enforce_types
@enforce_types
@enforce_types
@enforce_types
| 27.849462 | 87 | 0.698842 | from pytest import approx
from enforce_typing import enforce_types
from agents.PoolAgent import PoolAgent
from util import globaltokens
from util.base18 import fromBase18
from .. import KPIs
@enforce_types
class MockAgentDict(dict):  # subset of engine.AgentDict
    def __init__(self, *arg, **kw):  # pylint: disable=useless-super-delegation
        super().__init__(*arg, **kw)

    def filterToPool(self):
        # Pools are exactly the PoolAgent instances.
        return self.filterByClass(PoolAgent)

    def filterByClass(self, _class):
        # Note: the result is re-keyed by each agent's own .name attribute,
        # not by the key it was stored under.
        matching = [a for a in self.values() if isinstance(a, _class)]
        return MockAgentDict({a.name: a for a in matching})
@enforce_types
class MockSimState:
    """Minimal stand-in for engine.SimState: just an agent registry."""

    def __init__(self):
        self.agents = MockAgentDict()

    def getAgent(self, name: str):
        """Look up a registered agent by name."""
        return self.agents[name]
@enforce_types
class FooAgent:
    """Test double reporting fixed DT / BPT holdings."""

    def __init__(self, name: str):
        self.name = name
        self._DT = 3.0  # magic number
        self._BPT = 5.0  # ""

    def DT(self, dt) -> float:  # pylint: disable=unused-argument
        """Datatokens held — constant for the mock."""
        return self._DT

    def BPT(self, pool) -> float:  # pylint: disable=unused-argument
        """Pool tokens held — constant for the mock."""
        return self._BPT
@enforce_types
def test_get_OCEAN_in_DTs(alice_info):
    """get_OCEAN_in_DTs == (DT held) * (DT spot price in OCEAN)."""
    state = MockSimState()
    state.agents["agent1"] = PoolAgent("pool_agent", alice_info.pool)
    foo_agent = FooAgent("foo_agent")

    OCEAN_address = globaltokens.OCEAN_address()
    price = fromBase18(
        alice_info.pool.getSpotPrice(OCEAN_address, alice_info.DT.address))

    amt_DT = foo_agent.DT("bar")
    assert amt_DT == 3.0
    assert KPIs.get_OCEAN_in_DTs(state, foo_agent) == amt_DT * price
@enforce_types
def test_get_OCEAN_in_BPTs(alice_info):
    """get_OCEAN_in_BPTs scales total pool value by the agent's BPT share."""
    state = MockSimState()
    pool, DT = alice_info.pool, alice_info.DT
    state.agents["agent1"] = PoolAgent("pool_agent", pool)
    foo_agent = FooAgent("foo_agent")

    OCEAN_address = globaltokens.OCEAN_address()
    price = fromBase18(pool.getSpotPrice(OCEAN_address, DT.address))
    # Total pool value, denominated in OCEAN.
    pool_value = (price * fromBase18(pool.getBalance(DT.address))
                  + fromBase18(pool.getBalance(OCEAN_address)))

    # case: foo_agent no BPTs
    assert KPIs.get_OCEAN_in_BPTs(state, foo_agent) == approx(0.0 * pool_value)

    # case: foo_agent has all BPTs
    foo_agent._BPT = pool.totalSupply()  # make pool think agent has 100% of BPTs
    assert KPIs.get_OCEAN_in_BPTs(state, foo_agent) == 1.0 * pool_value
| 1,962 | 27 | 323 |
0452640dc396dae7bf11c90b11baf95a47a472b4 | 1,542 | py | Python | hyde/environment.py | ty-porter/hyde | 69387e072e80adf2b2f72c3660da8093d6e2297c | [
"MIT"
] | null | null | null | hyde/environment.py | ty-porter/hyde | 69387e072e80adf2b2f72c3660da8093d6e2297c | [
"MIT"
] | null | null | null | hyde/environment.py | ty-porter/hyde | 69387e072e80adf2b2f72c3660da8093d6e2297c | [
"MIT"
] | null | null | null | from hyde.errors import BaseError
| 25.278689 | 70 | 0.606355 | from hyde.errors import BaseError
class RuntimeError(BaseError):
    # NOTE(review): intentionally shadows the builtin RuntimeError inside
    # this module; raised by Environment for undefined-variable accesses.
    pass
class Environment:
    """A lexical scope mapping variable names to values.

    Environments chain through `enclosing`: lookups/assignments that
    miss locally are delegated to the parent scope.
    """

    def __init__(self, enclosing = None):
        self.values = {}            # name -> value for this scope
        self.enclosing = enclosing  # parent scope, or None for the global scope

    def assign(self, name, value):
        """Set an already-defined variable, searching up the scope chain.

        `name` is a token with a `.lexeme`; raises RuntimeError if the
        variable was never defined anywhere in the chain.
        """
        if name.lexeme in self.values:
            self.values[name.lexeme] = value
            return

        if self.enclosing is not None:
            return self.enclosing.assign(name, value)

        raise RuntimeError(name, f'Undefined variable {name.lexeme}.')

    def assign_at(self, distance, name, value):
        """Set a variable in the scope `distance` hops up the chain."""
        self.ancestor(distance).values[name.lexeme] = value

    def define(self, name, value):
        """(Re)bind `name` (a plain string) in this scope."""
        self.values[name] = value

    def get(self, name):
        """Read a variable, searching up the scope chain.

        Raises RuntimeError if the variable was never defined.
        """
        if name.lexeme in self.values:
            return self.values[name.lexeme]

        if self.enclosing is not None:
            return self.enclosing.get(name)

        raise RuntimeError(name, f'Undefined variable {name.lexeme}.')

    def get_at(self, distance, name):
        """Read `name` (a plain string) from the scope `distance` hops up."""
        return self.ancestor(distance).values.get(name)

    def ancestor(self, distance):
        """Return the environment `distance` links up the enclosing chain."""
        environment = self
        while distance > 0:
            distance -= 1
            environment = environment.enclosing
        return environment

    def merge(self, other):
        """Merge another environment chain into this one, level by level.

        Level i of `other` is merged into level i of `self`; missing
        levels are created on `self` as needed.
        """
        target = self
        while other:
            target.values.update(other.values)
            # Grow our chain when `other` is deeper than we are.
            if not target.enclosing and other.enclosing:
                target.enclosing = Environment()
            # Bug fix: descend BOTH chains. Previously this used
            # `self.enclosing`, pinning the walk to level 1 so chains
            # deeper than two levels were flattened into it.
            target = target.enclosing
            other = other.enclosing
| 1,226 | 15 | 265 |
fe29a64b9df91b693cfa6dc68513305a950217e0 | 6,091 | py | Python | info/views.py | ShaverJT/ThetaTauMiami | 975e4a2a29493166880be412200ed3107c0efc60 | [
"Apache-2.0"
] | 1 | 2015-04-13T18:55:12.000Z | 2015-04-13T18:55:12.000Z | info/views.py | ShaverJT/ThetaTauMiami | 975e4a2a29493166880be412200ed3107c0efc60 | [
"Apache-2.0"
] | null | null | null | info/views.py | ShaverJT/ThetaTauMiami | 975e4a2a29493166880be412200ed3107c0efc60 | [
"Apache-2.0"
] | null | null | null | import math
from django.http import HttpResponse
from django.template import Context, loader
from django.shortcuts import render, get_object_or_404
from info.models import Brother, Officer, BrotherEntity
from info import utility
from marketing.models import Picture as MarketingPic
from articles.models import Article
max_brothers_per_page = 24  # upper bound accepted for the ?count= query param
standard_brothers_per_page = 9  # default page size for brother listings
brothers_per_row = 3  # columns in the brother-listing grid
max_pages_listed_on_screen = 5  # width of the pagination number strip
officers_per_row = 2  # columns in the officers grid
exec_board_members_per_row_on_about_page = 3  # NOTE(review): not referenced in the code visible here
def general_listing(request, isAlumniFilter, isPledgeFilter, name):
    '''
    Retrieves all of the information necessary for each of the brother listings.
    Retrieves information based on the isAlumniFilter and isPledgeFilter.

    Pagination is driven by the optional ?page= and ?count= query params;
    out-of-range values are normalized by get_page_number/get_brother_count.
    '''
    brothers_count = get_brother_count(request)
    page_number = get_page_number(request)

    # Slice bounds for the requested page within the full queryset.
    brothers_range_min = (page_number - 1) * brothers_count
    brothers_range_max = page_number * brothers_count

    brothers = Brother.objects.filter(isAlumni=isAlumniFilter, isPledge=isPledgeFilter).order_by(
        'lastName', 'firstName', 'middleName')
    number_of_brothers = len(brothers)
    total_pages = int(math.ceil(number_of_brothers / float(brothers_count)))

    brothers = brothers[brothers_range_min:brothers_range_max]
    brothers = convert_brothers_to_brotherentities(brothers)
    # Arrange into rows for the template grid; None when the page is empty.
    brother_list_list = (utility.convert_array_to_YxZ(brothers, brothers_per_row)
                         if brothers else None)

    page_numbers_list = calculate_page_range(total_pages, page_number)
    # next_page of 0 signals "no next page" to the template.
    next_page = page_number + 1 if number_of_brothers > brothers_range_max else 0
    prev_page = page_number - 1

    context_dict = {
        'brotherType': name,
        'brother_list_list': brother_list_list,
        'page_number': page_number,
        'prev_page': prev_page,
        'next_page': next_page,
        'page_numbers': page_numbers_list
    }
    # Keep a non-default page size sticky across pagination links.
    if brothers_count != standard_brothers_per_page:
        context_dict['brothers_count'] = brothers_count

    c = Context(context_dict)
    t = loader.get_template('brothers_list.html')
    return HttpResponse(t.render(c))
def convert_brothers_to_brotherentities(broList):
    '''
    Converts a set of brothers and converts them to brother entities
    which contain more information
    '''
    return [BrotherEntity(brother) for brother in broList]
def get_brother_count(request):
    '''
    Finds the requested number of brothers and corrects it if there are any issues
    If the number is invalid, it will return standard_brothers_per_page
    '''
    brothers_count = request.GET.get('count', str(standard_brothers_per_page))
    try:
        brothers_count = int(brothers_count)
        if brothers_count > max_brothers_per_page:
            brothers_count = max_brothers_per_page
        elif brothers_count < 1:
            # A zero/negative count would break pagination downstream
            # (division by zero in general_listing), so use the default.
            brothers_count = standard_brothers_per_page
    except (TypeError, ValueError):  # non-numeric ?count= value
        brothers_count = standard_brothers_per_page
    return brothers_count
def get_page_number(request):
    '''
    Finds the page number and corrects it if there are any issues
    If the page number is invalid, it will return 1
    '''
    page_number = request.GET.get('page', '1')
    try:
        page_number = int(page_number)
    except (TypeError, ValueError):  # non-numeric ?page= value
        return 1
    # Clamp zero/negative requests to the first page.
    return page_number if page_number >= 1 else 1
def calculate_page_range(total_pages, page_number):
    '''
    This determines which page numbers to show at the bottom of the brothers list pages.
    It returns a list of integers that should be displayed on the page based on the total
    number of pages and the current page number.
    '''
    # Bug fix: use integer division. Under Python 3, `/ 2` yields a float,
    # making range() raise TypeError; `//` restores the Python 2 semantics.
    half_window = max_pages_listed_on_screen // 2
    if total_pages == 1:  # only one page: no need to display page numbers
        return []
    elif total_pages <= max_pages_listed_on_screen:  # display all available pages
        min_page_number_displayed = 1
        max_page_number_displayed = total_pages + 1
    elif page_number - half_window <= 1:  # near the beginning: pages 1..max
        min_page_number_displayed = 1
        max_page_number_displayed = min_page_number_displayed + max_pages_listed_on_screen
    elif page_number + half_window >= total_pages:  # near the end: last max pages
        max_page_number_displayed = total_pages + 1
        min_page_number_displayed = max_page_number_displayed - max_pages_listed_on_screen
    else:  # somewhere in the middle: window centred on the current page
        min_page_number_displayed = page_number - half_window
        max_page_number_displayed = min_page_number_displayed + max_pages_listed_on_screen

    return list(range(min_page_number_displayed, max_page_number_displayed))
| 42.894366 | 161 | 0.728944 | import math
from django.http import HttpResponse
from django.template import Context, loader
from django.shortcuts import render, get_object_or_404
from info.models import Brother, Officer, BrotherEntity
from info import utility
from marketing.models import Picture as MarketingPic
from articles.models import Article
max_brothers_per_page = 24  # upper bound accepted for the ?count= query param
standard_brothers_per_page = 9  # default page size for brother listings
brothers_per_row = 3  # columns in the brother-listing grid
max_pages_listed_on_screen = 5  # width of the pagination number strip
officers_per_row = 2  # columns in the officers grid
exec_board_members_per_row_on_about_page = 3  # NOTE(review): not referenced in the code visible here
def index(request):
    '''Render the About page: officers, the group photo and recent articles.'''
    officer_list = Officer.objects.filter().order_by('ordering')
    group_pic = MarketingPic.objects.filter(name='Group')[0]
    recent_events = Article.objects.all().order_by('-date')[:6]
    context = Context({'officer_list': officer_list,
                       'group_pic': group_pic,
                       'articles': recent_events})
    template = loader.get_template('about.html')
    return HttpResponse(template.render(context))
def brother_profile(request, brother_id):
    '''Render a single brother's profile page (404 if the id is unknown).'''
    brother = get_object_or_404(Brother, pk=brother_id)
    return render(request, 'brother_profile.html', {'be': BrotherEntity(brother)})
def officers(request):
    '''Render the officers page as a grid of officers_per_row columns.'''
    ordered = Officer.objects.filter().order_by('ordering')
    rows = utility.convert_array_to_YxZ(ordered, officers_per_row)
    template = loader.get_template('officers_list.html')
    return HttpResponse(template.render(Context({'officer_list_list': rows})))
def actives(request):
    '''List current active members (not alumni, not pledges).'''
    return general_listing(request, False, False, 'Active Members')
def pledges(request):
    '''List current pledges (isPledge=True, not alumni).'''
    return general_listing(request, False, True, 'Pledges')
def alumni(request):
    '''List alumni (isAlumni=True, not pledges).'''
    return general_listing(request, True, False, 'Alumni')
def general_listing(request, isAlumniFilter, isPledgeFilter, name):
    '''
    Retrieves all of the information necessary for each of the brother listings.
    Retrieves information based on the isAlumniFilter and isPledgeFilter.

    Pagination is driven by the optional ?page= and ?count= query params;
    out-of-range values are normalized by get_page_number/get_brother_count.
    '''
    brothers_count = get_brother_count(request)
    page_number = get_page_number(request)

    # Slice bounds for the requested page within the full queryset.
    brothers_range_min = (page_number - 1) * brothers_count
    brothers_range_max = page_number * brothers_count

    brothers = Brother.objects.filter(isAlumni=isAlumniFilter, isPledge=isPledgeFilter).order_by(
        'lastName', 'firstName', 'middleName')
    number_of_brothers = len(brothers)
    total_pages = int(math.ceil(number_of_brothers / float(brothers_count)))

    brothers = brothers[brothers_range_min:brothers_range_max]
    brothers = convert_brothers_to_brotherentities(brothers)
    # Arrange into rows for the template grid; None when the page is empty.
    brother_list_list = (utility.convert_array_to_YxZ(brothers, brothers_per_row)
                         if brothers else None)

    page_numbers_list = calculate_page_range(total_pages, page_number)
    # next_page of 0 signals "no next page" to the template.
    next_page = page_number + 1 if number_of_brothers > brothers_range_max else 0
    prev_page = page_number - 1

    context_dict = {
        'brotherType': name,
        'brother_list_list': brother_list_list,
        'page_number': page_number,
        'prev_page': prev_page,
        'next_page': next_page,
        'page_numbers': page_numbers_list
    }
    # Keep a non-default page size sticky across pagination links.
    if brothers_count != standard_brothers_per_page:
        context_dict['brothers_count'] = brothers_count

    c = Context(context_dict)
    t = loader.get_template('brothers_list.html')
    return HttpResponse(t.render(c))
def convert_brothers_to_brotherentities(broList):
    '''
    Converts a set of brothers and converts them to brother entities
    which contain more information
    '''
    return [BrotherEntity(brother) for brother in broList]
def get_brother_count(request):
    '''
    Finds the requested number of brothers and corrects it if there are any issues
    If the number is invalid, it will return standard_brothers_per_page
    '''
    brothers_count = request.GET.get('count', str(standard_brothers_per_page))
    try:
        brothers_count = int(brothers_count)
        if brothers_count > max_brothers_per_page:
            brothers_count = max_brothers_per_page
        elif brothers_count < 1:
            # A zero/negative count would break pagination downstream
            # (division by zero in general_listing), so use the default.
            brothers_count = standard_brothers_per_page
    except (TypeError, ValueError):  # non-numeric ?count= value
        brothers_count = standard_brothers_per_page
    return brothers_count
def get_page_number(request):
    '''
    Finds the page number and corrects it if there are any issues
    If the page number is invalid, it will return 1
    '''
    page_number = request.GET.get('page', '1')
    try:
        page_number = int(page_number)
    except (TypeError, ValueError):  # non-numeric ?page= value
        return 1
    # Clamp zero/negative requests to the first page.
    return page_number if page_number >= 1 else 1
def calculate_page_range(total_pages, page_number):
    '''
    This determines which page numbers to show at the bottom of the brothers list pages.
    It returns a list of integers that should be displayed on the page based on the total
    number of pages and the current page number.
    '''
    # Bug fix: use integer division. Under Python 3, `/ 2` yields a float,
    # making range() raise TypeError; `//` restores the Python 2 semantics.
    half_window = max_pages_listed_on_screen // 2
    if total_pages == 1:  # only one page: no need to display page numbers
        return []
    elif total_pages <= max_pages_listed_on_screen:  # display all available pages
        min_page_number_displayed = 1
        max_page_number_displayed = total_pages + 1
    elif page_number - half_window <= 1:  # near the beginning: pages 1..max
        min_page_number_displayed = 1
        max_page_number_displayed = min_page_number_displayed + max_pages_listed_on_screen
    elif page_number + half_window >= total_pages:  # near the end: last max pages
        max_page_number_displayed = total_pages + 1
        min_page_number_displayed = max_page_number_displayed - max_pages_listed_on_screen
    else:  # somewhere in the middle: window centred on the current page
        min_page_number_displayed = page_number - half_window
        max_page_number_displayed = min_page_number_displayed + max_pages_listed_on_screen

    return list(range(min_page_number_displayed, max_page_number_displayed))
765dc73d1e00d6074bedf595a2b6b30c969785f0 | 875 | py | Python | wawhfd/urls.py | calebrash/wawhfd | 1b060eda6f6e7dbde6dd222d47ca5f04768ed403 | [
"MIT"
] | 1 | 2018-01-26T06:16:34.000Z | 2018-01-26T06:16:34.000Z | wawhfd/urls.py | calebrash/wawhfd | 1b060eda6f6e7dbde6dd222d47ca5f04768ed403 | [
"MIT"
] | null | null | null | wawhfd/urls.py | calebrash/wawhfd | 1b060eda6f6e7dbde6dd222d47ca5f04768ed403 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.contrib import admin
from wawhfd.views import (
IndexView,
DatesListView,
DatesEditView,
DatesDeleteView,
RecipesListView,
RecipesAddView,
RecipesEditView,
RecipesDeleteView,
)
urlpatterns = [
    # Home page.
    url(r'^$', IndexView.as_view()),
    # Dates API; date_str is a YYYY-MM-DD string.
    url(r'^api/dates/$', DatesListView.as_view()),
    url(r'^api/dates/(?P<date_str>[\d]{4}-[\d]{2}-[\d]{2})/edit/$', DatesEditView.as_view()),
    url(r'^api/dates/(?P<date_str>[\d]{4}-[\d]{2}-[\d]{2})/delete/$', DatesDeleteView.as_view()),
    # Recipes API; recipe_id is a numeric id.
    url(r'^api/recipes/$', RecipesListView.as_view()),
    url(r'^api/recipes/add/$', RecipesAddView.as_view()),
    url(r'^api/recipes/(?P<recipe_id>[\d]+)/edit/$', RecipesEditView.as_view()),
    url(r'^api/recipes/(?P<recipe_id>[\d]+)/delete/$', RecipesDeleteView.as_view()),
    # Django admin site.
    url(r'^admin/', admin.site.urls),
]
| 29.166667 | 97 | 0.637714 | from django.conf.urls import url
from django.contrib import admin
from wawhfd.views import (
IndexView,
DatesListView,
DatesEditView,
DatesDeleteView,
RecipesListView,
RecipesAddView,
RecipesEditView,
RecipesDeleteView,
)
urlpatterns = [
    # Home page.
    url(r'^$', IndexView.as_view()),
    # Dates API; date_str is a YYYY-MM-DD string.
    url(r'^api/dates/$', DatesListView.as_view()),
    url(r'^api/dates/(?P<date_str>[\d]{4}-[\d]{2}-[\d]{2})/edit/$', DatesEditView.as_view()),
    url(r'^api/dates/(?P<date_str>[\d]{4}-[\d]{2}-[\d]{2})/delete/$', DatesDeleteView.as_view()),
    # Recipes API; recipe_id is a numeric id.
    url(r'^api/recipes/$', RecipesListView.as_view()),
    url(r'^api/recipes/add/$', RecipesAddView.as_view()),
    url(r'^api/recipes/(?P<recipe_id>[\d]+)/edit/$', RecipesEditView.as_view()),
    url(r'^api/recipes/(?P<recipe_id>[\d]+)/delete/$', RecipesDeleteView.as_view()),
    # Django admin site.
    url(r'^admin/', admin.site.urls),
]
| 0 | 0 | 0 |
22668c307ec65269c60dd26d9f5a9adf952a0166 | 3,501 | py | Python | listgames.py | Emetophobe/steamutils | 201e6f94c66b1b2432d47102b68fa23b0cabef8c | [
"MIT"
] | 2 | 2020-05-20T00:32:47.000Z | 2020-05-20T00:52:43.000Z | listgames.py | Emetophobe/steamutils | 201e6f94c66b1b2432d47102b68fa23b0cabef8c | [
"MIT"
] | null | null | null | listgames.py | Emetophobe/steamutils | 201e6f94c66b1b2432d47102b68fa23b0cabef8c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (C) 2019-2020 Emetophobe (snapnaw@gmail.com)
# https://github.com/Emetophobe/steamutils/
import os
import re
import glob
import argparse
def list_games(steamdir):
    """Get the list of installed Steam games.

    Args:
        steamdir: path to the Steam installation directory.

    Returns:
        A list of manifest dicts (one per appmanifest_*.acf file), each
        augmented with the absolute 'installdir' and the 'manifest' path,
        sorted by game name.

    Raises:
        ValueError: if steamdir does not look like a Steam directory.
    """
    # Make sure the directory is valid
    steam_apps = os.path.join(os.path.abspath(steamdir), 'steamapps')
    steam_common = os.path.join(steam_apps, 'common')
    # NOTE(review): `and` only fails when BOTH dirs are missing; since
    # common lives inside steamapps, confirm whether `or` was intended.
    if not os.path.isdir(steam_apps) and not os.path.isdir(steam_common):
        raise ValueError('Error: Invalid steam directory.')

    # Get list of manifest files from the steamapps directory
    acf_files = glob.glob(os.path.join(steam_apps, 'appmanifest_*.acf'))

    # Parse manifest files and create a list of game dicts
    games = []
    for filename in acf_files:
        with open(filename, 'r', encoding='utf-8') as fp:
            manifest = {}
            for line in fp:
                # Manifest lines look like: "key"  "value"
                matches = re.findall(r'"(.*?)"', line)  # strings inside double quotes
                if len(matches) == 2:  # require a key/value pair
                    key, value = matches
                    manifest[key] = value

            # Add the full path to the installdir and manifest file
            manifest['installdir'] = os.path.join(steam_common, manifest['installdir'])
            manifest['manifest'] = filename
            games.append(manifest)

    return sorted(games, key=lambda game: game['name'])
def print_games(games):
    """ Print a tabular games list. """
    template = '{:<50} {:<10} {}'
    print(template.format('Name', 'App Id', 'Location'))
    for entry in games:
        print(template.format(entry['name'], entry['appid'], entry['installdir']))
def print_detailed_games(games):
    """ Print a detailed games list. """
    for entry in games:
        print()
        print('name:', entry['name'])
        print('appid:', entry['appid'])
        print('installdir:', entry['installdir'])
        print('manifest:', entry['manifest'])
        print('size:', format_size(entry['SizeOnDisk']))
def format_size(size):
    """ Format install size into a human readable string. """
    size = int(size)  # SizeOnDisk arrives as a string from the manifest
    for suffix in ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB'):
        if size < 1024:
            return '{:.1f}{}'.format(size, suffix)
        size /= 1024
    # Bug fix: was '{:1f}' (width 1, default precision -> '1.000000YB').
    return '{:.1f}YB'.format(size)
if __name__ == '__main__':
try:
main()
except (OSError, ValueError) as e:
print(e)
| 33.342857 | 112 | 0.604113 | #!/usr/bin/env python
# Copyright (C) 2019-2020 Emetophobe (snapnaw@gmail.com)
# https://github.com/Emetophobe/steamutils/
import os
import re
import glob
import argparse
def list_games(steamdir):
    """Get the list of installed Steam games.

    Args:
        steamdir: path to the Steam installation directory.

    Returns:
        A list of manifest dicts (one per appmanifest_*.acf file), each
        augmented with the absolute 'installdir' and the 'manifest' path,
        sorted by game name.

    Raises:
        ValueError: if steamdir does not look like a Steam directory.
    """
    # Make sure the directory is valid
    steam_apps = os.path.join(os.path.abspath(steamdir), 'steamapps')
    steam_common = os.path.join(steam_apps, 'common')
    # NOTE(review): `and` only fails when BOTH dirs are missing; since
    # common lives inside steamapps, confirm whether `or` was intended.
    if not os.path.isdir(steam_apps) and not os.path.isdir(steam_common):
        raise ValueError('Error: Invalid steam directory.')

    # Get list of manifest files from the steamapps directory
    acf_files = glob.glob(os.path.join(steam_apps, 'appmanifest_*.acf'))

    # Parse manifest files and create a list of game dicts
    games = []
    for filename in acf_files:
        with open(filename, 'r', encoding='utf-8') as fp:
            manifest = {}
            for line in fp:
                # Manifest lines look like: "key"  "value"
                matches = re.findall(r'"(.*?)"', line)  # strings inside double quotes
                if len(matches) == 2:  # require a key/value pair
                    key, value = matches
                    manifest[key] = value

            # Add the full path to the installdir and manifest file
            manifest['installdir'] = os.path.join(steam_common, manifest['installdir'])
            manifest['manifest'] = filename
            games.append(manifest)

    return sorted(games, key=lambda game: game['name'])
def print_games(games):
    """ Print a tabular games list. """
    template = '{:<50} {:<10} {}'
    print(template.format('Name', 'App Id', 'Location'))
    for entry in games:
        print(template.format(entry['name'], entry['appid'], entry['installdir']))
def print_detailed_games(games):
    """ Print a detailed games list. """
    for entry in games:
        print()
        print('name:', entry['name'])
        print('appid:', entry['appid'])
        print('installdir:', entry['installdir'])
        print('manifest:', entry['manifest'])
        print('size:', format_size(entry['SizeOnDisk']))
def format_size(size):
    """ Format install size into a human readable string. """
    size = int(size)  # SizeOnDisk arrives as a string from the manifest
    for suffix in ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB'):
        if size < 1024:
            return '{:.1f}{}'.format(size, suffix)
        size /= 1024
    # Bug fix: was '{:1f}' (width 1, default precision -> '1.000000YB').
    return '{:.1f}YB'.format(size)
def main():
    """Parse command-line arguments, list/filter installed games, print them."""
    parser = argparse.ArgumentParser(description='List installed games or find a specific game.')
    parser.add_argument('steamdir', help='location of the steam directory', type=str)
    parser.add_argument('-s', '--search', metavar='name', help='search for a specific game or app id', type=str)
    parser.add_argument('-v', '--verbose', help='verbose game details', action='store_true')
    args = parser.parse_args()

    # Get list of installed games
    games = list_games(args.steamdir)

    # Apply optional search filter (name substring, case-insensitive, or app id)
    if args.search is not None:
        needle = args.search.lower()
        games = [g for g in games
                 if needle in g['name'].lower() or args.search in g['appid']]

    # Print results
    if not games:
        print('Found 0 games.')
    elif args.verbose:
        print_detailed_games(games)
    else:
        print_games(games)
if __name__ == '__main__':
try:
main()
except (OSError, ValueError) as e:
print(e)
| 942 | 0 | 23 |
313e9e6deabe030c8b33cec7fe570dc60615c783 | 790 | py | Python | scripts/convert_spm_vocab.py | ZurichNLP/segtest | 38fa641d5cb7c06060ce04baa6d9ed53dfe957b7 | [
"MIT"
] | 7 | 2021-09-02T11:23:40.000Z | 2022-02-01T21:14:07.000Z | scripts/convert_spm_vocab.py | ZurichNLP/segtest | 38fa641d5cb7c06060ce04baa6d9ed53dfe957b7 | [
"MIT"
] | null | null | null | scripts/convert_spm_vocab.py | ZurichNLP/segtest | 38fa641d5cb7c06060ce04baa6d9ed53dfe957b7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
from collections import OrderedDict
import json
if __name__ == '__main__':
main()
| 24.6875 | 66 | 0.541772 | #!/usr/bin/env python3
import sys
from collections import OrderedDict
import json
def main():
    """Convert each vocab file named on argv into a <name>.json id map.

    The first whitespace-separated field of each line is taken as the
    token; the first three tokens are replaced by <EOS>/<GO>/<UNK> with
    fixed ids 0/1/2, and the rest keep their line position as id.
    """
    for filename in sys.argv[1:]:
        print('Processing', filename)
        with open(filename, 'r', encoding='utf-8') as fin:
            tokens = [line.strip().split()[0] for line in fin]
        worddict = OrderedDict([('<EOS>', 0), ('<GO>', 1), ('<UNK>', 2)])
        for index, token in enumerate(tokens[3:], start=3):
            worddict[token] = index
        with open('%s.json' % filename, 'w', encoding='utf-8') as fout:
            json.dump(worddict, fout, indent=2, ensure_ascii=False)
    print('Done')
if __name__ == '__main__':
main()
| 643 | 0 | 23 |
4c2ac61937efc476395a80e207360408a861ca0c | 3,879 | py | Python | tensorflow/modules/datasets/dataset.py | nicolasrosa/Sparse-to-Continuous | 8664de17d6b6c6cc39bf8fcebfcb829249367f2f | [
"BSD-2-Clause"
] | 19 | 2018-09-25T01:58:13.000Z | 2021-07-27T09:54:36.000Z | tensorflow/modules/datasets/dataset.py | nicolasrosa/Sparse-to-Continuous | 8664de17d6b6c6cc39bf8fcebfcb829249367f2f | [
"BSD-2-Clause"
] | 18 | 2020-03-24T18:18:56.000Z | 2022-02-10T00:35:13.000Z | tensorflow/modules/datasets/dataset.py | nicolasrosa/Sparse-to-Continuous | 8664de17d6b6c6cc39bf8fcebfcb829249367f2f | [
"BSD-2-Clause"
] | 4 | 2019-11-14T02:35:34.000Z | 2020-08-27T11:36:02.000Z | # ===========
# Libraries
# ===========
from ..size import Size
from ..filenames import FilenamesHandler
from ..args import args
# ===================
# Class Declaration
# ===================
| 43.1 | 152 | 0.565352 | # ===========
# Libraries
# ===========
from ..size import Size
from ..filenames import FilenamesHandler
from ..args import args
# ===================
# Class Declaration
# ===================
class Dataset(FilenamesHandler):
    """Base class holding the shared metadata of a depth-estimation dataset.

    Stores the dataset location on disk, the image/depth tensor sizes and the
    train/test split ratio. Concrete datasets subclass this and rely on
    get_file_path() to resolve the split file to read.
    """
    def __init__(self, **kwargs):
        super(Dataset, self).__init__()
        # Absolute dataset location = machine-specific root + relative path.
        self.dataset_path = self.get_dataset_root() + kwargs.pop('dataset_rel_path')
        self.name = kwargs.pop('name')
        height = kwargs.pop('height')
        width = kwargs.pop('width')
        self.max_depth = kwargs.pop('max_depth') # Max Depth to limit predictions
        self.image_size = Size(height, width, 3)
        self.depth_size = Size(height, width, 1)
        self.ratio = 0.8 # Train/Test Split Ratio
        print("[Dataloader] %s object created." % self.name)
    @staticmethod
    def get_dataset_root():
        """ Defines dataset_root path depending on which machine is used.

        Returns None when args.machine matches no known machine name, which
        makes the path concatenation in __init__ fail loudly.
        """
        dataset_root = None
        if args.machine == 'nicolas':
            dataset_root = "/media/nicolas/nicolas_seagate/datasets/"
        elif args.machine == 'olorin':
            dataset_root = "/media/olorin/Documentos/datasets/"
        elif args.machine == 'olorin_from_nicolas':
            dataset_root = "/home/nicolas/remote/olorin_root/media/olorin/Documentos/datasets/"
        return dataset_root
    def get_file_path(self, mode, test_split, test_file_path):
        """Resolve the split-file path for the requested mode/test split.

        Also overwrites self.dataset_path for the special test splits, and
        records the chosen path in args.test_file_path as a side effect.
        NOTE(review): both the 'mode' parameter and the global args.mode are
        consulted below — presumably they agree; confirm against callers.
        """
        # KITTI Stereo 2015: 200 Test Images
        # Eigen Split: 697 Test Images
        # Eigen Split & KITTI Depth: 652 Test Images
        file_path = None
        # ------------------------------------------------------------- #
        #  Evaluation based on Disparity Images (Eval Tool: MonoDepth)  #
        # ------------------------------------------------------------- #
        if (args.mode == 'train' or args.mode == 'test') and test_split == '': # Default
            file_path = 'data/' + self.name + '_' + mode + '.txt' if test_file_path == '' else test_file_path
        elif args.mode == 'test' and args.eval_tool == 'monodepth':
            if test_split == 'kitti_stereo':
                file_path = 'data/kitti_stereo_2015_test_files.txt'
                # Overwrite the 'dataset_path' specified by the dataset
                self.dataset_path = self.get_dataset_root() + 'kitti/stereo/stereo2015/data_scene_flow/'
            elif test_split == 'eigen':
                file_path = 'data/eigen_test_files.txt'
                # Overwrite the 'dataset_path' specified by the dataset
                self.dataset_path = self.get_dataset_root() + 'kitti/raw_data/'
            elif test_split == 'eigen_kitti_depth':
                file_path = 'data/eigen_test_kitti_depth_files.txt'
                # Overwrite the 'dataset_path' specified by the dataset
                self.dataset_path = self.get_dataset_root() + 'kitti/'
            else:
                # Fix: the original raised ValueError('') with no message.
                raise ValueError(
                    "Invalid test_split '%s' for eval_tool 'monodepth'." % test_split)
        # --------------------------------------------------------------------------------- #
        #  Evaluation based on Ground Truth/Velodyne Scans Images (Eval Tool: KITTI Depth)  #
        # --------------------------------------------------------------------------------- #
        elif args.mode == 'test' and args.eval_tool == 'kitti_depth':
            if test_split == 'kitti_stereo' or test_split == 'eigen': # FIXME:
                raise NotImplementedError("Não deveria rodar! Terminar Implementação. Devo gerar os mapas de profundidade para que possa ser avaliado.")
            elif test_split == 'eigen_kitti_depth':
                file_path = 'data/new_splits/eigen_split_based_on_kitti_depth/eigen_test_kitti_depth_files.txt'
                # Overwrite the 'dataset_path' specified by the dataset
                self.dataset_path = self.get_dataset_root() + 'kitti/'
        args.test_file_path = file_path
        return file_path
| 3,058 | 605 | 22 |
d4e51eb8d2da662f350df6fd566a070f7504549d | 4,842 | py | Python | pptx/opc/oxml.py | just4jc/python-pptx | ec433085d84d48b5539c379e52eb3c279ab2cbc0 | [
"MIT"
] | 169 | 2016-12-18T16:37:48.000Z | 2022-03-08T11:37:49.000Z | lib-linux_x64/pptx/opc/oxml.py | Element84/lambda-text-extractor | 6da52d077a2fc571e38bfe29c33ae68f6443cd5a | [
"Apache-2.0"
] | 26 | 2017-06-08T01:45:36.000Z | 2021-09-23T19:13:40.000Z | lib-linux_x64/pptx/opc/oxml.py | Element84/lambda-text-extractor | 6da52d077a2fc571e38bfe29c33ae68f6443cd5a | [
"Apache-2.0"
] | 42 | 2016-12-23T03:27:12.000Z | 2021-10-01T13:46:21.000Z | # encoding: utf-8
"""
Temporary stand-in for main oxml module that came across with the
PackageReader transplant. Probably much will get replaced with objects from
the pptx.oxml.core and then this module will either get deleted or only hold
the package related custom element classes.
"""
from __future__ import absolute_import
from lxml import etree
from .constants import NAMESPACE as NS, RELATIONSHIP_TARGET_MODE as RTM
from ..oxml import parse_xml, register_element_cls
from ..oxml.simpletypes import (
ST_ContentType, ST_Extension, ST_TargetMode, XsdAnyUri, XsdId
)
from ..oxml.xmlchemy import (
BaseOxmlElement, OptionalAttribute, RequiredAttribute, ZeroOrMore
)
nsmap = {
'ct': NS.OPC_CONTENT_TYPES,
'pr': NS.OPC_RELATIONSHIPS,
'r': NS.OFC_RELATIONSHIPS,
}
class CT_Default(BaseOxmlElement):
    """
    ``<Default>`` element, specifying the default content type to be applied
    to a part with the specified extension.
    """
    # Required XML attributes, exposed as read/write Python properties by the
    # xmlchemy attribute descriptors.
    extension = RequiredAttribute('Extension', ST_Extension)
    contentType = RequiredAttribute('ContentType', ST_ContentType)
class CT_Override(BaseOxmlElement):
    """
    ``<Override>`` element, specifying the content type to be applied for a
    part with the specified partname.
    """
    # Required XML attributes, exposed as read/write Python properties by the
    # xmlchemy attribute descriptors.
    partName = RequiredAttribute('PartName', XsdAnyUri)
    contentType = RequiredAttribute('ContentType', ST_ContentType)
class CT_Relationship(BaseOxmlElement):
    """
    ``<Relationship>`` element, representing a single relationship from a
    source to a target part.
    """
    # XML attribute descriptors provided by xmlchemy; each maps an XML
    # attribute to a read/write Python property on the element.
    rId = RequiredAttribute('Id', XsdId)
    reltype = RequiredAttribute('Type', XsdAnyUri)
    target_ref = RequiredAttribute('Target', XsdAnyUri)
    # TargetMode defaults to Internal when the attribute is absent in the XML.
    targetMode = OptionalAttribute(
        'TargetMode', ST_TargetMode, default=RTM.INTERNAL
    )
    @classmethod
    def new(cls, rId, reltype, target, target_mode=RTM.INTERNAL):
        """
        Return a new ``<Relationship>`` element with the given id, type and
        target; *target_mode* is ``RTM.INTERNAL`` (partname target) or
        ``RTM.EXTERNAL`` (URI target).
        """
        xml = '<Relationship xmlns="%s"/>' % nsmap['pr']
        relationship = parse_xml(xml)
        relationship.rId = rId
        relationship.reltype = reltype
        relationship.target_ref = target
        relationship.targetMode = target_mode
        return relationship
class CT_Relationships(BaseOxmlElement):
    """
    ``<Relationships>`` element, the root element in a .rels file.
    """
    relationship = ZeroOrMore('pr:Relationship')
    def add_rel(self, rId, reltype, target, is_external=False):
        """
        Add a child ``<Relationship>`` element with attributes set according
        to parameter values. The new element is appended in place; nothing
        is returned.
        """
        target_mode = RTM.EXTERNAL if is_external else RTM.INTERNAL
        relationship = CT_Relationship.new(rId, reltype, target, target_mode)
        # _insert_relationship is generated by the ZeroOrMore descriptor above.
        self._insert_relationship(relationship)
    @classmethod
    def new(cls):
        """
        Return a new, empty ``<Relationships>`` element.
        """
        xml = '<Relationships xmlns="%s"/>' % nsmap['pr']
        relationships = parse_xml(xml)
        return relationships
    @property
    def xml(self):
        """
        Return XML string for this element, suitable for saving in a .rels
        stream, not pretty printed and with an XML declaration at the top.
        """
        return oxml_tostring(self, encoding='UTF-8', standalone=True)
class CT_Types(BaseOxmlElement):
    """
    ``<Types>`` element, the container element for Default and Override
    elements in [Content_Types].xml.
    """
    default = ZeroOrMore('ct:Default')
    override = ZeroOrMore('ct:Override')
    def add_default(self, ext, content_type):
        """
        Add a child ``<Default>`` element with attributes set to parameter
        values. Returns the newly added element.
        """
        # _add_default is generated by the ZeroOrMore descriptor above.
        return self._add_default(extension=ext, contentType=content_type)
    def add_override(self, partname, content_type):
        """
        Add a child ``<Override>`` element with attributes set to parameter
        values. Returns the newly added element.
        """
        # _add_override is generated by the ZeroOrMore descriptor above.
        return self._add_override(
            partName=partname, contentType=content_type
        )
    @classmethod
    def new(cls):
        """
        Return a new, empty ``<Types>`` element.
        """
        xml = '<Types xmlns="%s"/>' % nsmap['ct']
        types = parse_xml(xml)
        return types
register_element_cls('ct:Default', CT_Default)
register_element_cls('ct:Override', CT_Override)
register_element_cls('ct:Types', CT_Types)
register_element_cls('pr:Relationship', CT_Relationship)
register_element_cls('pr:Relationships', CT_Relationships)
| 30.2625 | 77 | 0.673689 | # encoding: utf-8
"""
Temporary stand-in for main oxml module that came across with the
PackageReader transplant. Probably much will get replaced with objects from
the pptx.oxml.core and then this module will either get deleted or only hold
the package related custom element classes.
"""
from __future__ import absolute_import
from lxml import etree
from .constants import NAMESPACE as NS, RELATIONSHIP_TARGET_MODE as RTM
from ..oxml import parse_xml, register_element_cls
from ..oxml.simpletypes import (
ST_ContentType, ST_Extension, ST_TargetMode, XsdAnyUri, XsdId
)
from ..oxml.xmlchemy import (
BaseOxmlElement, OptionalAttribute, RequiredAttribute, ZeroOrMore
)
nsmap = {
'ct': NS.OPC_CONTENT_TYPES,
'pr': NS.OPC_RELATIONSHIPS,
'r': NS.OFC_RELATIONSHIPS,
}
def oxml_tostring(elm, encoding=None, pretty_print=False, standalone=None):
    """Serialize *elm* to XML text via lxml, forwarding the keyword options."""
    serialized = etree.tostring(
        elm,
        encoding=encoding,
        pretty_print=pretty_print,
        standalone=standalone,
    )
    return serialized
def serialize_part_xml(part_elm):
    """Return *part_elm* serialized as UTF-8 XML bytes with an XML declaration."""
    return etree.tostring(part_elm, encoding='UTF-8', standalone=True)
class CT_Default(BaseOxmlElement):
"""
``<Default>`` element, specifying the default content type to be applied
to a part with the specified extension.
"""
extension = RequiredAttribute('Extension', ST_Extension)
contentType = RequiredAttribute('ContentType', ST_ContentType)
class CT_Override(BaseOxmlElement):
"""
``<Override>`` element, specifying the content type to be applied for a
part with the specified partname.
"""
partName = RequiredAttribute('PartName', XsdAnyUri)
contentType = RequiredAttribute('ContentType', ST_ContentType)
class CT_Relationship(BaseOxmlElement):
"""
``<Relationship>`` element, representing a single relationship from a
source to a target part.
"""
rId = RequiredAttribute('Id', XsdId)
reltype = RequiredAttribute('Type', XsdAnyUri)
target_ref = RequiredAttribute('Target', XsdAnyUri)
targetMode = OptionalAttribute(
'TargetMode', ST_TargetMode, default=RTM.INTERNAL
)
@classmethod
def new(cls, rId, reltype, target, target_mode=RTM.INTERNAL):
"""
Return a new ``<Relationship>`` element.
"""
xml = '<Relationship xmlns="%s"/>' % nsmap['pr']
relationship = parse_xml(xml)
relationship.rId = rId
relationship.reltype = reltype
relationship.target_ref = target
relationship.targetMode = target_mode
return relationship
class CT_Relationships(BaseOxmlElement):
"""
``<Relationships>`` element, the root element in a .rels file.
"""
relationship = ZeroOrMore('pr:Relationship')
def add_rel(self, rId, reltype, target, is_external=False):
"""
Add a child ``<Relationship>`` element with attributes set according
to parameter values.
"""
target_mode = RTM.EXTERNAL if is_external else RTM.INTERNAL
relationship = CT_Relationship.new(rId, reltype, target, target_mode)
self._insert_relationship(relationship)
@classmethod
def new(cls):
"""
Return a new ``<Relationships>`` element.
"""
xml = '<Relationships xmlns="%s"/>' % nsmap['pr']
relationships = parse_xml(xml)
return relationships
@property
def xml(self):
"""
Return XML string for this element, suitable for saving in a .rels
stream, not pretty printed and with an XML declaration at the top.
"""
return oxml_tostring(self, encoding='UTF-8', standalone=True)
class CT_Types(BaseOxmlElement):
"""
``<Types>`` element, the container element for Default and Override
elements in [Content_Types].xml.
"""
default = ZeroOrMore('ct:Default')
override = ZeroOrMore('ct:Override')
def add_default(self, ext, content_type):
"""
Add a child ``<Default>`` element with attributes set to parameter
values.
"""
return self._add_default(extension=ext, contentType=content_type)
def add_override(self, partname, content_type):
"""
Add a child ``<Override>`` element with attributes set to parameter
values.
"""
return self._add_override(
partName=partname, contentType=content_type
)
@classmethod
def new(cls):
"""
Return a new ``<Types>`` element.
"""
xml = '<Types xmlns="%s"/>' % nsmap['ct']
types = parse_xml(xml)
return types
register_element_cls('ct:Default', CT_Default)
register_element_cls('ct:Override', CT_Override)
register_element_cls('ct:Types', CT_Types)
register_element_cls('pr:Relationship', CT_Relationship)
register_element_cls('pr:Relationships', CT_Relationships)
| 273 | 0 | 46 |
71b11a4cceda2ece9f74987229878f74763c8bdf | 10,797 | py | Python | fc_treeobs/training_2ts.py | giulic3/flatland-challenge-marl | 391197188c9ddf56cfac7a03f48bb3bbf8e53dd5 | [
"MIT"
] | 9 | 2020-05-02T15:55:17.000Z | 2021-12-30T07:33:50.000Z | fc_treeobs/training_2ts.py | giulic3/flatland-challenge-marl | 391197188c9ddf56cfac7a03f48bb3bbf8e53dd5 | [
"MIT"
] | 2 | 2020-02-20T12:41:40.000Z | 2021-04-01T10:40:23.000Z | fc_treeobs/training_2ts.py | giulic3/flatland-challenge-marl | 391197188c9ddf56cfac7a03f48bb3bbf8e53dd5 | [
"MIT"
] | 1 | 2020-06-03T09:41:59.000Z | 2020-06-03T09:41:59.000Z | # Import packages for plotting and system
import getopt
import random
import sys
from collections import deque
# make sure the root path is in system path
from pathlib import Path
base_dir = Path(__file__).resolve().parent.parent
sys.path.append(str(base_dir))
from importlib_resources import path
import matplotlib.pyplot as plt
import numpy as np
import torch
from flatland.envs.observations import TreeObsForRailEnv
from flatland.envs.predictions import ShortestPathPredictorForRailEnv
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
#from flatland.utils.rendertools import RenderTool
import fc_treeobs.nets
from fc_treeobs.dueling_double_dqn import Agent
from fc_treeobs.utils import norm_obs_clip, split_tree_into_feature_groups
if __name__ == '__main__':
main(sys.argv[1:])
| 42.341176 | 155 | 0.576456 | # Import packages for plotting and system
import getopt
import random
import sys
from collections import deque
# make sure the root path is in system path
from pathlib import Path
base_dir = Path(__file__).resolve().parent.parent
sys.path.append(str(base_dir))
from importlib_resources import path
import matplotlib.pyplot as plt
import numpy as np
import torch
from flatland.envs.observations import TreeObsForRailEnv
from flatland.envs.predictions import ShortestPathPredictorForRailEnv
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
#from flatland.utils.rendertools import RenderTool
import fc_treeobs.nets
from fc_treeobs.dueling_double_dqn import Agent
from fc_treeobs.utils import norm_obs_clip, split_tree_into_feature_groups
def main(argv):
    """Train a Dueling Double DQN agent on random sparse Flatland rail envs.

    Args:
        argv: command-line arguments after the program name; supports
            -n/--n_episodes to set the number of training episodes
            (defaults to 15000 when the flag is absent).
    """
    try:
        opts, args = getopt.getopt(argv, "n:", ["n_episodes="])
    except getopt.GetoptError:
        print('training_2ts.py -n <n_episodes>')
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-n', '--n_episodes'):
            n_episodes = int(arg)
    ## Initialize the random
    random.seed(1)
    np.random.seed(1)
    # Preload an agent
    # NOTE(review): set to True to resume from the checkpoint loaded below.
    training = False
    # Initialize a random map with a random number of agents
    x_dim = np.random.randint(20, 40)
    y_dim = np.random.randint(20, 40)
    n_agents = np.random.randint(3, 8)
    # NOTE(review): n_goals and min_dist are computed but never used below —
    # presumably leftovers from an earlier rail generator; confirm.
    n_goals = n_agents + np.random.randint(0, 3)
    min_dist = int(0.75 * min(x_dim, y_dim))
    tree_depth = 3
    # Use a the malfunction generator to break agents from time to time
    stochastic_data = {'prop_malfunction': 0.05, # Percentage of defective agents
                       'malfunction_rate': 50, # Rate of malfunction occurrence
                       'min_duration': 3, # Minimal duration of malfunction
                       'max_duration': 20 # Max duration of malfunction
                       }
    # Different agent types (trains) with different speeds.
    speed_ration_map = {1.: 0.25, # Fast passenger train
                        1. / 2.: 0.25, # Fast freight train
                        1. / 3.: 0.25, # Slow commuter train
                        1. / 4.: 0.25} # Slow freight train
    # Get an observation builder and predictor
    observation_helper = TreeObsForRailEnv(max_depth=tree_depth, predictor=ShortestPathPredictorForRailEnv())
    env = RailEnv(width=x_dim,
                  height=y_dim,
                  rail_generator=sparse_rail_generator(max_num_cities=3,
                                                       # Number of cities in map (where train stations are)
                                                       seed=1, # Random seed
                                                       grid_mode=False,
                                                       max_rails_between_cities=2,
                                                       max_rails_in_city=3),
                  schedule_generator=sparse_schedule_generator(speed_ration_map),
                  number_of_agents=n_agents,
                  stochastic_data=stochastic_data, # Malfunction data generator
                  obs_builder_object=observation_helper)
    env.reset(True, True)
    #env_renderer = RenderTool(env, gl="PILSVG", )
    handle = env.get_agent_handles()
    features_per_node = env.obs_builder.observation_dim
    # Number of nodes in a full 4-ary tree of depth tree_depth.
    nr_nodes = 0
    for i in range(tree_depth + 1):
        nr_nodes += np.power(4, i)
    state_size = 2 * features_per_node * nr_nodes # We will use two time steps per observation --> 2x state_size
    action_size = 5
    # We set the number of episodes we would like to train on
    if 'n_episodes' not in locals():
        n_episodes = 15000
    # Set max number of steps per episode as well as other training relevant parameter
    max_steps = int(3 * (env.height + env.width))
    eps = 1.
    eps_end = 0.005
    eps_decay = 0.9995
    action_dict = dict()
    final_action_dict = dict()
    scores_window = deque(maxlen=100)
    done_window = deque(maxlen=100)
    # Rolling pair of observation dicts: the DQN state concatenates the last
    # two time steps (hence the 2x in state_size above).
    time_obs = deque(maxlen=2)
    scores = []
    dones_list = []
    action_prob = [0] * action_size
    agent_obs = [None] * env.get_num_agents()
    agent_next_obs = [None] * env.get_num_agents()
    # Initialize the agent
    agent = Agent(state_size, action_size)
    # Here you can pre-load an agent
    if training:
        with path(fc_treeobs.nets, "avoid_checkpoint6000_round1generators.pth") as file_in:
            agent.qnetwork_local.load_state_dict(torch.load(file_in))
    # Do training over n_episodes
    for episodes in range(1, n_episodes + 1):
        """
        Training Curriculum: In order to get good generalization we change the number of agents
        and the size of the levels every 50 episodes.
        """
        if episodes % 50 == 0:
            x_dim = np.random.randint(20, 40)
            y_dim = np.random.randint(20, 40)
            n_agents = np.random.randint(3, 8)
            n_goals = n_agents + np.random.randint(0, 3)
            min_dist = int(0.75 * min(x_dim, y_dim))
            env = RailEnv(width=x_dim,
                          height=y_dim,
                          rail_generator=sparse_rail_generator(max_num_cities=3,
                                                               # Number of cities in map (where train stations are)
                                                               seed=1, # Random seed
                                                               grid_mode=False,
                                                               max_rails_between_cities=2,
                                                               max_rails_in_city=3),
                          schedule_generator=sparse_schedule_generator(speed_ration_map),
                          number_of_agents=n_agents,
                          stochastic_data=stochastic_data, # Malfunction data generator
                          obs_builder_object=observation_helper)
            # Adjust the parameters according to the new env.
            max_steps = int(3 * (env.height + env.width))
            agent_obs = [None] * env.get_num_agents()
            agent_next_obs = [None] * env.get_num_agents()
        # Reset environment
        obs, info = env.reset(True, True)
        #env_renderer.reset()
        # Setup placeholder for finals observation of a single agent. This is necessary because agents terminate at
        # different times during an episode
        final_obs = agent_obs.copy()
        final_obs_next = agent_next_obs.copy()
        # Build agent specific observations
        for a in range(env.get_num_agents()):
            data, distance, agent_data = split_tree_into_feature_groups(obs[a], tree_depth)
            data = norm_obs_clip(data)
            distance = norm_obs_clip(distance)
            agent_data = np.clip(agent_data, -1, 1)
            obs[a] = np.concatenate((np.concatenate((data, distance)), agent_data))
        # Accumulate two time steps of observation (Here just twice the first state)
        for i in range(2):
            time_obs.append(obs)
        # Build the agent specific double ti
        for a in range(env.get_num_agents()):
            agent_obs[a] = np.concatenate((time_obs[0][a], time_obs[1][a]))
        score = 0
        env_done = 0
        # Run episode
        for step in range(max_steps):
            # Action
            for a in range(env.get_num_agents()):
                # action = agent.act(np.array(obs[a]), eps=eps)
                action = agent.act(agent_obs[a], eps=eps)
                action_prob[action] += 1
                action_dict.update({a: action})
            # Environment step
            next_obs, all_rewards, done, _ = env.step(action_dict)
            #env_renderer.render_env(show=True, show_predictions=True, show_observations=False)
            # Normalize observations
            for a in range(env.get_num_agents()):
                data, distance, agent_data = split_tree_into_feature_groups(next_obs[a], tree_depth)
                data = norm_obs_clip(data)
                distance = norm_obs_clip(distance)
                agent_data = np.clip(agent_data, -1, 1)
                next_obs[a] = np.concatenate((np.concatenate((data, distance)), agent_data))
            time_obs.append(next_obs)
            # Update replay buffer and train agent
            for a in range(env.get_num_agents()):
                agent_next_obs[a] = np.concatenate((time_obs[0][a], time_obs[1][a]))
                if done[a]:
                    final_obs[a] = agent_obs[a].copy()
                    final_obs_next[a] = agent_next_obs[a].copy()
                    final_action_dict.update({a: action_dict[a]})
                if not done[a]:
                    agent.step(agent_obs[a], action_dict[a], all_rewards[a], agent_next_obs[a], done[a])
                score += all_rewards[a] / env.get_num_agents()
            agent_obs = agent_next_obs.copy()
            if done['__all__']:
                env_done = 1
                # Replay the stored final transitions for all agents once the
                # whole episode is done.
                for a in range(env.get_num_agents()):
                    agent.step(final_obs[a], final_action_dict[a], all_rewards[a], final_obs_next[a], done[a])
                break
        # Epsilon decay
        eps = max(eps_end, eps_decay * eps) # decrease epsilon
        done_window.append(env_done)
        scores_window.append(score / max_steps) # save most recent score
        scores.append(np.mean(scores_window))
        dones_list.append((np.mean(done_window)))
        # Print training results info
        print(
            '\rTraining {} Agents on ({},{}).\t Episode {}\t Average Score: {:.3f}\tDones: {:.2f}%\tEpsilon: {:.2f} \t Action Probabilities: \t {}'.format(
                env.get_num_agents(), x_dim, y_dim,
                episodes,
                np.mean(scores_window),
                100 * np.mean(done_window),
                eps, action_prob / np.sum(action_prob)), end=" ")
        if episodes % 100 == 0:
            print(
                '\rTraining {} Agents.\t Episode {}\t Average Score: {:.3f}\tDones: {:.2f}%\tEpsilon: {:.2f} \t Action Probabilities: \t {}'.format(
                    env.get_num_agents(),
                    episodes,
                    np.mean(scores_window),
                    100 * np.mean(done_window),
                    eps,
                    action_prob / np.sum(action_prob)))
            # Checkpoint the local Q-network every 100 episodes.
            torch.save(agent.qnetwork_local.state_dict(),
                       './nets/avoid_checkpoint' + str(episodes) + '.pth')
            action_prob = [1] * action_size
    plt.plot(scores)
    plt.show()
if __name__ == '__main__':
main(sys.argv[1:])
| 9,851 | 0 | 23 |
779a475a82160f2b8759dd2421f5612dcbacf302 | 1,245 | py | Python | portfolio_model.py | mr-pablinho/sobol_codes | b25a5f2f387ba48162b8b6597c80b79f5066de3b | [
"MIT"
] | null | null | null | portfolio_model.py | mr-pablinho/sobol_codes | b25a5f2f387ba48162b8b6597c80b79f5066de3b | [
"MIT"
] | null | null | null | portfolio_model.py | mr-pablinho/sobol_codes | b25a5f2f387ba48162b8b6597c80b79f5066de3b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 18 10:38:40 2021
PORTFOLIO MODEL - WHY GLOBAL SENSITIVITY?
@author: PMR
"""
# %% Import libraries
import chaospy as cp
import numpy as np
import matplotlib.pyplot as plt
# %% Portfolio model
# %% Setup problem
Q1_mean = 0
Q1_std = 1
Q2_mean = 0
Q2_std = 3
c1 = 2
c2 = 1
Q1_distro = cp.Normal(Q1_mean, Q1_std)
Q2_distro = cp.Normal(Q2_mean, Q2_std)
np.random.seed(1)
nSamples = 1000
J_distro = cp.J(Q1_distro, Q2_distro)
samples = J_distro.sample(nSamples).T
# %% Evaluate the model
Y_all = []
for i in range(nSamples):
Q1, Q2 = samples[i,0], samples[i,1]
Y = portfolio_model(c1, c2, Q1, Q2)
Y_all.append(Y)
# %% Plots
plt.figure('q1 v. y')
plt.title('q1 v. y')
plt.scatter(samples[:,0], Y_all, s=5, color='blue', alpha=0.5)
plt.grid(alpha=0.3)
plt.xlim(-15,15)
plt.ylim(-15,15)
plt.figure('q2 v. y')
plt.title('q2 v. y')
plt.scatter(samples[:,1], Y_all, s=5, color='blue', alpha=0.5)
plt.grid(alpha=0.3)
plt.xlim(-15,15)
plt.ylim(-15,15)
| 18.043478 | 63 | 0.605622 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 18 10:38:40 2021
PORTFOLIO MODEL - WHY GLOBAL SENSITIVITY?
@author: PMR
"""
# %% Import libraries
import chaospy as cp
import numpy as np
import matplotlib.pyplot as plt
# %% Portfolio model
def portfolio_model(c1, c2, Q1, Q2):
    """Return the portfolio value Y = c1*Q1 + c2*Q2.

    Q1 and Q2 are the hedged portfolios; c1 and c2 the amounts invested.
    """
    return (c1 * Q1) + (c2 * Q2)
# %% Setup problem
Q1_mean = 0
Q1_std = 1
Q2_mean = 0
Q2_std = 3
c1 = 2
c2 = 1
Q1_distro = cp.Normal(Q1_mean, Q1_std)
Q2_distro = cp.Normal(Q2_mean, Q2_std)
np.random.seed(1)
nSamples = 1000
J_distro = cp.J(Q1_distro, Q2_distro)
samples = J_distro.sample(nSamples).T
# %% Evaluate the model
Y_all = []
for i in range(nSamples):
Q1, Q2 = samples[i,0], samples[i,1]
Y = portfolio_model(c1, c2, Q1, Q2)
Y_all.append(Y)
# %% Plots
plt.figure('q1 v. y')
plt.title('q1 v. y')
plt.scatter(samples[:,0], Y_all, s=5, color='blue', alpha=0.5)
plt.grid(alpha=0.3)
plt.xlim(-15,15)
plt.ylim(-15,15)
plt.figure('q2 v. y')
plt.title('q2 v. y')
plt.scatter(samples[:,1], Y_all, s=5, color='blue', alpha=0.5)
plt.grid(alpha=0.3)
plt.xlim(-15,15)
plt.ylim(-15,15)
| 130 | 0 | 25 |
b7aec17202a7a190f3f76b71f0d6f9e849148f6b | 508 | py | Python | adwords_client/adwords_api/managed_customer_service.py | getninjas/adwords-client | 021473f9b2b9aa5d17ae53c364304ef5c2bf7547 | [
"Apache-2.0"
] | 8 | 2017-08-17T12:03:36.000Z | 2019-06-05T17:18:44.000Z | adwords_client/adwords_api/managed_customer_service.py | getninjas/adwords-client | 021473f9b2b9aa5d17ae53c364304ef5c2bf7547 | [
"Apache-2.0"
] | 12 | 2017-07-13T17:17:53.000Z | 2021-02-08T20:11:57.000Z | adwords_client/adwords_api/managed_customer_service.py | getninjas/adwords-client | 021473f9b2b9aa5d17ae53c364304ef5c2bf7547 | [
"Apache-2.0"
] | 4 | 2017-07-13T16:54:00.000Z | 2019-02-22T07:38:11.000Z | from . import common as cm
# def mutate(self, client_customer_id=None, sync=None):
# if client_customer_id:
# self.client.SetClientCustomerId(client_customer_id)
# result = self.service.mutate(self.helper.operations)
# for item in result.value:
# item['returnType'] = 'ManagedCustomer'
# return result.value
| 33.866667 | 65 | 0.67126 | from . import common as cm
class ManagedCustomerService(cm.BaseService):
    """Thin wrapper binding BaseService to the AdWords ManagedCustomerService."""
    def __init__(self, client):
        # Delegate all behavior to BaseService with the fixed service name.
        super().__init__(client, 'ManagedCustomerService')
# def mutate(self, client_customer_id=None, sync=None):
# if client_customer_id:
# self.client.SetClientCustomerId(client_customer_id)
# result = self.service.mutate(self.helper.operations)
# for item in result.value:
# item['returnType'] = 'ManagedCustomer'
# return result.value
| 65 | 24 | 49 |
cdda149dab8fa06d0758f2ccf8191c23e949bbb4 | 374 | py | Python | tilapia/lib/wallet/utils.py | huazhouwang/python_multichain_wallet | 52e0acdc2984c08990cb36433ef17a414fbe8312 | [
"MIT"
] | 2 | 2021-09-23T13:47:08.000Z | 2021-09-24T02:39:14.000Z | tilapia/lib/wallet/utils.py | huazhouwang/tilapia | 52e0acdc2984c08990cb36433ef17a414fbe8312 | [
"MIT"
] | null | null | null | tilapia/lib/wallet/utils.py | huazhouwang/tilapia | 52e0acdc2984c08990cb36433ef17a414fbe8312 | [
"MIT"
] | null | null | null | import json
| 34 | 90 | 0.740642 | import json
def decrypt_eth_keystore(keyfile_json: str, keystore_password: str) -> bytes:
    """Decrypt an Ethereum V3 keystore and return the raw private key.

    Args:
        keyfile_json: the keystore file content as a JSON string.
        keystore_password: the password protecting the keystore.

    Returns:
        The decrypted private key bytes.

    Raises:
        ValueError: if the keystore JSON is malformed or unsupported.
            (ValueError subclasses Exception, so callers that caught the
            previously-raised bare Exception keep working.)
    """
    # eth_account is imported lazily so this module loads without it; an
    # ImportError propagates unchanged, exactly as before.
    import eth_account
    try:
        return bytes(eth_account.account.Account.decrypt(keyfile_json, keystore_password))
    except (TypeError, KeyError, NotImplementedError, json.decoder.JSONDecodeError) as e:
        # Raise a specific exception type instead of bare Exception.
        raise ValueError(f"Invalid keystore. error: {e}") from e
| 338 | 0 | 23 |
69c8866c559c56ac98d72cb0de641a39413b10cc | 910 | py | Python | openjij/utils/graph_utils.py | 29rou/OpenJij | c2579fba8710cf82b9e6761304f0042b365b595c | [
"Apache-2.0"
] | 61 | 2019-01-05T13:37:10.000Z | 2022-03-11T02:11:08.000Z | openjij/utils/graph_utils.py | OpenJij/OpenJij | 9ed58500ef47583bc472410d470bb2dd4bfec74a | [
"Apache-2.0"
] | 79 | 2019-01-29T09:55:20.000Z | 2022-02-19T04:06:20.000Z | openjij/utils/graph_utils.py | 29rou/OpenJij | c2579fba8710cf82b9e6761304f0042b365b595c | [
"Apache-2.0"
] | 21 | 2019-01-07T07:55:10.000Z | 2022-03-08T14:27:23.000Z | import numpy as np
def qubo_to_ising(mat: np.ndarray):
"""inplace-convert numpy matrix from qubo to ising.
Args:
mat (np.ndarray): numpy matrix
"""
mat /= 4
for i in range(mat.shape[0]):
mat[i, i] += np.sum(mat[i, :])
def chimera_to_ind(r: int, c: int, z: int, L: int):
"""[summary]
Args:
r (int): row index
c (int): column index
z (int): in-chimera index (must be from 0 to 7)
L (int): height and width of chimera-units (total number of spins is :math:`L \\times L \\times 8`)
Raises:
ValueError: [description]
Returns:
int: corresponding Chimera index
"""
if not (0 <= r < L and 0 <= c < L and 0 <= z < 8):
raise ValueError(
'0 <= r < L or 0 <= c < L or 0 <= z < 8. '
'your input r={}, c={}, z={}, L={}'.format(r, c, z, L))
return r * L * 8 + c*8 + z
| 26 | 108 | 0.510989 | import numpy as np
def qubo_to_ising(mat: np.ndarray):
    """inplace-convert numpy matrix from qubo to ising.

    Args:
        mat (np.ndarray): numpy matrix, modified in place
    """
    mat /= 4
    # Add each (scaled) row sum onto the corresponding diagonal entry.
    diag = np.arange(mat.shape[0])
    mat[diag, diag] += mat.sum(axis=1)
def chimera_to_ind(r: int, c: int, z: int, L: int):
    """Convert a Chimera coordinate (r, c, z) to a flat spin index.

    Args:
        r (int): row index
        c (int): column index
        z (int): in-chimera index (must be from 0 to 7)
        L (int): height and width of chimera-units (total number of spins is :math:`L \\times L \\times 8`)

    Raises:
        ValueError: if any coordinate is outside its valid range

    Returns:
        int: corresponding Chimera index
    """
    inside_grid = 0 <= r < L and 0 <= c < L
    inside_unit = 0 <= z < 8
    if not (inside_grid and inside_unit):
        raise ValueError(
            '0 <= r < L or 0 <= c < L or 0 <= z < 8. '
            'your input r={}, c={}, z={}, L={}'.format(r, c, z, L))
    # Row-major over units, 8 spins per unit.
    return (r * L + c) * 8 + z
| 0 | 0 | 0 |
1cbd4e735a4e592852b86fb40a4872bdcb02c9c0 | 263 | py | Python | demos/demo_morse/demo_morse_fourier.py | WaveBlocks/WaveBlocks | 2af3730dcf27e54006ec602e696b4d4df25459d8 | [
"BSD-3-Clause"
] | null | null | null | demos/demo_morse/demo_morse_fourier.py | WaveBlocks/WaveBlocks | 2af3730dcf27e54006ec602e696b4d4df25459d8 | [
"BSD-3-Clause"
] | null | null | null | demos/demo_morse/demo_morse_fourier.py | WaveBlocks/WaveBlocks | 2af3730dcf27e54006ec602e696b4d4df25459d8 | [
"BSD-3-Clause"
] | null | null | null | algorithm = "fourier"
potential = "morse"
D = 3
a = 0.3
T = 10
dt = 0.005
eps = 0.2
f = 4.0
ngn = 4096
basis_size = 4
leading_component = 0
P = 1.0j
Q = 1
S = 0.0
p = 0.0
q = 1.5
parameters = [ (P, Q, S, p, q) ]
coefficients = [[(0, 1.0)]]
write_nth = 20
| 9.392857 | 32 | 0.547529 | algorithm = "fourier"
potential = "morse"
D = 3
a = 0.3
T = 10
dt = 0.005
eps = 0.2
f = 4.0
ngn = 4096
basis_size = 4
leading_component = 0
P = 1.0j
Q = 1
S = 0.0
p = 0.0
q = 1.5
parameters = [ (P, Q, S, p, q) ]
coefficients = [[(0, 1.0)]]
write_nth = 20
| 0 | 0 | 0 |
4caae3c9d8a9c19c3fba5639c8067e15e2c9eac7 | 8,055 | py | Python | src/cauliflowervest/client/mac/corestorage.py | j-lowry/cauliflowervest | 444d7065293112edfc7ea2deb1d27f9b4047828b | [
"Apache-2.0"
] | null | null | null | src/cauliflowervest/client/mac/corestorage.py | j-lowry/cauliflowervest | 444d7065293112edfc7ea2deb1d27f9b4047828b | [
"Apache-2.0"
] | null | null | null | src/cauliflowervest/client/mac/corestorage.py | j-lowry/cauliflowervest | 444d7065293112edfc7ea2deb1d27f9b4047828b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""Core storage related features."""
import logging
from cauliflowervest.client import util
DISKUTIL = '/usr/sbin/diskutil'
class Error(Exception):
  """Base error; all other errors in this module derive from it."""
class CouldNotUnlockError(Error):
  """Raised when a CoreStorage volume could not be unlocked."""
class CouldNotRevertError(Error):
  """Raised when a CoreStorage volume could not be reverted."""
class VolumeNotEncryptedError(Error):
  """Raised when an operation requires an encrypted volume but it is not."""
class State(object):
  """Fake enum to represent the possible states of core storage."""
  # Plain string constants (not a real enum) for Python 2 compatibility.
  ENABLED = 'CORE_STORAGE_STATE_ENABLED'
  ENCRYPTED = 'CORE_STORAGE_STATE_ENCRYPTED'
  FAILED = 'CORE_STORAGE_STATE_FAILED'
  NONE = 'CORE_STORAGE_STATE_NONE'
  UNKNOWN = 'CORE_STORAGE_STATE_UNKNOWN'
def IsBootVolumeEncrypted():
  """Returns True if the boot volume (/) is encrypted, False otherwise."""
  try:
    root_info = util.GetPlistFromExec((DISKUTIL, 'cs', 'info', '-plist', '/'))
  except util.ExecError:
    # Non-zero return means / volume isn't a CoreStorage volume.
    return False
  family_uuid = root_info.get('MemberOfCoreStorageLogicalVolumeFamily')
  if not family_uuid:
    return False
  try:
    family_info = util.GetPlistFromExec(
        (DISKUTIL, 'cs', 'info', '-plist', family_uuid))
  except util.ExecError:
    # Couldn't get info on Logical Volume Family UUID.
    return False
  encryption_type = family_info.get(
      'CoreStorageLogicalVolumeFamilyEncryptionType')
  return encryption_type == 'AES-XTS'
def GetRecoveryPartition():
  """Determine the location of the recovery partition.

  Returns:
    str, like "/dev/disk0s3" where the recovery partition is, OR
    None, if no recovery partition exists or cannot be detected.
  """
  try:
    disklist_plist = util.GetPlistFromExec((DISKUTIL, 'list', '-plist'))
  except util.ExecError:
    logging.exception('GetRecoveryPartition() failed to get partition list.')
    return
  # Scan every partition of every disk for the "Recovery HD" volume.
  for disk in disklist_plist.get('AllDisksAndPartitions', []):
    for partition in disk.get('Partitions', []):
      if partition.get('VolumeName') == 'Recovery HD':
        return '/dev/%s' % partition['DeviceIdentifier']
def GetCoreStoragePlist(uuid=None):
  """Returns a dict of diskutil cs info plist for a given str CoreStorage uuid.

  Args:
    uuid: str, optional, CoreStorage uuid. If no uuid is provided, this
        function returns a diskutil cs list plist.
  Returns:
    A dict of diskutil cs info/list -plist output.
  Raises:
    Error: The given uuid was invalid or there was a diskutil error.
  """
  if uuid:
    if not util.UuidIsValid(uuid):
      # FIX: previously raised a bare Error with no message, which made
      # the two failure modes of this function indistinguishable.
      raise Error('Invalid CoreStorage UUID: %r' % uuid)
    cmd = [DISKUTIL, 'corestorage', 'info', '-plist', uuid]
  else:
    cmd = [DISKUTIL, 'corestorage', 'list', '-plist']
  try:
    return util.GetPlistFromExec(cmd)
  except util.ExecError:
    raise Error('diskutil command failed: %s' % ' '.join(cmd))
def GetStateAndVolumeIds():
  """Determine the state of core storage and the volume IDs (if any).

  In the case that core storage is enabled, it is required that every present
  volume is encrypted, to return "encrypted" status (i.e. the entire drive is
  encrypted, for all present drives). Otherwise ENABLED or FAILED state is
  returned.

  Returns:
    tuple: (State, [list; str encrypted UUIDs], [list; str unencrypted UUIDs])
  Raises:
    Error: there was a problem getting the corestorage list, or family info.
  """
  state = State.NONE
  volume_ids = []            # Unencrypted (or unknown-encryption) volume UUIDs.
  encrypted_volume_ids = []  # AES-XTS encrypted volume UUIDs.
  failed_volume_ids = []     # Volumes whose conversion state is "Failed".
  cs_plist = GetCoreStoragePlist()
  groups = cs_plist.get('CoreStorageLogicalVolumeGroups', [])
  # Any logical volume group at all means core storage is at least enabled.
  if groups:
    state = State.ENABLED
  for group in groups:
    for family in group.get('CoreStorageLogicalVolumeFamilies', []):
      # The encryption type is a property of the family, not of each volume.
      family_plist = GetCoreStoragePlist(family['CoreStorageUUID'])
      enc = family_plist.get('CoreStorageLogicalVolumeFamilyEncryptionType', '')
      for volume in family['CoreStorageLogicalVolumes']:
        volume_id = volume['CoreStorageUUID']
        volume_plist = GetCoreStoragePlist(volume_id)
        conv_state = volume_plist.get(
            'CoreStorageLogicalVolumeConversionState', '')
        # Known states include: Pending, Converting, Complete, Failed.
        if conv_state == 'Failed':
          failed_volume_ids.append(volume_id)
        elif enc == 'AES-XTS':
          # If conv_state is not 'Failed' and enc is correct, consider the
          # volume encrypted to include those that are still encrypting.
          # A potential TODO might be to separate these.
          encrypted_volume_ids.append(volume_id)
        else:
          volume_ids.append(volume_id)
  if failed_volume_ids:
    state = State.FAILED
  elif encrypted_volume_ids and not volume_ids:
    state = State.ENCRYPTED
  # For now at least, consider "failed" volumes as encrypted, as the same
  # actions are valid for such volumes. For example: revert.
  encrypted_volume_ids.extend(failed_volume_ids)
  return state, encrypted_volume_ids, volume_ids
def GetState():
  """Check if core storage is in place.

  Returns:
    One of the class properties of State.
  """
  return GetStateAndVolumeIds()[0]
def GetVolumeSize(uuid, readable=True):
  """Return the size of the volume with the given UUID.

  Args:
    uuid: str, ID of the volume in question
    readable: Optional boolean, default true: return a human-readable string
      when true, otherwise int number of bytes.
  Returns:
    str or int, see "readable" arg.
  Raises:
    Error: there was a problem getting volume info.
    ValueError: The UUID is formatted incorrectly.
  """
  if not util.UuidIsValid(uuid):
    raise ValueError('Invalid UUID: ' + uuid)
  try:
    plist = util.GetPlistFromExec(
        (DISKUTIL, 'corestorage', 'info', '-plist', uuid))
  except util.ExecError:
    logging.exception('GetVolumeSize() failed to get volume info: %s', uuid)
    raise Error('diskutil corestorage info failed for %s' % uuid)
  num_bytes = plist['CoreStorageLogicalVolumeSize']
  if readable:
    # FIX: float() guards against integer (floor) division under Python 2,
    # which always rendered whole-GiB strings like "3.00 GiB".
    return '%.2f GiB' % (float(num_bytes) / (1 << 30))
  else:
    return num_bytes
def UnlockVolume(uuid, passphrase):
  """Unlock a core storage encrypted volume.

  Args:
    uuid: str, uuid of the volume to unlock.
    passphrase: str, passphrase to unlock the volume.
  Raises:
    CouldNotUnlockError: the volume cannot be unlocked.
    ValueError: The UUID is formatted incorrectly.
  """
  if not util.UuidIsValid(uuid):
    raise ValueError('Invalid UUID: ' + uuid)
  returncode, _, stderr = util.Exec(
      (DISKUTIL, 'corestorage', 'unlockVolume', uuid, '-stdinpassphrase'),
      stdin=passphrase)
  if returncode == 0:
    return
  # diskutil reports an error for volumes that are already accessible;
  # treat those two messages as success.
  already_open = ('volume is not locked' in stderr or
                  'is already unlocked' in stderr)
  if not already_open:
    raise CouldNotUnlockError(
        'Could not unlock volume (%s).' % returncode)
def RevertVolume(uuid, passphrase):
  """Revert a core storage encrypted volume (to unencrypted state).

  Args:
    uuid: str, uuid of the volume to revert.
    passphrase: str, passphrase to unlock the volume.
  Raises:
    CouldNotRevertError: the volume was unlocked, but cannot be reverted.
    CouldNotUnlockError: the volume cannot be unlocked.
    ValueError: The UUID is formatted incorrectly.
  """
  if not util.UuidIsValid(uuid):
    raise ValueError('Invalid UUID: ' + uuid)
  # The volume must be unlocked first; this raises CouldNotUnlockError on
  # failure.
  UnlockVolume(uuid, passphrase)
  returncode = util.Exec(
      (DISKUTIL, 'corestorage', 'revert', uuid, '-stdinpassphrase'),
      stdin=passphrase)[0]
  if returncode != 0:
    raise CouldNotRevertError('Could not revert volume (%s).' % returncode)
| 31.464844 | 80 | 0.701924 | #!/usr/bin/env python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""Core storage related features."""
import logging
from cauliflowervest.client import util
DISKUTIL = '/usr/sbin/diskutil'
class Error(Exception):
  """Base error for all core storage operations in this module."""
class CouldNotUnlockError(Error):
  """Could not unlock volume error (see UnlockVolume)."""
class CouldNotRevertError(Error):
  """Could not revert volume to unencrypted state (see RevertVolume)."""
class VolumeNotEncryptedError(Error):
  """Volume is not encrypted error."""
class State(object):
  """Fake enum to represent the possible states of core storage."""
  ENABLED = 'CORE_STORAGE_STATE_ENABLED'      # CoreStorage on, but not all volumes encrypted.
  ENCRYPTED = 'CORE_STORAGE_STATE_ENCRYPTED'  # Every present volume is encrypted.
  FAILED = 'CORE_STORAGE_STATE_FAILED'        # At least one volume failed conversion.
  NONE = 'CORE_STORAGE_STATE_NONE'            # CoreStorage not in use.
  UNKNOWN = 'CORE_STORAGE_STATE_UNKNOWN'      # Never assigned by code in this module.
def IsBootVolumeEncrypted():
  """Returns True if the boot volume (/) is encrypted, False otherwise."""
  # A non-zero diskutil exit status means / is not a CoreStorage volume.
  try:
    boot_info = util.GetPlistFromExec((DISKUTIL, 'cs', 'info', '-plist', '/'))
  except util.ExecError:
    return False
  family_uuid = boot_info.get('MemberOfCoreStorageLogicalVolumeFamily')
  if not family_uuid:
    return False
  # Encryption type lives on the Logical Volume Family, not the volume.
  try:
    family_info = util.GetPlistFromExec(
        (DISKUTIL, 'cs', 'info', '-plist', family_uuid))
  except util.ExecError:
    return False  # Couldn't get info on Logical Volume Family UUID.
  enc_type = family_info.get('CoreStorageLogicalVolumeFamilyEncryptionType')
  return enc_type == 'AES-XTS'
def GetRecoveryPartition():
  """Determine the location of the recovery partition.

  Returns:
    str, like "/dev/disk0s3" where the recovery partition is, OR
    None, if no recovery partition exists or cannot be detected.
  """
  try:
    disk_list = util.GetPlistFromExec((DISKUTIL, 'list', '-plist'))
  except util.ExecError:
    logging.exception('GetRecoveryPartition() failed to get partition list.')
    return None
  # Scan every partition of every disk for the "Recovery HD" volume.
  for disk in disk_list.get('AllDisksAndPartitions', []):
    for partition in disk.get('Partitions', []):
      if partition.get('VolumeName') == 'Recovery HD':
        return '/dev/%s' % partition['DeviceIdentifier']
  return None
def GetCoreStoragePlist(uuid=None):
  """Returns a dict of diskutil cs info plist for a given str CoreStorage uuid.

  Args:
    uuid: str, optional, CoreStorage uuid. If no uuid is provided, this
        function returns a diskutil cs list plist.
  Returns:
    A dict of diskutil cs info/list -plist output.
  Raises:
    Error: The given uuid was invalid or there was a diskutil error.
  """
  if uuid:
    if not util.UuidIsValid(uuid):
      # FIX: previously raised a bare Error with no message, which made
      # the two failure modes of this function indistinguishable.
      raise Error('Invalid CoreStorage UUID: %r' % uuid)
    cmd = [DISKUTIL, 'corestorage', 'info', '-plist', uuid]
  else:
    cmd = [DISKUTIL, 'corestorage', 'list', '-plist']
  try:
    return util.GetPlistFromExec(cmd)
  except util.ExecError:
    raise Error('diskutil command failed: %s' % ' '.join(cmd))
def GetStateAndVolumeIds():
  """Determine the state of core storage and the volume IDs (if any).

  In the case that core storage is enabled, it is required that every present
  volume is encrypted, to return "encrypted" status (i.e. the entire drive is
  encrypted, for all present drives). Otherwise ENABLED or FAILED state is
  returned.

  Returns:
    tuple: (State, [list; str encrypted UUIDs], [list; str unencrypted UUIDs])
  Raises:
    Error: there was a problem getting the corestorage list, or family info.
  """
  state = State.NONE
  volume_ids = []            # Unencrypted (or unknown-encryption) volume UUIDs.
  encrypted_volume_ids = []  # AES-XTS encrypted volume UUIDs.
  failed_volume_ids = []     # Volumes whose conversion state is "Failed".
  cs_plist = GetCoreStoragePlist()
  groups = cs_plist.get('CoreStorageLogicalVolumeGroups', [])
  # Any logical volume group at all means core storage is at least enabled.
  if groups:
    state = State.ENABLED
  for group in groups:
    for family in group.get('CoreStorageLogicalVolumeFamilies', []):
      # The encryption type is a property of the family, not of each volume.
      family_plist = GetCoreStoragePlist(family['CoreStorageUUID'])
      enc = family_plist.get('CoreStorageLogicalVolumeFamilyEncryptionType', '')
      for volume in family['CoreStorageLogicalVolumes']:
        volume_id = volume['CoreStorageUUID']
        volume_plist = GetCoreStoragePlist(volume_id)
        conv_state = volume_plist.get(
            'CoreStorageLogicalVolumeConversionState', '')
        # Known states include: Pending, Converting, Complete, Failed.
        if conv_state == 'Failed':
          failed_volume_ids.append(volume_id)
        elif enc == 'AES-XTS':
          # If conv_state is not 'Failed' and enc is correct, consider the
          # volume encrypted to include those that are still encrypting.
          # A potential TODO might be to separate these.
          encrypted_volume_ids.append(volume_id)
        else:
          volume_ids.append(volume_id)
  if failed_volume_ids:
    state = State.FAILED
  elif encrypted_volume_ids and not volume_ids:
    state = State.ENCRYPTED
  # For now at least, consider "failed" volumes as encrypted, as the same
  # actions are valid for such volumes. For example: revert.
  encrypted_volume_ids.extend(failed_volume_ids)
  return state, encrypted_volume_ids, volume_ids
def GetState():
  """Check if core storage is in place.

  Returns:
    One of the class properties of State.
  """
  return GetStateAndVolumeIds()[0]
def GetVolumeSize(uuid, readable=True):
  """Return the size of the volume with the given UUID.

  Args:
    uuid: str, ID of the volume in question
    readable: Optional boolean, default true: return a human-readable string
      when true, otherwise int number of bytes.
  Returns:
    str or int, see "readable" arg.
  Raises:
    Error: there was a problem getting volume info.
    ValueError: The UUID is formatted incorrectly.
  """
  if not util.UuidIsValid(uuid):
    raise ValueError('Invalid UUID: ' + uuid)
  try:
    plist = util.GetPlistFromExec(
        (DISKUTIL, 'corestorage', 'info', '-plist', uuid))
  except util.ExecError:
    logging.exception('GetVolumeSize() failed to get volume info: %s', uuid)
    raise Error('diskutil corestorage info failed for %s' % uuid)
  num_bytes = plist['CoreStorageLogicalVolumeSize']
  if readable:
    # FIX: float() guards against integer (floor) division under Python 2,
    # which always rendered whole-GiB strings like "3.00 GiB".
    return '%.2f GiB' % (float(num_bytes) / (1 << 30))
  else:
    return num_bytes
def UnlockVolume(uuid, passphrase):
  """Unlock a core storage encrypted volume.

  Args:
    uuid: str, uuid of the volume to unlock.
    passphrase: str, passphrase to unlock the volume.
  Raises:
    CouldNotUnlockError: the volume cannot be unlocked.
    ValueError: The UUID is formatted incorrectly.
  """
  if not util.UuidIsValid(uuid):
    raise ValueError('Invalid UUID: ' + uuid)
  returncode, _, stderr = util.Exec(
      (DISKUTIL, 'corestorage', 'unlockVolume', uuid, '-stdinpassphrase'),
      stdin=passphrase)
  if returncode == 0:
    return
  # diskutil reports an error for volumes that are already accessible;
  # treat those two messages as success.
  already_open = ('volume is not locked' in stderr or
                  'is already unlocked' in stderr)
  if not already_open:
    raise CouldNotUnlockError(
        'Could not unlock volume (%s).' % returncode)
def RevertVolume(uuid, passphrase):
  """Revert a core storage encrypted volume (to unencrypted state).

  Args:
    uuid: str, uuid of the volume to revert.
    passphrase: str, passphrase to unlock the volume.
  Raises:
    CouldNotRevertError: the volume was unlocked, but cannot be reverted.
    CouldNotUnlockError: the volume cannot be unlocked.
    ValueError: The UUID is formatted incorrectly.
  """
  if not util.UuidIsValid(uuid):
    raise ValueError('Invalid UUID: ' + uuid)
  # The volume must be unlocked first; this raises CouldNotUnlockError on
  # failure.
  UnlockVolume(uuid, passphrase)
  returncode = util.Exec(
      (DISKUTIL, 'corestorage', 'revert', uuid, '-stdinpassphrase'),
      stdin=passphrase)[0]
  if returncode != 0:
    raise CouldNotRevertError('Could not revert volume (%s).' % returncode)
| 0 | 0 | 0 |
dce35319a54555e1ca61086bdbfbd5b054b2e50e | 976 | py | Python | starmato/admin/templatetags/_fieldset_related.py | Depado/starmato-admin | 58452e0085ff06d6adee27c578a95ea5aca390d9 | [
"MIT"
] | 1 | 2015-11-08T10:13:32.000Z | 2015-11-08T10:13:32.000Z | starmato/admin/templatetags/_fieldset_related.py | Depado/starmato-admin | 58452e0085ff06d6adee27c578a95ea5aca390d9 | [
"MIT"
] | null | null | null | starmato/admin/templatetags/_fieldset_related.py | Depado/starmato-admin | 58452e0085ff06d6adee27c578a95ea5aca390d9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
| 42.434783 | 122 | 0.697746 | # -*- coding: utf-8 -*-
def before_related(adminform):
    """Split adminform.fieldsets at the 'related_go_here' placeholder.

    Stores the fieldsets preceding the placeholder in
    ``adminform.fieldsets_before`` (and makes them the active fieldsets),
    and the ones following it in ``adminform.fieldsets_after``, for later
    use by ``after_related``.

    Args:
        adminform: a Django AdminForm-like object with a ``fieldsets`` list.
    Returns:
        The same adminform, mutated in place.
    """
    marker = ('related_go_here', {'fields': []})
    adminform.fieldsets_before = adminform.fieldsets
    adminform.fieldsets_after = []
    try:
        # FIX: compute the marker index once (was computed twice) and catch
        # only ValueError instead of a bare except that hid real errors.
        idx = adminform.fieldsets.index(marker)
        adminform.fieldsets_before = adminform.fieldsets[:idx]
        adminform.fieldsets_after = adminform.fieldsets[idx + 1:]
        adminform.fieldsets = adminform.fieldsets_before
    except ValueError:
        # No placeholder present: keep the original fieldsets untouched.
        pass
    return adminform
def after_related(adminform):
    """Activate the fieldsets stored after the 'related_go_here' placeholder.

    Restores ``adminform.fieldsets_after`` (captured by ``before_related``)
    as the active fieldsets. If that slice contains a further placeholder,
    the adminform is split again so the tag pair can be used repeatedly.

    Args:
        adminform: the adminform previously processed by ``before_related``.
    Returns:
        The same adminform, mutated in place.
    """
    marker = ('related_go_here', {'fields': []})
    try:
        adminform.fieldsets = adminform.fieldsets_after
        # FIX: narrowed the bare except. AttributeError: before_related was
        # never run; ValueError: no further placeholder remains. In both
        # cases the current state is already what we want.
        idx = adminform.fieldsets.index(marker)
        adminform.fieldsets_before = adminform.fieldsets[:idx]
        adminform.fieldsets_after = adminform.fieldsets[idx + 1:]
        adminform.fieldsets = adminform.fieldsets_after
    except (AttributeError, ValueError):
        pass
    return adminform
| 906 | 0 | 45 |
be67b511064c47e06d1e759d9a1b90650fb4bff6 | 9,171 | py | Python | app/service/time_series_detector/detect_service.py | angry-tony/Metis | 79140ca056c6ae280e71efc40612087fa1df751b | [
"Apache-2.0"
] | null | null | null | app/service/time_series_detector/detect_service.py | angry-tony/Metis | 79140ca056c6ae280e71efc40612087fa1df751b | [
"Apache-2.0"
] | null | null | null | app/service/time_series_detector/detect_service.py | angry-tony/Metis | 79140ca056c6ae280e71efc40612087fa1df751b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Tencent is pleased to support the open source community by making Metis available.
Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://opensource.org/licenses/BSD-3-Clause
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
import time
import os
import threading
from app.dao.time_series_detector import anomaly_op
from app.dao.time_series_detector import sample_op
from app.dao.time_series_detector import train_op
from app.utils.utils import *
from app.service.time_series_detector.algorithm import isolation_forest, ewma, polynomial_interpolation, statistic, xgboosting
from app.config.errorcode import *
MODEL_PATH = os.path.join(os.path.dirname(__file__), '../../model/time_series_detector/')
| 42.068807 | 305 | 0.586087 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Tencent is pleased to support the open source community by making Metis available.
Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://opensource.org/licenses/BSD-3-Clause
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
import time
import os
import threading
from app.dao.time_series_detector import anomaly_op
from app.dao.time_series_detector import sample_op
from app.dao.time_series_detector import train_op
from app.utils.utils import *
from app.service.time_series_detector.algorithm import isolation_forest, ewma, polynomial_interpolation, statistic, xgboosting
from app.config.errorcode import *
MODEL_PATH = os.path.join(os.path.dirname(__file__), '../../model/time_series_detector/')
class DetectService(object):
    """Facade for time-series anomaly detection.

    Offers model training (process_train) and online anomaly prediction
    (value_predict / rate_predict), backed by several unsupervised
    detectors plus a supervised XGBoost model.
    NOTE(review): written for Python 2 (map() results are consumed as
    lists) -- confirm before porting.
    """
    def __init__(self):
        # DB access objects and one instance of each detector algorithm.
        self.sample_op_obj = sample_op.SampleOperation()
        self.anomaly_op_obj = anomaly_op.AbnormalOperation()
        self.iforest_obj = isolation_forest.IForest()
        self.ewma_obj = ewma.Ewma()
        self.polynomial_obj = polynomial_interpolation.PolynomialInterpolation()
        self.statistic_obj = statistic.Statistic()
        self.supervised_obj = xgboosting.XGBoosting()
    def __generate_model(self, data, task_id):
        """
        Start train a model
        :param data: Training dataset.
        :param task_id: The id of the training task.
        """
        xgb_obj = xgboosting.XGBoosting()
        # pylint: disable=unused-variable
        ret_code, ret_data = xgb_obj.xgb_train(data, task_id)
        current_timestamp = int(time.time())
        train_op_obj = train_op.TrainOperation()
        # Persist the outcome; the model file name is only set on success.
        if ret_code == 0:
            train_status = "complete"
            params = {
                "task_id": task_id,
                "end_time": current_timestamp,
                "status": train_status,
                "model_name": task_id + "_model"
            }
        else:
            train_status = "failed"
            params = {
                "task_id": task_id,
                "end_time": current_timestamp,
                "status": train_status,
                "model_name": ""
            }
        train_op_obj.update_model_info(params)
    def process_train(self, data):
        """
        Start a process to train model
        :param data: Training dataset.
        """
        sample_params = {
            "trainOrTest": data["trainOrTest"],
            "positiveOrNegative": data["positiveOrNegative"],
            "source": data["source"],
            "beginTime": data["beginTime"],
            "endTime": data["endTime"]
        }
        samples = self.sample_op_obj.sample_query_all(sample_params)
        train_op_obj = train_op.TrainOperation()
        samples_list = []
        positive_count = 0
        negative_count = 0
        # Convert each stored sample's CSV series to ints and count labels
        # (flag == 1 means a positive sample).
        for index in samples:
            samples_list.append({"flag": index["flag"], "data": map(int, index["data"].split(','))})
            if index["flag"] == 1:
                positive_count = positive_count + 1
            else:
                negative_count = negative_count + 1
        # Millisecond timestamp doubles as the unique training task id.
        task_id = str(int(round(time.time() * 1000)))
        train_params = {
            "begin_time": int(time.time()),
            "end_time": int(time.time()),
            "task_id": task_id,
            "status": "running",
            "source": data["source"],
            "sample_num": len(samples_list),
            "postive_sample_num": positive_count,
            "negative_sample_num": negative_count
        }
        # Supervised training needs at least one sample of each class.
        if positive_count == 0 or negative_count == 0:
            return build_ret_data(LACK_SAMPLE, "")
        train_op_obj.insert_train_info(train_params)
        # Train in a background (non-daemon) thread; on failure to start,
        # mark the task failed in the DB.
        try:
            t = threading.Thread(target=self.__generate_model, args=(samples_list, task_id, ))
            t.setDaemon(False)
            t.start()
        except Exception:
            train_status = "failed"
            params = {
                "task_id": task_id,
                "end_time": int(time.time()),
                "status": train_status,
                "model_name": ""
            }
            train_op_obj.update_model_info(params)
        return build_ret_data(OP_SUCCESS, "")
    def __list_is_digit(self, data):
        # True if every item of `data` parses as a float.
        for index in data:
            try:
                float(index)
            except ValueError:
                return False
        return True
    def __check_param(self, data):
        # Validate the predict request; returns (error_code, message).
        # dataC/dataB must each span 2*window+1 points, dataA window+1.
        if ("viewName" not in data.keys()) or ("attrId" not in data.keys()) or ("attrName" not in data.keys()) or ("time" not in data.keys()) or ("dataC" not in data.keys()) or ("dataB" not in data.keys()) or ("dataA" not in data.keys()):
            return CHECK_PARAM_FAILED, "missing parameter"
        if not data['dataA']:
            return CHECK_PARAM_FAILED, "dataA can not be empty"
        if not data['dataB']:
            return CHECK_PARAM_FAILED, "dataB can not be empty"
        if not data['dataC']:
            return CHECK_PARAM_FAILED, "dataC can not be empty"
        if not self.__list_is_digit(data['dataA'].split(',')):
            return CHECK_PARAM_FAILED, "dataA contains illegal numbers"
        if not self.__list_is_digit(data['dataB'].split(',')):
            return CHECK_PARAM_FAILED, "dataB contains illegal numbers"
        if not self.__list_is_digit(data['dataC'].split(',')):
            return CHECK_PARAM_FAILED, "dataC contains illegal numbers"
        if "window" in data:
            window = data["window"]
        else:
            window = 180
        if len(data['dataC'].split(',')) != (2 * window + 1):
            return CHECK_PARAM_FAILED, "dataC is not long enough"
        if len(data['dataB'].split(',')) != (2 * window + 1):
            return CHECK_PARAM_FAILED, "dataB is not long enough"
        if len(data['dataA'].split(',')) != (window + 1):
            return CHECK_PARAM_FAILED, "dataA is not long enough"
        return OP_SUCCESS, ""
    def value_predict(self, data):
        """
        Predict the data
        :param data: the time series to detect of
        """
        ret_code, ret_data = self.__check_param(data)
        if ret_code != OP_SUCCESS:
            return build_ret_data(ret_code, ret_data)
        # Use the task-specific model when given, else the shipped default.
        if "taskId" in data and data["taskId"]:
            model_name = MODEL_PATH + data["taskId"] + "_model"
        else:
            model_name = MODEL_PATH + "xgb_default_model"
        combined_data = data["dataC"] + "," + data["dataB"] + "," + data["dataA"]
        time_series = map(int, combined_data.split(','))
        if "window" in data:
            window = data["window"]
        else:
            window = 180
        # Run the unsupervised detectors first; a result of 0 means the
        # detector considers the point anomalous.
        statistic_result = self.statistic_obj.predict(time_series)
        ewma_result = self.ewma_obj.predict(time_series)
        polynomial_result = self.polynomial_obj.predict(time_series, window)
        iforest_result = self.iforest_obj.predict(time_series, window)
        # Only consult the (more expensive) supervised model when at least
        # one unsupervised detector flagged an anomaly.
        if statistic_result == 0 or ewma_result == 0 or polynomial_result == 0 or iforest_result == 0:
            xgb_result = self.supervised_obj.predict(time_series, window, model_name)
            res_value = xgb_result[0]
            prob = xgb_result[1]
        else:
            res_value = 1
            prob = 1
        ret_data = {"ret": res_value, "p": str(prob)}
        # ret == 0 means anomaly: record it for later review.
        if ret_data["ret"] == 0:
            anomaly_params = {
                "view_id": data["viewId"],
                "view_name": data["viewName"],
                "attr_id": data["attrId"],
                "attr_name": data["attrName"],
                "time": data["time"],
                "data_c": data["dataC"],
                "data_b": data["dataB"],
                "data_a": data["dataA"]
            }
            self.anomaly_op_obj.insert_anomaly(anomaly_params)
        return build_ret_data(OP_SUCCESS, ret_data)
    def rate_predict(self, data):
        # Rate (float) series only go through the statistic detector;
        # 0 means anomaly, which is persisted like in value_predict.
        combined_data = data["dataC"] + "," + data["dataB"] + "," + data["dataA"]
        time_series = map(float, combined_data.split(','))
        statistic_result = self.statistic_obj.predict(time_series)
        if statistic_result == 0:
            prob = 0
        else:
            prob = 1
        ret_data = {"ret": statistic_result, "p": str(prob)}
        if ret_data["ret"] == 0:
            anomaly_params = {
                "view_id": data["viewId"],
                "view_name": data["viewName"],
                "attr_id": data["attrId"],
                "attr_name": data["attrName"],
                "time": data["time"],
                "data_c": data["dataC"],
                "data_b": data["dataB"],
                "data_a": data["dataA"]
            }
            self.anomaly_op_obj.insert_anomaly(anomaly_params)
        return build_ret_data(OP_SUCCESS, ret_data)
| 2,964 | 4,982 | 23 |
f99282b9e36d980fa7054adc8c003058f5cae385 | 438 | py | Python | open_data/dataset/migrations/0020_answer_source.py | balfroim/OpenData | f0334dae16c2806e81f7d2d53adeabc72403ecce | [
"MIT"
] | null | null | null | open_data/dataset/migrations/0020_answer_source.py | balfroim/OpenData | f0334dae16c2806e81f7d2d53adeabc72403ecce | [
"MIT"
] | null | null | null | open_data/dataset/migrations/0020_answer_source.py | balfroim/OpenData | f0334dae16c2806e81f7d2d53adeabc72403ecce | [
"MIT"
] | null | null | null | # Generated by Django 3.2.2 on 2021-05-10 09:17
from django.db import migrations, models
| 23.052632 | 72 | 0.591324 | # Generated by Django 3.2.2 on 2021-05-10 09:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dataset', '0019_alter_datasetship_unique_together'),
]
operations = [
migrations.AddField(
model_name='answer',
name='source',
field=models.URLField(blank=True, default=None, null=True),
),
]
| 0 | 318 | 25 |
db1f64329b0d965894c99cd67615f32c820b198b | 9,913 | py | Python | Code/WorldMap.py | Shahrose/lex-talionis | ef7e48124b36269f4212eb0e3a7747caf53bfadd | [
"MIT"
] | null | null | null | Code/WorldMap.py | Shahrose/lex-talionis | ef7e48124b36269f4212eb0e3a7747caf53bfadd | [
"MIT"
] | null | null | null | Code/WorldMap.py | Shahrose/lex-talionis | ef7e48124b36269f4212eb0e3a7747caf53bfadd | [
"MIT"
] | null | null | null | from . import GlobalConstants as GC
from . import configuration as cf
from . import Utility, Image_Modification, Engine, GenericMapSprite
| 35.27758 | 159 | 0.597196 | from . import GlobalConstants as GC
from . import configuration as cf
from . import Utility, Image_Modification, Engine, GenericMapSprite
class WorldMapBackground(object):
    """Scrollable world-map scene.

    Holds a large background sprite plus labels, highlights, markers, unit
    sprites and an optional cursor drawn on top. (self.x, self.y) is the
    top-left corner of the visible window within the background; pan() and
    quick_pan() move it, with optional easing.
    """

    def __init__(self, sprite, labels=True):
        self.x = 0
        self.y = 0
        self.sprite = sprite
        # for easing (smooth pan from (old_x, old_y) to (target_x, target_y))
        self.easing_flag = False
        self.target_x = 0
        self.target_y = 0
        self.old_x = 0
        self.old_y = 0
        self.start_time = 0
        # Dictionary of world map sprites, keyed by user-assigned name
        self.wm_sprites = {}
        # Labels for world map
        self.wm_labels = []
        if labels:
            self.parse_labels('Data/world_map_labels.txt')
        # Highlights
        self.wm_highlights = []
        self.wm_markers = []
        # Cursor
        self.cursor = None

    def parse_labels(self, fp):
        """Load labels from *fp*; each line is 'name;x;y;font_flag'."""
        with open(fp, mode='r', encoding='utf-8') as label_data:
            for line in label_data:
                split_line = line.strip().split(';')
                coord = (int(split_line[1]), int(split_line[2]))
                if split_line[3] == '1':
                    font = GC.FONT['chapter_yellow']
                else:
                    font = GC.FONT['chapter_green']
                self.add_label(split_line[0], coord, font)

    def add_label(self, name, position, font=None):
        # BUGFIX: parse_labels() passes a third 'font' argument, which used
        # to raise TypeError because add_label only took (name, position).
        # Accept the font optionally and apply it to the label.
        # NOTE(review): WMLabel renders its text surface with its default
        # font at construction time, so a custom font only affects code that
        # reads label.font afterwards -- confirm whether WMLabel should take
        # the font in its constructor instead.
        label = WMLabel(name, position)
        if font is not None:
            label.font = font
        self.wm_labels.append(label)

    def clear_labels(self):
        # Ask each label to fade out; they delete themselves when done.
        for label in self.wm_labels:
            label.remove()

    def add_highlight(self, sprite, position):
        self.wm_highlights.append(WMHighlight(sprite, position))

    def clear_highlights(self):
        # Highlights finish their fade before being dropped in draw().
        for highlight in self.wm_highlights:
            highlight.remove()

    def add_marker(self, sprite, position):
        self.wm_markers.append(WMMarker(sprite, position))

    def clear_markers(self):
        self.wm_markers = []

    def add_sprite(self, name, klass, gender, team, position, transition_in=False):
        # Key is string assigned by user. Value is units class, gender, team, starting_position
        self.wm_sprites[name] = GenericMapSprite.GenericMapSprite(klass, gender, team, position, transition_in)

    def quick_remove_sprite(self, name):
        # Immediate removal, no transition animation.
        del self.wm_sprites[name]

    def remove_sprite(self, name):
        if name in self.wm_sprites:
            self.wm_sprites[name].remove()
        elif cf.OPTIONS['debug']:
            print('Error! ', name, ' not in self.wm_sprites')

    def move_sprite(self, name, new_pos, slow=False):
        """Move the named sprite by a relative offset."""
        if name in self.wm_sprites:
            self.wm_sprites[name].move(new_pos, slow)
        elif cf.OPTIONS['debug']:
            print('Error! ', name, ' not in self.wm_sprites')

    def set_sprite(self, name, new_pos, slow=False):
        """Send the named sprite toward an absolute position."""
        if name in self.wm_sprites:
            self.wm_sprites[name].set_target(new_pos, slow)
        elif cf.OPTIONS['debug']:
            print('Error! ', name, ' not in self.wm_sprites')

    def offset_sprite(self, name, new_pos):
        if name in self.wm_sprites:
            self.wm_sprites[name].offset(new_pos)
        elif cf.OPTIONS['debug']:
            print('Error! ', name, ' not in self.wm_sprites')

    def teleport_sprite(self, name, new_pos):
        if name in self.wm_sprites:
            self.wm_sprites[name].teleport(new_pos)
        elif cf.OPTIONS['debug']:
            print('Error! ', name, ' not in self.wm_sprites')

    def focus_sprite(self, name):
        if name in self.wm_sprites:
            self.wm_sprites[name].hovered = True

    def unfocus_sprite(self, name):
        if name in self.wm_sprites:
            self.wm_sprites[name].hovered = False

    def quick_pan(self, new_pos):
        """Instantly scroll the view by a relative offset (no easing)."""
        self.x += new_pos[0]
        self.y += new_pos[1]
        self.x, self.y = self.bound(self.x, self.y)

    def pan(self, new_pos):
        """Smoothly scroll the view by a relative offset (eased in draw())."""
        self.old_x = self.x
        self.old_y = self.y
        self.target_x = self.x + new_pos[0]
        self.target_y = self.y + new_pos[1]
        self.start_time = Engine.get_time()
        self.easing_flag = True
        self.target_x, self.target_y = self.bound(self.target_x, self.target_y)

    def bound(self, x, y):
        # Clamp so the visible window never leaves the background sprite.
        x = Utility.clamp(x, 0, self.sprite.get_width() - GC.WINWIDTH)
        y = Utility.clamp(y, 0, self.sprite.get_height() - GC.WINHEIGHT)
        return x, y

    def create_cursor(self, coord):
        from .Cursor import Cursor  # local import avoids a circular import
        self.cursor = Cursor('Cursor', coord, fake=True)

    def remove_cursor(self):
        self.cursor = None

    def draw(self, surf):
        # === UPDATE ===
        # Handle easing
        current_time = Engine.get_time()
        if self.easing_flag:
            self.x = Utility.easing(current_time - self.start_time, self.old_x, self.target_x - self.old_x, 400)
            self.y = Utility.easing(current_time - self.start_time, self.old_y, self.target_y - self.old_y, 400)
            # Stop easing once the pan has reached (or overshot) its target
            # on any axis, then snap exactly onto the target.
            if self.target_x > self.old_x and self.x >= self.target_x or \
                    self.target_x < self.old_x and self.x <= self.target_x or \
                    self.target_y > self.old_y and self.y >= self.target_y or \
                    self.target_y < self.old_y and self.y <= self.target_y:
                self.easing_flag = False
                self.x = self.target_x
                self.y = self.target_y
        # === DRAW ===
        image = Engine.copy_surface(self.sprite)
        # Markers
        for marker in self.wm_markers:
            marker.draw(image)
        # Highlights
        for highlight in self.wm_highlights:
            highlight.draw(image)
        self.wm_highlights = [highlight for highlight in self.wm_highlights if not highlight.remove_me_now()]
        # Draw label
        for label in self.wm_labels:
            label.draw(image)
        self.wm_labels = [label for label in self.wm_labels if not label.remove_me_now()]
        # Update world_map_sprites
        for key, wm_unit in self.wm_sprites.items():
            wm_unit.update()
        # World map sprites, painter's order (higher on map drawn first)
        sorted_sprites = sorted(list(self.wm_sprites.values()), key=lambda unit: unit.position[1])
        for wm_unit in sorted_sprites:
            wm_unit.draw(image)
        self.wm_sprites = {name: unit for name, unit in self.wm_sprites.items() if not unit.remove_flag}
        # Cursor
        if self.cursor:
            self.cursor.image = Engine.subsurface(self.cursor.passivesprite, (GC.CURSORSPRITECOUNTER.count*GC.TILEWIDTH*2, 0, GC.TILEWIDTH*2, GC.TILEHEIGHT*2))
            self.cursor.draw(image)
        # Blit only the visible window of the composed map.
        image = Engine.subsurface(image, (self.x, self.y, GC.WINWIDTH, GC.WINHEIGHT))
        surf.blit(image, (0, 0))
class WMLabel(object):
    """A text/image label on the world map that slides and fades in and out.

    State machine: 'fade_in' -> 'normal' -> 'fade_out' -> 'remove_me'.
    """

    def __init__(self, name, position):
        self.font = GC.FONT['label_white']
        # Prefer a pre-rendered image when one exists; otherwise render text.
        if name in GC.IMAGESDICT:
            self.surface = GC.IMAGESDICT[name]
        else:
            size = self.font.size(name)
            self.surface = Engine.create_surface(size, transparent=True)
            self.font.blit(name, self.surface, (0, 0))
        self.position = position
        self.state = 'fade_in'
        self.transition_counter = 0   # frames elapsed within the transition
        self.transition_speed = 12    # frames for a full fade

    def draw(self, surf):
        # Each state computes its own image/offset; fade_in slides the label
        # leftward into place while it becomes opaque, fade_out reverses it.
        if self.state == 'fade_in':
            progress = float(self.transition_counter)/self.transition_speed
            transparency = 100 - int(100*progress)
            image = Image_Modification.flickerImageTranslucent(self.surface, transparency)
            pos_x = int(Utility.quad_ease_out(16, 0, progress, 1))
            pos = self.position[0] + pos_x, self.position[1]
            if progress >= 1:
                self.state = 'normal'
            else:
                self.transition_counter += 1
        elif self.state == 'normal':
            image = self.surface
            pos = self.position
        elif self.state == 'fade_out':
            # Runs the counter back down; removal happens at progress <= 0.
            progress = float(self.transition_counter)/self.transition_speed
            transparency = 100 - int(100*progress)
            image = Image_Modification.flickerImageTranslucent(self.surface, transparency)
            pos_x = int(Utility.quad_ease_out(16, 0, progress, 1))
            pos = self.position[0] - pos_x, self.position[1]
            if progress <= 0:
                self.state = 'remove_me'
            else:
                self.transition_counter -= 1
        surf.blit(image, pos)

    def remove(self):
        # Request a fade-out; the owner drops the label once remove_me_now().
        self.state = 'fade_out'

    def remove_me_now(self):
        return self.state == 'remove_me'
class WMHighlight(object):
    """A pulsing, additively-blended highlight on the world map.

    trans_value oscillates between 0 and 100 in steps of 2. When removal
    has been requested, the highlight finishes fading back to 0 before it
    flags itself for deletion.
    """

    def __init__(self, sprite, position):
        self.sprite = sprite
        self.position = position
        self.trans_value = 0      # current pulse strength, 0..100
        self.trans_dir = True     # True while brightening
        self.remove_asap = False  # removal has been requested
        self.remove_flag = False  # safe for the owner to delete now

    def update(self):
        """Advance the pulse by one tick."""
        self.trans_value += 2 if self.trans_dir else -2
        if self.trans_value >= 100:
            self.trans_value = 100
            self.trans_dir = False
        elif self.trans_value <= 0:
            self.trans_value = 0
            self.trans_dir = True
            # Only disappear once fully faded out.
            if self.remove_asap:
                self.remove_flag = True
        return False

    def remove(self):
        """Request removal; takes effect after the fade-out completes."""
        self.remove_asap = True

    def remove_me_now(self):
        """True once the owner may drop this highlight."""
        return self.remove_flag

    def draw(self, surf):
        self.update()
        strength = 2.55 * self.trans_value  # map 0..100 onto 0..255
        image = Image_Modification.flickerImageTranslucentBlend(self.sprite, strength)
        Engine.blit(surf, image, self.position, None, Engine.BLEND_RGB_ADD)
class WMMarker(object):
    """Animated marker icon drawn onto the world map.

    The sprite sheet is treated as an 8x8 grid of 8-pixel frames; every
    draw call advances the animation counter by one.
    """

    def __init__(self, sprite, position):
        self.sprite = sprite      # sprite sheet of 8x8-pixel frames
        self.position = position  # top-left blit position on the map
        self.current_idx = 0      # animation counter

    def draw(self, surf):
        """Advance the animation one tick and blit the current frame."""
        self.current_idx += 1
        col = self.current_idx // 8 % 8
        row = self.current_idx % 8
        frame = Engine.subsurface(self.sprite, (col * 8, row * 8, 8, 8))
        surf.blit(frame, self.position)
| 8,749 | 20 | 1,006 |
32a1b8a06f3d67d2bf12c8fa0306b5ff232b7ab7 | 845 | py | Python | steps/step47.py | daisuke19891023/dl-from-scratch-3 | 4fb9441cfcceca96ac07c602053e608d92c80838 | [
"MIT"
] | null | null | null | steps/step47.py | daisuke19891023/dl-from-scratch-3 | 4fb9441cfcceca96ac07c602053e608d92c80838 | [
"MIT"
] | 2 | 2020-05-30T15:04:54.000Z | 2020-05-30T15:05:20.000Z | steps/step47.py | daisuke19891023/dl-from-scratch-3 | 4fb9441cfcceca96ac07c602053e608d92c80838 | [
"MIT"
] | null | null | null | if '__file__' in globals():
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
from dezero import Variable, Model, as_variable
from dezero import setup_variable
from dezero.utils import plot_dot_graph
import dezero.functions as F
from dezero import optimizers
from dezero.models import MLP
setup_variable()


def softmaxld(x):
    """Naive softmax: exponentiate *x* and normalise by F.sum over all
    elements, turning the MLP logits into probabilities."""
    x = as_variable(x)
    y = F.exp(x)
    return y / F.sum(y)


if __name__ == '__main__':
    # Forward a single sample and convert the logits to probabilities.
    x = Variable(np.array([[0.2, -0.4]]))
    model = MLP((10, 3))
    y = model(x)
    p = softmaxld(y)  # BUG FIX: softmaxld was undefined in this file, raising NameError
    print(y)
    print(p)
    # Batch of four samples with integer class labels; compute the loss.
    a = np.array([[0.2, -0.4], [0.3, 0.5], [1.3, -3.2], [2.1, 0.3]])
    t = np.array([2, 0, 1, 0])
    z = model(a)
    loss = F.softmax_cross_entropy_simple(z, t)
    print(loss)
| 22.837838 | 68 | 0.602367 | if '__file__' in globals():
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
from dezero import Variable, Model, as_variable
from dezero import setup_variable
from dezero.utils import plot_dot_graph
import dezero.functions as F
from dezero import optimizers
from dezero.models import MLP
setup_variable()
def softmaxld(x):
    """Naive softmax: exponentiate *x* and normalise by F.sum over all
    of its elements (no axis argument is used)."""
    exps = F.exp(as_variable(x))
    return exps / F.sum(exps)
if __name__ == '__main__':
    # Forward a single sample through the MLP, then convert the logits to
    # probabilities with the local softmaxld() helper.
    x = Variable(np.array([[0.2, -0.4]]))
    model = MLP((10, 3))
    y = model(x)
    p = softmaxld(y)
    print(y)
    print(p)
    # Batch of four samples with integer class labels; compute the
    # softmax cross-entropy loss against them.
    a = np.array([[0.2, -0.4], [0.3, 0.5], [1.3, -3.2], [2.1, 0.3]])
    t = np.array([2, 0, 1, 0])
    z = model(a)
    loss = F.softmax_cross_entropy_simple(z, t)
    print(loss)
| 78 | 0 | 23 |
cf20735e73c62d19893752ac999e845017d3884d | 794 | py | Python | scripts/llvm_sandbox.py | jevinskie/pypcode-emu | 49d62090df9b25a7c4e35eee3532c3763f4e4286 | [
"MIT"
] | 13 | 2022-02-27T03:35:24.000Z | 2022-03-21T10:39:16.000Z | scripts/llvm_sandbox.py | jevinskie/pypcode-emu | 49d62090df9b25a7c4e35eee3532c3763f4e4286 | [
"MIT"
] | null | null | null | scripts/llvm_sandbox.py | jevinskie/pypcode-emu | 49d62090df9b25a7c4e35eee3532c3763f4e4286 | [
"MIT"
] | 1 | 2022-03-21T01:24:00.000Z | 2022-03-21T01:24:00.000Z | #!/usr/bin/env python3
from llvmlite import ir

# Common LLVM integer types; only i32 is used below, the rest are kept
# as module-level conveniences.
i1 = ir.IntType(1)
i8 = ir.IntType(8)
i16 = ir.IntType(16)
i32 = ir.IntType(32)
i64 = ir.IntType(64)
void = ir.VoidType()

# Build a module with one void function modelling a conditional move:
# select(cond == 0, true_val, false_val).
m = ir.Module()
fty = ir.FunctionType(void, [i32, i32, i32])
f = ir.Function(m, fty, "cmov_test")
builder = ir.IRBuilder(f.append_basic_block("entry"))

cond_v, true_v, false_v = f.args
cond_v.name = "cond"
true_v.name = "true_val"
false_v.name = "false_val"

bool_v = builder.icmp_unsigned("==", cond_v, cond_v.type(0), name="cmov_cond")
builder.select(bool_v, true_v, false_v, name="cmov_val")
print(m)
| 22.055556 | 74 | 0.671285 | #!/usr/bin/env python3
from llvmlite import ir

# Handy LLVM integer types (only i32 is used below).
i1 = ir.IntType(1)
i8 = ir.IntType(8)
i16 = ir.IntType(16)
i32 = ir.IntType(32)
i64 = ir.IntType(64)
void = ir.VoidType()

# One void function modelling a conditional move over three i32 args.
m = ir.Module()
fty = ir.FunctionType(void, [i32, i32, i32])
f = ir.Function(m, fty, "cmov_test")
entry = f.append_basic_block("entry")
bld = ir.IRBuilder(entry)
cond_v = f.args[0]
cond_v.name = "cond"
true_v = f.args[1]
true_v.name = "true_val"
false_v = f.args[2]
false_v.name = "false_val"
# i1 condition: cond == 0 (cond_v.type(0) is an i32 zero constant).
bool_v = bld.icmp_unsigned("==", cond_v, cond_v.type(0), name="cmov_cond")
# Earlier branch-based attempt, kept for reference:
# cur_bb = bld.basic_block
# with bld.if_else(bool_v) as (then, otherwise):
#     with then:
#         true_bb = bld.basic_block
#     with otherwise:
#         false_bb = bld.basic_block
bld.select(bool_v, true_v, false_v, name="cmov_val")
print(m)
| 0 | 0 | 0 |
44387f35f95190fe89cac4c805fc410df407736f | 61 | py | Python | jupyterlab_nbconvert_nocode/nbconvert_functions/hideinput/__init__.py | vidartf/jupyterlab_nbconvert_nocode | c7b43f2df4a85415b923d3a87d95e5eb11ae437a | [
"Apache-2.0"
] | 12 | 2020-06-09T10:21:18.000Z | 2021-11-24T15:12:27.000Z | jupyterlab_nbconvert_nocode/nbconvert_functions/hideinput/__init__.py | vidartf/jupyterlab_nbconvert_nocode | c7b43f2df4a85415b923d3a87d95e5eb11ae437a | [
"Apache-2.0"
] | 18 | 2020-12-29T17:28:34.000Z | 2022-03-29T15:12:31.000Z | jupyterlab_nbconvert_nocode/nbconvert_functions/hideinput/__init__.py | vidartf/jupyterlab_nbconvert_nocode | c7b43f2df4a85415b923d3a87d95e5eb11ae437a | [
"Apache-2.0"
] | 2 | 2021-04-23T10:27:24.000Z | 2021-05-22T15:45:12.000Z | from .exporters import export_html, export_pdf # noqa: F401
| 30.5 | 60 | 0.786885 | from .exporters import export_html, export_pdf # noqa: F401
| 0 | 0 | 0 |
205f7a4093047bb8a4a2aefba2a390efb2f5fbae | 2,095 | py | Python | sovtokenfees/sovtokenfees/test/chain/test_fees_chain.py | burdettadam/token-plugin | 7d981d0edca174b00e20a0e46c125bbeca399bae | [
"Apache-2.0"
] | 9 | 2018-10-26T04:59:51.000Z | 2021-06-10T13:30:51.000Z | sovtokenfees/sovtokenfees/test/chain/test_fees_chain.py | burdettadam/token-plugin | 7d981d0edca174b00e20a0e46c125bbeca399bae | [
"Apache-2.0"
] | 29 | 2018-09-11T17:07:00.000Z | 2021-04-16T20:22:19.000Z | sovtokenfees/sovtokenfees/test/chain/test_fees_chain.py | burdettadam/token-plugin | 7d981d0edca174b00e20a0e46c125bbeca399bae | [
"Apache-2.0"
] | 26 | 2018-09-11T16:49:40.000Z | 2021-06-06T10:43:55.000Z | import pytest
from sovtokenfees.test.constants import NYM_FEES_ALIAS
from sovtokenfees.test.helper import add_fees_request_with_address
from indy_common.constants import NYM
from plenum.common.exceptions import RequestRejectedException
| 41.9 | 136 | 0.767064 | import pytest
from sovtokenfees.test.constants import NYM_FEES_ALIAS
from sovtokenfees.test.helper import add_fees_request_with_address
from indy_common.constants import NYM
from plenum.common.exceptions import RequestRejectedException
def send_nym_with_fees(helpers, address, change_address, fees_set, adjust_fees=0):
    """Create a fresh DID, attach a fees section paid from *address* (change
    to *change_address*) to its NYM request, submit it and return the DID.
    *adjust_fees* shifts the paid amount, used by the rejection tests."""
    did, verkey = helpers.wallet.create_did()
    nym_request = helpers.request.nym(dest=did, verkey=verkey)
    nym_request = add_fees_request_with_address(
        helpers,
        fees_set,
        nym_request,
        address,
        change_address=change_address,
        adjust_fees=adjust_fees,
    )
    helpers.sdk.send_and_check_request_objects([nym_request])
    return did
def test_fees_chain(addresses, helpers, mint_tokens, fees_set, looper):
    """Chain three fee-paying NYMs, each paying from the previous one's
    change address; the final change address holds 988 tokens (presumably
    the minted amount minus 3 fees of 4 -- confirm against mint_tokens)."""
    helpers.general.do_set_fees({NYM_FEES_ALIAS: 4})
    send_nym_with_fees(helpers, addresses[0], addresses[1], fees_set)
    send_nym_with_fees(helpers, addresses[1], addresses[2], fees_set)
    send_nym_with_fees(helpers, addresses[2], addresses[3], fees_set)
    utxos = helpers.general.get_utxo_addresses(addresses[3:])
    assert utxos[0][0]["amount"] == 988
def test_fees_chain_negative(addresses, helpers, mint_tokens, fees_set, looper):
    """A NYM whose fee amount is adjusted by +1 must be rejected, and the
    source UTXO must remain spendable by a later, correctly paid NYM."""
    helpers.general.do_set_fees({NYM_FEES_ALIAS: 4})
    send_nym_with_fees(helpers, addresses[0], addresses[1], fees_set)
    with pytest.raises(RequestRejectedException):
        send_nym_with_fees(helpers, addresses[1], addresses[2], fees_set, 1)
    send_nym_with_fees(helpers, addresses[1], addresses[3], fees_set)
    utxos = helpers.general.get_utxo_addresses(addresses[3:])
    # Two successful NYMs at 4 tokens each were paid by this point.
    assert utxos[0][0]["amount"] == 992
def test_fees_chain_positive(addresses, helpers, mint_tokens, fees_set, looper):
    """A NYM whose fee amount is adjusted by -1 must likewise be rejected,
    leaving the UTXO spendable by a later, correctly paid NYM."""
    helpers.general.do_set_fees({NYM_FEES_ALIAS: 4})
    send_nym_with_fees(helpers, addresses[0], addresses[1], fees_set)
    with pytest.raises(RequestRejectedException):
        send_nym_with_fees(helpers, addresses[1], addresses[2], fees_set, -1)
    send_nym_with_fees(helpers, addresses[1], addresses[3], fees_set)
    utxos = helpers.general.get_utxo_addresses(addresses[3:])
    # Two successful NYMs at 4 tokens each were paid by this point.
    assert utxos[0][0]["amount"] == 992
| 1,762 | 0 | 92 |
8612c165092041ec838f04a30477eca922b7a884 | 6,008 | py | Python | api/blueprints/auth/views/tokens.py | Shubhaankar-sharma/API | 418339481ae70c6a661c972355c33172e4920df8 | [
"MIT"
] | 1 | 2021-05-01T02:25:27.000Z | 2021-05-01T02:25:27.000Z | api/blueprints/auth/views/tokens.py | mohamed040406/API | 40ceb2b35271938d90e4309a6cdcf63ba0c17f0b | [
"MIT"
] | null | null | null | api/blueprints/auth/views/tokens.py | mohamed040406/API | 40ceb2b35271938d90e4309a6cdcf63ba0c17f0b | [
"MIT"
] | null | null | null | from quart import current_app, request, redirect, jsonify
from urllib.parse import quote_plus, parse_qs, urlparse
from quart.exceptions import MethodNotAllowed
from datetime import datetime, timedelta
from typing import List, Tuple
import jwt
import os
from api.models import Token, User
from api.app import API
from .. import bp
import utils
DISCORD_ENDPOINT = "https://discord.com/api"
request: utils.Request
SCOPES = ["identify"]
current_app: API
async def exchange_code(
    *, code: str, scope: str, redirect_uri: str, grant_type: str = "authorization_code"
) -> Tuple[dict, int]:
    """Exchange a Discord OAuth authorization code for token data.

    Returns the parsed JSON payload together with the HTTP status code.
    """
    payload = {
        "code": code,
        "scope": scope,
        "grant_type": grant_type,
        "redirect_uri": redirect_uri,
        "client_id": os.environ["DISCORD_CLIENT_ID"],
        "client_secret": os.environ["DISCORD_CLIENT_SECRET"],
    }
    url = "%s/v6/oauth2/token" % DISCORD_ENDPOINT
    # Discord's token endpoint requires form-encoded credentials.
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    async with current_app.http_session.post(url, data=payload, headers=headers) as resp:
        return await resp.json(), resp.status
async def get_user(access_token: str) -> dict:
    """Fetch the Discord profile of the user owning *access_token*."""
    url = "%s/v6/users/@me" % DISCORD_ENDPOINT
    auth_header = {"Authorization": "Bearer %s" % access_token}
    async with current_app.http_session.get(url, headers=auth_header) as resp:
        return await resp.json()
def format_scopes(scopes: List[str]) -> str:
    """Join OAuth scope names into the space-separated form Discord expects."""
    return " ".join(scopes)
def get_redirect(callback: str, scopes: List[str]) -> str:
    """Build the Discord OAuth2 authorization URL that sends the user back
    to *callback* after consenting to *scopes*."""
    client_id = os.environ["DISCORD_CLIENT_ID"]
    scope_str = format_scopes(scopes)
    encoded_cb = quote_plus(callback)
    return (
        f"{DISCORD_ENDPOINT}/oauth2/authorize?response_type=code"
        f"&client_id={client_id}"
        f"&scope={scope_str}"
        f"&redirect_uri={encoded_cb}"
        f"&prompt=consent"
    )
def is_valid_url(string: str) -> bool:
    """True when *string* parses with both a scheme and a network location."""
    parts = urlparse(string)
    return bool(parts.scheme) and bool(parts.netloc)
@bp.route("/discord/redirect", methods=["GET"])
async def redirect_to_discord_oauth_portal():
    """Send the client to Discord's OAuth consent page.

    The callback defaults to this service's own /auth/discord/callback
    endpoint but may be overridden via a ?callback= query parameter.
    """
    query = parse_qs(request.query_string.decode())
    default_cb = request.scheme + "://" + request.host + "/auth/discord/callback"
    callback = query.get("callback", default_cb)
    if isinstance(callback, list):  # parse_qs wraps present values in a list
        callback = callback[0]
    if not is_valid_url(callback):
        payload = {"error": "Bad Request", "message": "Not a well formed redirect URL."}
        return (
            jsonify(payload),
            400,
        )
    return redirect(get_redirect(callback=callback, scopes=SCOPES))
@bp.route("/discord/callback", methods=["GET", "POST"])
async def discord_oauth_callback():
    """
    Callback endpoint for finished discord authorization flow.
    GET -> Only used in DEBUG mode; gets code from querystring.
    POST -> Gets code (and callback) from JSON request data.

    On success, persists the Discord user and OAuth token, then returns a
    JWT for this API plus its expiry time.
    """
    if request.method == "GET":
        if not current_app.debug:
            # A GET request to this endpoint should only be used in testing.
            raise MethodNotAllowed(("POST",))
        qs = parse_qs(request.query_string.decode())
        code = qs.get("code")
        if code is not None:
            # parse_qs wraps each value in a list; keep the first entry.
            code = code[0]
        callback = request.scheme + "://" + request.host + "/auth/discord/callback"
    elif request.method == "POST":
        data = await request.json
        code = data.get("code")
        callback = data.get("callback", "")
    else:
        raise RuntimeWarning("Unexpected request method. (%s)" % request.method)
    if code is None:
        # BUG FIX: the old `"..." % "x" if cond else "y"` formatting bound %
        # tighter than the conditional, so POST requests got the bare
        # message "JSON data".  Parenthesise the conditional instead.
        source = "querystring arguments" if request.method == "GET" else "JSON data"
        return (
            jsonify(
                {
                    "error": "Bad Request",
                    "message": "Missing code in %s." % source,
                }
            ),
            400,
        )
    if not is_valid_url(callback):
        return (
            jsonify(
                {"error": "Bad Request", "message": "Not a well formed redirect URL."}
            ),
            400,
        )
    # Trade the one-time authorization code for access/refresh tokens.
    access_data, status_code = await exchange_code(
        code=code, scope=format_scopes(SCOPES), redirect_uri=callback
    )
    if access_data.get("error", False):
        if status_code == 400:
            return (
                jsonify(
                    {
                        "error": "Bad Request",
                        "message": "Discord returned 400 status.",
                        "data": access_data,
                    }
                ),
                400,
            )
        raise RuntimeWarning(
            "Unpredicted status_code.\n%s\n%s" % (str(access_data), status_code)
        )
    # Absolute expiry used for both the stored token and the JWT below.
    expires_at = datetime.utcnow() + timedelta(seconds=access_data["expires_in"])
    # BUG FIX: datetime.replace returns a new object; the result was
    # previously discarded, leaving the microseconds in place.
    expires_at = expires_at.replace(microsecond=0)
    user_data = await get_user(access_token=access_data["access_token"])
    user_data["id"] = uid = int(user_data["id"])
    user = await User.fetch(id=uid)
    if user is None:
        # First login: persist the Discord profile.
        user = await User.create(
            id=user_data["id"],
            username=user_data["username"],
            discriminator=user_data["discriminator"],
            avatar=user_data["avatar"],
        )
    # Upsert the OAuth token for this user.
    await Token(
        user_id=user.id,
        data=access_data,
        expires_at=expires_at,
        token=access_data["access_token"],
    ).update()
    # Issue our own JWT that the client uses against this API.
    token = jwt.encode(
        {"uid": user.id, "exp": expires_at, "iat": datetime.utcnow()},
        key=os.environ["SECRET_KEY"],
    )
    return jsonify(token=token, exp=expires_at)
| 30.190955 | 97 | 0.593708 | from quart import current_app, request, redirect, jsonify
from urllib.parse import quote_plus, parse_qs, urlparse
from quart.exceptions import MethodNotAllowed
from datetime import datetime, timedelta
from typing import List, Tuple
import jwt
import os
from api.models import Token, User
from api.app import API
from .. import bp
import utils
DISCORD_ENDPOINT = "https://discord.com/api"
request: utils.Request
SCOPES = ["identify"]
current_app: API
async def exchange_code(
    *, code: str, scope: str, redirect_uri: str, grant_type: str = "authorization_code"
) -> Tuple[dict, int]:
    """Exchange discord oauth code for access and refresh tokens.

    Returns the parsed JSON body together with the HTTP status code.
    """
    async with current_app.http_session.post(
        "%s/v6/oauth2/token" % DISCORD_ENDPOINT,
        # Discord's token endpoint requires form-encoded credentials.
        data=dict(
            code=code,
            scope=scope,
            grant_type=grant_type,
            redirect_uri=redirect_uri,
            client_id=os.environ["DISCORD_CLIENT_ID"],
            client_secret=os.environ["DISCORD_CLIENT_SECRET"],
        ),
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    ) as response:
        return await response.json(), response.status
async def get_user(access_token: str) -> dict:
    """Coroutine to fetch User data from discord using the users `access_token`"""
    async with current_app.http_session.get(
        "%s/v6/users/@me" % DISCORD_ENDPOINT,
        # Bearer auth with the user's OAuth access token.
        headers={"Authorization": "Bearer %s" % access_token},
    ) as response:
        return await response.json()
def format_scopes(scopes: List[str]) -> str:
    """Render OAuth scope names as the single space-separated string the
    Discord authorize endpoint expects."""
    separator = " "
    return separator.join(scopes)
def get_redirect(callback: str, scopes: List[str]) -> str:
    """Generates the correct oauth link depending on our provided arguments."""
    # The callback must be URL-encoded since it is itself a URL embedded
    # in the redirect_uri query parameter.
    return (
        "{BASE}/oauth2/authorize?response_type=code"
        "&client_id={client_id}"
        "&scope={scopes}"
        "&redirect_uri={redirect_uri}"
        "&prompt=consent"
    ).format(
        BASE=DISCORD_ENDPOINT,
        client_id=os.environ["DISCORD_CLIENT_ID"],
        scopes=format_scopes(scopes),
        redirect_uri=quote_plus(callback),
    )
def is_valid_url(string: str) -> bool:
    """Check that *string* parses with both a scheme and a host component."""
    parsed = urlparse(string)
    has_scheme = bool(parsed.scheme)
    has_host = bool(parsed.netloc)
    return has_scheme and has_host
@bp.route("/discord/redirect", methods=["GET"])
async def redirect_to_discord_oauth_portal():
    """Redirect user to correct oauth link depending on specified domain and requested scopes."""
    qs = parse_qs(request.query_string.decode())
    # Default to this service's own callback endpoint when none is given.
    callback = qs.get(
        "callback", (request.scheme + "://" + request.host + "/auth/discord/callback")
    )
    if isinstance(callback, list):  # parse_qs wraps present values in a list
        callback = callback[0]
    if not is_valid_url(callback):
        return (
            jsonify(
                {"error": "Bad Request", "message": "Not a well formed redirect URL."}
            ),
            400,
        )
    return redirect(get_redirect(callback=callback, scopes=SCOPES))
@bp.route("/discord/callback", methods=["GET", "POST"])
async def discord_oauth_callback():
    """
    Callback endpoint for finished discord authorization flow.
    GET -> Only used in DEBUG mode.
    Gets code from querystring.
    POST -> Gets code from request data.
    """
    if request.method == "GET":
        if not current_app.debug:
            # A GET request to this endpoint should only be used in testing.
            raise MethodNotAllowed(("POST",))
        qs = parse_qs(request.query_string.decode())
        code = qs.get("code")
        if code is not None:
            # parse_qs wraps each value in a list; keep the first entry.
            code = code[0]
        callback = request.scheme + "://" + request.host + "/auth/discord/callback"
    elif request.method == "POST":
        data = await request.json
        code = data.get("code")
        callback = data.get("callback", "")
    else:
        raise RuntimeWarning("Unexpected request method. (%s)" % request.method)
    if code is None:
        return (
            jsonify(
                {
                    "error": "Bad Request",
                    # NOTE(review): % binds tighter than the conditional, so
                    # for POST this message evaluates to just "JSON data";
                    # the conditional should be parenthesised.
                    "message": "Missing code in %s." % "querystring arguments"
                    if request.method == "GET"
                    else "JSON data",
                }
            ),
            400,
        )
    if not is_valid_url(callback):
        return (
            jsonify(
                {"error": "Bad Request", "message": "Not a well formed redirect URL."}
            ),
            400,
        )
    # Trade the one-time authorization code for access/refresh tokens.
    access_data, status_code = await exchange_code(
        code=code, scope=format_scopes(SCOPES), redirect_uri=callback
    )
    if access_data.get("error", False):
        if status_code == 400:
            return (
                jsonify(
                    {
                        "error": "Bad Request",
                        "message": "Discord returned 400 status.",
                        "data": access_data,
                    }
                ),
                400,
            )
        raise RuntimeWarning(
            "Unpredicted status_code.\n%s\n%s" % (str(access_data), status_code)
        )
    # Absolute expiry used for both the stored token and the JWT below.
    expires_at = datetime.utcnow() + timedelta(seconds=access_data["expires_in"])
    # NOTE(review): replace() returns a new datetime; this result is
    # discarded, so microseconds are NOT actually stripped.
    expires_at.replace(microsecond=0)
    user_data = await get_user(access_token=access_data["access_token"])
    user_data["id"] = uid = int(user_data["id"])
    user = await User.fetch(id=uid)
    if user is None:
        # First login: persist the Discord profile.
        user = await User.create(
            id=user_data["id"],
            username=user_data["username"],
            discriminator=user_data["discriminator"],
            avatar=user_data["avatar"],
        )
    # Upsert the OAuth token for this user.
    await Token(
        user_id=user.id,
        data=access_data,
        expires_at=expires_at,
        token=access_data["access_token"],
    ).update()
    # Issue our own JWT that the client uses against this API.
    token = jwt.encode(
        {"uid": user.id, "exp": expires_at, "iat": datetime.utcnow()},
        key=os.environ["SECRET_KEY"],
    )
    return jsonify(token=token, exp=expires_at)
| 0 | 0 | 0 |
0029caae435390d28118eff205e8ff9e2e2e2303 | 3,071 | py | Python | Metodos Computacionales Uniandes/Code/ejercicio_21.py | aess14/Cursos-Uniandes | be016b25f2f49788235fbe91ec577fd16b9ad613 | [
"MIT"
] | null | null | null | Metodos Computacionales Uniandes/Code/ejercicio_21.py | aess14/Cursos-Uniandes | be016b25f2f49788235fbe91ec577fd16b9ad613 | [
"MIT"
] | null | null | null | Metodos Computacionales Uniandes/Code/ejercicio_21.py | aess14/Cursos-Uniandes | be016b25f2f49788235fbe91ec577fd16b9ad613 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
# Soluciones a los ejercicios de la seccion 6.2.5
# del libro A Survey of Computational Physics Introductory Computational Science
# de Landau, Paez, Bordeianu (Python Multimodal eTextBook Beta4.0)
#1. Write a double-precision program to integrate an arbitrary function numerically
# using the trapezoid rule, the Simpson rule, and Gaussian quadrature.
# 2 Compute the relative error (epsilon=abs(numerical-exact)/exact) in each case.
# Present your data in tabular form for N=2,10,20,40,80,160
| 34.505618 | 99 | 0.595246 | import numpy as np
import matplotlib.pyplot as plt
# Soluciones a los ejercicios de la seccion 6.2.5
# del libro A Survey of Computational Physics Introductory Computational Science
# de Landau, Paez, Bordeianu (Python Multimodal eTextBook Beta4.0)
#1. Write a double-precision program to integrate an arbitrary function numerically
# using the trapezoid rule, the Simpson rule, and Gaussian quadrature.
def integral(f, a, b, n_points=10, metodo="trapecio"):
    """Numerically integrate f on [a, b].

    Args:
        f: Vectorised function accepting a numpy array.
        a, b: Integration limits.
        n_points: Number of nodes; even values are bumped to the next odd
            number so Simpson's rule stays valid.
        metodo: "trapecio" (trapezoid rule), "simpson" (Simpson's rule)
            or "cuadratura" (Gauss-Legendre quadrature).

    Returns:
        Approximate value of the integral, or 0.0 (after printing a
        warning) when *metodo* is unknown.
    """
    # Always use an odd number of points (needed by Simpson's rule).
    if n_points % 2 == 0:
        n_points = n_points + 1
    if metodo == "trapecio":
        x = np.linspace(a, b, n_points)
        h = x[1] - x[0]
        w = np.ones(n_points) * h
        w[0] = h/2
        w[-1] = h/2
    elif metodo == "simpson":
        x = np.linspace(a, b, n_points)
        h = x[1] - x[0]
        w = np.ones(n_points)
        ii = np.arange(n_points)
        w[ii % 2 != 0] = 4.0*h/3.0
        w[ii % 2 == 0] = 2.0*h/3.0
        w[0] = h/3
        w[-1] = h/3
    elif metodo == "cuadratura":
        # Map the Gauss-Legendre nodes/weights from [-1, 1] onto [a, b].
        y, wprime = np.polynomial.legendre.leggauss(n_points)
        x = 0.5*(b+a) + 0.5*(b-a)*y
        w = 0.5*(b-a)*wprime
    else:
        print('metodo no implementado')
        # BUG FIX: w was left undefined on this path, so the return line
        # raised NameError.  Define zero weights so the result is 0.0.
        x = np.zeros(n_points)
        w = np.zeros(n_points)
    return np.sum(f(x)*w)
def func(x):
    """Integrand used throughout the exercise: cos(x)."""
    return np.cos(x)
def error(x):
    """Absolute error of *x* against the exact integral value, 1."""
    return np.abs(x - 1)
# 2 Compute the relative error (epsilon=abs(numerical-exact)/exact) in each case.
# Present your data in tabular form for N=2,10,20,40,80,160
def integra():
    """Run the three integration rules for several N, tabulate the
    absolute errors to /tmp/tabla_resultados.dat, plot them log-log to
    error_loglogplot.png, and report each method's empirical convergence
    order and decimal places of precision."""
    N = [2,10,20,40,80,160]
    print("Primera Parte")
    out = open("/tmp/tabla_resultados.dat", "w")
    print("# N\t e_T\t e_S \t e_G")
    for n_points in N:
        # Integrate cos on [0, pi/2] (exact value 1) with each rule.
        a = integral(func, 0, np.pi/2, n_points=n_points, metodo="trapecio")
        b = integral(func, 0, np.pi/2, n_points=n_points, metodo="simpson")
        c = integral(func, 0, np.pi/2, n_points=n_points, metodo="cuadratura")
        print("{:d}\t {:.1e} {:.1e} {:.1e}".format(n_points, error(a), error(b), error(c)))
        out.write("{:d}\t {:.1e} {:.1e} {:.1e}\n".format(n_points, error(a), error(b), error(c)))
    out.close()
    print("")
    # 3 Make a log-log plot of relative error versus N
    data = np.loadtxt("/tmp/tabla_resultados.dat")
    plt.figure()
    plt.plot(data[:,0], data[:,1], label="Trapecio")
    plt.plot(data[:,0], data[:,2], label="Simpson")
    plt.plot(data[:,0], data[:,3], label="Cuadratura")
    plt.xlabel('N')
    plt.ylabel('|error|')
    plt.loglog()
    plt.legend()
    plt.savefig("error_loglogplot.png")
    # 4. Use your plot or table to estimate the power-law dependence of the error on N and
    # to determine the nuber of decimal places of precision.
    for i,m in zip([1,2,3],["Trapecio", "Simpson", "Cuadratura"]):
        # Slope of log(error) vs log(N) between the first and third rows.
        power_law = (np.log(data[2,i]) - np.log(data[0,i]))/(np.log(data[2,0]) - np.log(data[0,0]))
        decimal_places = -np.log10(data[-1,i])
        print("Metodo {}".format(m))
        print("\t Power Law: {:.1f}".format(power_law))
        print("\t Decimal Places: {:d}".format(int(decimal_places)))
| 2,426 | 0 | 91 |
baa701c74e94bb4d67456b7a8468dc39986dc759 | 3,437 | py | Python | RDS/code/rdsim/graphgen.py | chrisjcameron/chrisjcameron.github.io | 98bae30c1227465aba09ee50083688857b573cd5 | [
"MIT"
] | null | null | null | RDS/code/rdsim/graphgen.py | chrisjcameron/chrisjcameron.github.io | 98bae30c1227465aba09ee50083688857b573cd5 | [
"MIT"
] | null | null | null | RDS/code/rdsim/graphgen.py | chrisjcameron/chrisjcameron.github.io | 98bae30c1227465aba09ee50083688857b573cd5 | [
"MIT"
] | null | null | null | import random, math, numpy as np
import networkx
import networkx.utils
#from networkx.generators.classic import empty_graph
def random_stub_triangle_graph(s, t, seed=None):
    """Return a random graph G(s,t) with expected degrees given by s+2*t.

    :Parameters:
    - `s`: list - count of stubs emanating from node[i]
    - `t`: list - count of triangles including node[i]
    - `seed`: seed for random number generator (default=None)

    >>> G = random_stub_triangle_graph([1]*6, [1]*6)

    Reference::
    @Article{arXiv:0903.4009v1,
    author = {M. E. J. Newman},
    title = {Random graphs with clustering},
    journal = {},
    year = {2009},
    volume = {},
    pages = {},
    }
    """
    # Validate the two sequences before doing any work.
    if len(s) != len (t) :
        msg = "NetworkXError: stub and triangle vector must be same length"
        raise networkx.NetworkXError(msg)
    if sum(s)%2 != 0 :
        msg = "NetworkXError: sum(stubs) must be even"
        raise networkx.NetworkXError(msg)
    if sum(t)%3 != 0 :
        msg = "NetworkXError: sum(triangles) % 3 must be zero"
        raise networkx.NetworkXError(msg)
    n = len(s)
    # allow self loops, exclude in later code
    # NOTE(review): random.sample below draws distinct nodes, so no self
    # loops are actually produced despite the comment above.
    G=networkx.empty_graph(n,create_using=networkx.Graph())
    G.name="random_stub_triangle_graph"
    if n==0 or (max(s)==0 and max(t)==0): # done if no edges
        return G
    #might not be needed
    #d = sum(s+2*t)
    #rho = 1.0 / float(d) # Vol(G)
    if seed is not None:
        random.seed(seed)
    # connect triangle corners
    # Get a list of nodes that have triangle corners
    triNodes = [ x for x in range(n) if t[x]>0 ]
    tri = list(t)
    # Repeatedly pick three distinct corner-bearing nodes and join them in
    # a triangle, decrementing their remaining corner counts.
    # NOTE(review): if fewer than 3 corner-bearing nodes remain, their
    # leftover corners are silently dropped (same for stubs below).
    while len(triNodes) >= 3:
        [A,B,C] = random.sample(triNodes,3)
        #if not (G.has_edge(A,B) or G.has_edge(A,C) or G.has_edge(B,C)):
        G.add_cycle([A,B,C])
        for node in [A,B,C]:
            tri[node] -= 1
            if tri[node] == 0: triNodes.remove(node)
    # connect stubs
    # Get a list of nodes that have stubs
    stubNodes = [ x for x in range(n) if s[x]>0 ]
    stubs = list(s)
    while len(stubNodes) >= 2:
        [A,B] = random.sample(stubNodes,2)
        #if not (G.has_edge(A,B)):
        G.add_edge(A,B)
        for node in [A,B]:
            stubs[node] -= 1
            if stubs[node] == 0: stubNodes.remove(node)
    """
    for node in xrange(n):
        for v in xrange(u,n):
            if random.random() < w[u]*w[v]*rho:
                G.add_edge(u,v)
    """
    return G
def max_clustering( degSeq ):
    """Split a degree sequence into triangle corners and plain stubs,
    maximising the number of triangle corners.

    Each degree d contributes floor(d) // 2 triangle corners (t) and
    floor(d) % 2 plain stubs (s).  Corners are then removed at random
    until sum(t) is a multiple of 3, and stubs until sum(s) is even, so
    the result is a valid input for random_stub_triangle_graph().

    Args:
        degSeq: Iterable of (possibly fractional) node degrees.

    Returns:
        [t, s]: Per-node triangle-corner and stub counts.
    """
    # Floors given degree sequence, then pair as many edges as possible into
    # triangle corners, assigns any left over edges as non-triangle edges
    [t,s]=[list(a) for a in zip(*[divmod(math.floor(x),2) for x in degSeq])]
    # sum(t) must be a multiple of 3 and sum(s) must be even.
    removeT = int(sum(t)%3.0)
    removeS = int(sum(s)%2.0)
    for extra in range(removeT):
        # BUG FIX: the lower bound was 0; edge == 0 always matched the
        # first cumsum entry (cumsum >= 0), so node 0 could be decremented
        # even with a zero count, producing a negative entry.  Drawing
        # from [1, sum] always lands on a node with a positive count.
        edge = random.randint(1,sum(t))
        rmIndex = [ x>=edge for x in np.cumsum(t)].index(True)
        t[rmIndex] -= 1
    for extra in range(removeS):
        edge = random.randint(1,sum(s))
        rmIndex = [ x>=edge for x in np.cumsum(s)].index(True)
        s[rmIndex] -= 1
    return [t,s]
| 26.643411 | 77 | 0.550189 | import random, math, numpy as np
import networkx
import networkx.utils
#from networkx.generators.classic import empty_graph
def random_stub_triangle_graph(s, t, seed=None):
    """Return a random graph G(s,t) with expected degrees given by s+2*t.

    :Parameters:
    - `s`: list - count of stubs emanating from node[i]
    - `t`: list - count of triangles including node[i]
    - `seed`: seed for random number generator (default=None)

    >>> G = random_stub_triangle_graph([1]*6, [1]*6)

    Reference::
    @Article{arXiv:0903.4009v1,
    author = {M. E. J. Newman},
    title = {Random graphs with clustering},
    journal = {},
    year = {2009},
    volume = {},
    pages = {},
    }
    """
    if len(s) != len (t) :
        msg = "NetworkXError: stub and triangle vector must be same length"
        raise networkx.NetworkXError(msg)
    if sum(s)%2 != 0 :
        msg = "NetworkXError: sum(stubs) must be even"
        raise networkx.NetworkXError(msg)
    if sum(t)%3 != 0 :
        msg = "NetworkXError: sum(triangles) % 3 must be zero"
        raise networkx.NetworkXError(msg)
    n = len(s)
    # allow self loops, exclude in later code
    G=networkx.empty_graph(n,create_using=networkx.Graph())
    G.name="random_stub_triangle_graph"
    if n==0 or (max(s)==0 and max(t)==0): # done if no edges
        return G
    #might not be needed
    #d = sum(s+2*t)
    #rho = 1.0 / float(d) # Vol(G)
    if seed is not None:
        random.seed(seed)
    # connect triangle corners
    # Get a list of nodes that have triangle corners
    triNodes = [ x for x in range(n) if t[x]>0 ]
    tri = list(t)
    # Join random triples of corner-bearing nodes into triangles until
    # fewer than three such nodes remain (leftovers are dropped).
    while len(triNodes) >= 3:
        [A,B,C] = random.sample(triNodes,3)
        #if not (G.has_edge(A,B) or G.has_edge(A,C) or G.has_edge(B,C)):
        G.add_cycle([A,B,C])
        for node in [A,B,C]:
            tri[node] -= 1
            if tri[node] == 0: triNodes.remove(node)
    # connect stubs
    # Get a list of nodes that have stubs
    stubNodes = [ x for x in range(n) if s[x]>0 ]
    stubs = list(s)
    # Pair random stub-bearing nodes until fewer than two remain.
    while len(stubNodes) >= 2:
        [A,B] = random.sample(stubNodes,2)
        #if not (G.has_edge(A,B)):
        G.add_edge(A,B)
        for node in [A,B]:
            stubs[node] -= 1
            if stubs[node] == 0: stubNodes.remove(node)
    """
    for node in xrange(n):
        for v in xrange(u,n):
            if random.random() < w[u]*w[v]*rho:
                G.add_edge(u,v)
    """
    return G
def max_clustering( degSeq ):
    """ Return a valid degree sequence with high clustering.

    Floors each degree, converts it into floor(d)//2 triangle corners and
    floor(d) % 2 single stubs, then randomly drops corners/stubs until
    the corner total is divisible by 3 and the stub total is even.
    """
    corners = []
    stubs = []
    for deg in degSeq:
        whole, leftover = divmod(math.floor(deg), 2)
        corners.append(whole)
        stubs.append(leftover)
    excess_corners = int(sum(corners) % 3.0)
    excess_stubs = int(sum(stubs) % 2.0)
    for _ in range(excess_corners):
        pick = random.randint(0, sum(corners))
        idx = [c >= pick for c in np.cumsum(corners)].index(True)
        corners[idx] -= 1
    for _ in range(excess_stubs):
        pick = random.randint(0, sum(stubs))
        idx = [c >= pick for c in np.cumsum(stubs)].index(True)
        stubs[idx] -= 1
    return [corners, stubs]
| 0 | 0 | 0 |
5848876749341a241b2e0fb3fed579e69f7b80ce | 159 | py | Python | __init__.py | GPaolo/Kuka_gym | 9f88022b26ac013da111a9caaf841fa2b1cfc9bd | [
"MIT"
] | null | null | null | __init__.py | GPaolo/Kuka_gym | 9f88022b26ac013da111a9caaf841fa2b1cfc9bd | [
"MIT"
] | null | null | null | __init__.py | GPaolo/Kuka_gym | 9f88022b26ac013da111a9caaf841fa2b1cfc9bd | [
"MIT"
] | null | null | null | # Created by giuseppe
# Date: 22/11/19
from gym.envs.registration import register
register(
id='KukaPush-v0',
entry_point='gym_kuka.envs:KukaPush',
) | 17.666667 | 42 | 0.72327 | # Created by giuseppe
# Date: 22/11/19
from gym.envs.registration import register
register(
id='KukaPush-v0',
entry_point='gym_kuka.envs:KukaPush',
) | 0 | 0 | 0 |
a6ace45da97648132137e3a8077a76ee2ceb0de0 | 7,838 | py | Python | socksio/socks4.py | mIcHyAmRaNe/socksio | e69158473125060879319368e9465d7881cee0b8 | [
"MIT"
] | 30 | 2019-12-04T04:05:46.000Z | 2022-03-15T18:57:48.000Z | socksio/socks4.py | mIcHyAmRaNe/socksio | e69158473125060879319368e9465d7881cee0b8 | [
"MIT"
] | 41 | 2019-12-02T20:30:22.000Z | 2022-03-23T17:24:23.000Z | socksio/socks4.py | sethmlarson/socks | bad06ab5d8581797196ecf2eae1d8776c997567e | [
"MIT"
] | 3 | 2020-03-25T07:58:22.000Z | 2021-11-19T11:53:45.000Z | import enum
import typing
from ._types import StrOrBytes
from .exceptions import ProtocolError, SOCKSError
from .utils import (
AddressType,
decode_address,
encode_address,
get_address_port_tuple_from_address,
)
class SOCKS4ReplyCode(bytes, enum.Enum):
    """Status codes a SOCKS4 server may return in its reply packet.

    Values are the raw single-byte codes defined by the SOCKS4 protocol
    specification (0x5A..0x5D).
    """

    REQUEST_GRANTED = b"\x5A"
    REQUEST_REJECTED_OR_FAILED = b"\x5B"
    # Per the SOCKS4 spec, the last two codes concern the server's identd
    # check of the client: could not connect to identd / user-id mismatch.
    CONNECTION_FAILED = b"\x5C"
    AUTHENTICATION_FAILED = b"\x5D"
class SOCKS4Command(bytes, enum.Enum):
    """Command codes a SOCKS4 client can send: CONNECT for an outbound TCP
    connection, BIND for an inbound listen (per the SOCKS4 spec)."""
    CONNECT = b"\x01"
    BIND = b"\x02"
class SOCKS4Request(typing.NamedTuple):
    """A SOCKS4 request ready to be serialised for the proxy server.

    Fields:
        command: The SOCKS4 command to issue.
        port: Target port on the destination host.
        addr: Packed IPv4 address of the destination host.
        user_id: Optional user id; if left as None one must be supplied
            to dumps().
    """

    command: SOCKS4Command
    port: int
    addr: bytes
    user_id: typing.Optional[bytes] = None

    @classmethod
    def from_address(
        cls,
        command: SOCKS4Command,
        address: typing.Union[StrOrBytes, typing.Tuple[StrOrBytes, int]],
        user_id: typing.Optional[bytes] = None,
    ) -> "SOCKS4Request":
        """Build a request from an 'HOST:PORT' string or (host, port) tuple.

        Raises:
            SOCKSError: SOCKS4 proper only carries IPv4 addresses, so
                domain names and IPv6 addresses are refused.
        """
        host, port = get_address_port_tuple_from_address(address)
        atype, packed = encode_address(host)
        if atype != AddressType.IPV4:
            raise SOCKSError(
                "IPv6 addresses and domain names are not supported by SOCKS4"
            )
        return cls(command=command, addr=packed, port=port, user_id=user_id)

    def dumps(self, user_id: typing.Optional[bytes] = None) -> bytes:
        """Serialise the request; a truthy *user_id* overrides the stored one.

        Raises:
            SOCKSError: If no user id is available from either source.
        """
        uid = user_id or self.user_id
        if uid is None:
            raise SOCKSError("SOCKS4 requires a user_id, none was specified")
        # VN(4) | CD | DSTPORT (2, big-endian) | DSTIP (4) | USERID | NUL
        return (
            b"\x04"
            + self.command
            + self.port.to_bytes(2, byteorder="big")
            + self.addr
            + uid
            + b"\x00"
        )
class SOCKS4ARequest(typing.NamedTuple):
    """A SOCKS4A request: like SOCKS4, but the hostname travels in-band so
    the proxy performs the DNS resolution.

    Fields:
        command: The SOCKS4 command to issue.
        port: Target port on the destination host.
        addr: Encoded hostname (or address) of the destination.
        user_id: Optional user id; if left as None one must be supplied
            to dumps().
    """

    command: SOCKS4Command
    port: int
    addr: bytes
    user_id: typing.Optional[bytes] = None

    @classmethod
    def from_address(
        cls,
        command: SOCKS4Command,
        address: typing.Union[StrOrBytes, typing.Tuple[StrOrBytes, int]],
        user_id: typing.Optional[bytes] = None,
    ) -> "SOCKS4ARequest":
        """Build a request from an 'HOST:PORT' string or (host, port) tuple;
        unlike plain SOCKS4, domain names are accepted here."""
        host, port = get_address_port_tuple_from_address(address)
        _, encoded = encode_address(host)  # no address-type check for 4A
        return cls(command=command, addr=encoded, port=port, user_id=user_id)

    def dumps(self, user_id: typing.Optional[bytes] = None) -> bytes:
        """Serialise the request; a truthy *user_id* overrides the stored one.

        Raises:
            SOCKSError: If no user id is available from either source.
        """
        uid = user_id or self.user_id
        if uid is None:
            raise SOCKSError("SOCKS4 requires a user_id, none was specified")
        return (
            b"\x04"
            + self.command
            + self.port.to_bytes(2, byteorder="big")
            # 0.0.0.255: deliberately invalid IP whose non-zero final byte
            # signals that the hostname follows after the user id.
            + b"\x00\x00\x00\xFF"
            + uid
            + b"\x00"
            + self.addr
            + b"\x00"
        )
class SOCKS4Reply(typing.NamedTuple):
    """Encapsulates a reply from the SOCKS4 proxy server.

    Args:
        reply_code: The code representing the type of reply.
        port: The port number returned.
        addr: Optional IP address returned.
    """
    reply_code: SOCKS4ReplyCode
    port: int
    addr: typing.Optional[str]

    @classmethod
    def loads(cls, data: bytes) -> "SOCKS4Reply":
        """Unpack raw reply data into an instance.

        Returns:
            The unpacked reply instance.

        Raises:
            ProtocolError: If the data does not match the spec (wrong length
                or wrong version byte).
        """
        # A SOCKS4 reply is exactly 8 bytes and begins with a zero version byte.
        if len(data) != 8 or not data.startswith(b"\x00"):
            raise ProtocolError("Malformed reply")
        try:
            code = SOCKS4ReplyCode(data[1:2])
            port_number = int.from_bytes(data[2:4], byteorder="big")
            ip = decode_address(AddressType.IPV4, data[4:8])
        except ValueError as exc:
            raise ProtocolError("Malformed reply") from exc
        return cls(reply_code=code, port=port_number, addr=ip)
class SOCKS4Connection:
    """Encapsulates a SOCKS4 and SOCKS4A connection.

    Packs request objects into data suitable to be sent and unpacks reply
    data into their appropriate reply objects.

    Args:
        user_id: The user ID to be sent as part of the requests.
    """

    def __init__(self, user_id: bytes) -> None:
        # Restored initializer: send()/receive_data()/data_to_send() all read
        # self.user_id, self._data_to_send and self._received_data, which were
        # never initialized, making every method raise AttributeError.
        self.user_id = user_id
        self._data_to_send = bytearray()
        self._received_data = bytearray()

    def send(self, request: typing.Union[SOCKS4Request, SOCKS4ARequest]) -> None:
        """Packs a request object and adds it to the send data buffer.

        Args:
            request: The request instance to be packed.
        """
        # The request's own user_id takes precedence over the connection's.
        user_id = request.user_id or self.user_id
        self._data_to_send += request.dumps(user_id=user_id)

    def receive_data(self, data: bytes) -> SOCKS4Reply:
        """Unpacks response data into a reply object.

        Args:
            data: The raw response data from the proxy server.

        Returns:
            The appropriate reply object.
        """
        self._received_data += data
        return SOCKS4Reply.loads(bytes(self._received_data))

    def data_to_send(self) -> bytes:
        """Returns the data to be sent via the I/O library of choice.

        Also clears the connection's buffer.
        """
        data = bytes(self._data_to_send)
        self._data_to_send = bytearray()
        return data
| 30.858268 | 88 | 0.607043 | import enum
import typing
from ._types import StrOrBytes
from .exceptions import ProtocolError, SOCKSError
from .utils import (
AddressType,
decode_address,
encode_address,
get_address_port_tuple_from_address,
)
class SOCKS4ReplyCode(bytes, enum.Enum):
    """The reply status codes a SOCKS4 server can return (one byte each)."""
    # 0x5A: request granted.
    REQUEST_GRANTED = b"\x5A"
    # 0x5B: request rejected or failed.
    REQUEST_REJECTED_OR_FAILED = b"\x5B"
    # 0x5C: client not reachable on identd.
    CONNECTION_FAILED = b"\x5C"
    # 0x5D: identd user-id mismatch.
    AUTHENTICATION_FAILED = b"\x5D"
class SOCKS4Command(bytes, enum.Enum):
    """The request commands understood by a SOCKS4 server (one byte each)."""
    # 0x01: establish a TCP connection.
    CONNECT = b"\x01"
    # 0x02: bind a listening port.
    BIND = b"\x02"
class SOCKS4Request(typing.NamedTuple):
    """Encapsulates a request to the SOCKS4 proxy server.

    Args:
        command: The command to request.
        port: The port number to connect to on the target host.
        addr: IP address of the target host.
        user_id: Optional user ID to be included in the request, if not supplied
            the user *must* provide one in the packing operation.
    """
    command: SOCKS4Command
    port: int
    addr: bytes
    user_id: typing.Optional[bytes] = None

    @classmethod
    def from_address(
        cls,
        command: SOCKS4Command,
        address: typing.Union[StrOrBytes, typing.Tuple[StrOrBytes, int]],
        user_id: typing.Optional[bytes] = None,
    ) -> "SOCKS4Request":
        """Build an instance from a command and an address.

        Args:
            command: The command to request.
            address: A string in the form 'HOST:PORT' or a tuple of ip address
                string and port number.
            user_id: Optional user ID.

        Returns:
            A SOCKS4Request instance.

        Raises:
            SOCKSError: If a domain name or IPv6 address was supplied.
        """
        host, port = get_address_port_tuple_from_address(address)
        atype, packed_addr = encode_address(host)
        # Plain SOCKS4 can only carry IPv4 addresses on the wire.
        if atype != AddressType.IPV4:
            raise SOCKSError(
                "IPv6 addresses and domain names are not supported by SOCKS4"
            )
        return cls(command=command, addr=packed_addr, port=port, user_id=user_id)

    def dumps(self, user_id: typing.Optional[bytes] = None) -> bytes:
        """Pack the instance into raw binary wire format.

        Args:
            user_id: Optional user ID as an override; if not provided the
                instance's is used, and if none was provided at initialization
                an error is raised.

        Returns:
            The packed request.

        Raises:
            SOCKSError: If no user was specified in this call or on initialization.
        """
        effective_user_id = user_id or self.user_id
        if effective_user_id is None:
            raise SOCKSError("SOCKS4 requires a user_id, none was specified")
        packet = bytearray(b"\x04")
        packet += self.command
        packet += (self.port).to_bytes(2, byteorder="big")
        packet += self.addr
        packet += effective_user_id
        packet += b"\x00"
        return bytes(packet)
class SOCKS4ARequest(typing.NamedTuple):
    """Encapsulates a request to the SOCKS4A proxy server
    Args:
        command: The command to request.
        port: The port number to connect to on the target host.
        addr: IP address of the target host.
        user_id: Optional user ID to be included in the request, if not supplied
        the user *must* provide one in the packing operation.
    """
    command: SOCKS4Command
    port: int
    addr: bytes
    user_id: typing.Optional[bytes] = None

    @classmethod
    def from_address(
        cls,
        command: SOCKS4Command,
        address: typing.Union[StrOrBytes, typing.Tuple[StrOrBytes, int]],
        user_id: typing.Optional[bytes] = None,
    ) -> "SOCKS4ARequest":
        """Convenience class method to build an instance from command and address.
        Args:
            command: The command to request.
            address: A string in the form 'HOST:PORT' or a tuple of ip address string
            and port number.
            user_id: Optional user ID.
        Returns:
            A SOCKS4ARequest instance.
        """
        address, port = get_address_port_tuple_from_address(address)
        # Unlike SOCKS4, no IPv4-only check: SOCKS4A accepts domain names too,
        # so the address type returned by encode_address is not enforced here.
        atype, encoded_addr = encode_address(address)
        return cls(command=command, addr=encoded_addr, port=port, user_id=user_id)

    def dumps(self, user_id: typing.Optional[bytes] = None) -> bytes:
        """Packs the instance into a raw binary in the appropriate form.
        Args:
            user_id: Optional user ID as an override, if not provided the instance's
            will be used, if none was provided at initialization an error is raised.
        Returns:
            The packed request.
        Raises:
            SOCKSError: If no user was specified in this call or on initialization.
        """
        # Caller-supplied user_id wins over the one stored on the instance.
        user_id = user_id or self.user_id
        if user_id is None:
            raise SOCKSError("SOCKS4 requires a user_id, none was specified")
        return b"".join(
            [
                b"\x04",
                self.command,
                (self.port).to_bytes(2, byteorder="big"),
                # Per the SOCKS4A convention, an IP of 0.0.0.x (x non-zero)
                # tells the proxy to resolve the hostname appended below.
                b"\x00\x00\x00\xFF",  # arbitrary final non-zero byte
                user_id,
                b"\x00",
                self.addr,
                b"\x00",
            ]
        )
class SOCKS4Reply(typing.NamedTuple):
    """Encapsulates a reply from the SOCKS4 proxy server
    Args:
        reply_code: The code representing the type of reply.
        port: The port number returned.
        addr: Optional IP address returned.
    """
    reply_code: SOCKS4ReplyCode
    port: int
    addr: typing.Optional[str]

    @classmethod
    def loads(cls, data: bytes) -> "SOCKS4Reply":
        """Unpacks the reply data into an instance.
        Returns:
            The unpacked reply instance.
        Raises:
            ProtocolError: If the data does not match the spec.
        """
        # A SOCKS4 reply is a fixed 8 bytes: version (must be 0x00), code,
        # 2-byte big-endian port, 4-byte IPv4 address.
        if len(data) != 8 or data[0:1] != b"\x00":
            raise ProtocolError("Malformed reply")
        try:
            return cls(
                reply_code=SOCKS4ReplyCode(data[1:2]),
                port=int.from_bytes(data[2:4], byteorder="big"),
                addr=decode_address(AddressType.IPV4, data[4:8]),
            )
        # ValueError covers both an unknown reply code and a bad address.
        except ValueError as exc:
            raise ProtocolError("Malformed reply") from exc
class SOCKS4Connection:
    """Encapsulates a SOCKS4 and SOCKS4A connection.

    Packs request objects into data suitable to be sent and unpacks reply
    data into their appropriate reply objects.

    Args:
        user_id: The user ID to be sent as part of the requests.
    """

    def __init__(self, user_id: bytes):
        self.user_id = user_id
        self._data_to_send = bytearray()
        self._received_data = bytearray()

    def send(self, request: typing.Union[SOCKS4Request, SOCKS4ARequest]) -> None:
        """Packs a request object and adds it to the send data buffer.

        Args:
            request: The request instance to be packed.
        """
        # Prefer the request's own user_id; fall back to the connection's.
        effective_user_id = request.user_id or self.user_id
        self._data_to_send.extend(request.dumps(user_id=effective_user_id))

    def receive_data(self, data: bytes) -> SOCKS4Reply:
        """Unpacks response data into a reply object.

        Args:
            data: The raw response data from the proxy server.

        Returns:
            The appropriate reply object.
        """
        self._received_data.extend(data)
        return SOCKS4Reply.loads(bytes(self._received_data))

    def data_to_send(self) -> bytes:
        """Returns the data to be sent via the I/O library of choice.

        Also clears the connection's buffer.
        """
        pending, self._data_to_send = bytes(self._data_to_send), bytearray()
        return pending
| 129 | 0 | 27 |
fd66240376fc34f8c1e678cfe86225d1902a282b | 108 | py | Python | PDF Reader/txt-reader.py | Rothamsted-Ecoinformatics/YieldBookDataTools | 156cec017846037a95feed9c3e2490df49485eac | [
"Apache-2.0"
] | null | null | null | PDF Reader/txt-reader.py | Rothamsted-Ecoinformatics/YieldBookDataTools | 156cec017846037a95feed9c3e2490df49485eac | [
"Apache-2.0"
] | null | null | null | PDF Reader/txt-reader.py | Rothamsted-Ecoinformatics/YieldBookDataTools | 156cec017846037a95feed9c3e2490df49485eac | [
"Apache-2.0"
] | null | null | null | f = open('Conventions.txt','r')
message = f.read()
print(message)
f.close()
print (message.splitlines())
| 13.5 | 31 | 0.666667 | f = open('Conventions.txt','r')
message = f.read()
print(message)
f.close()
print (message.splitlines())
| 0 | 0 | 0 |
f7c1c10f8bb7163eee813091865d9d31e524515e | 38,978 | py | Python | awsheet/helpers/securitygrouphelper.py | cryptographrix/awsheet | 345eb75ffc4bfb8a7cf9cd6df31d6a16067f769a | [
"Apache-2.0"
] | null | null | null | awsheet/helpers/securitygrouphelper.py | cryptographrix/awsheet | 345eb75ffc4bfb8a7cf9cd6df31d6a16067f769a | [
"Apache-2.0"
] | null | null | null | awsheet/helpers/securitygrouphelper.py | cryptographrix/awsheet | 345eb75ffc4bfb8a7cf9cd6df31d6a16067f769a | [
"Apache-2.0"
] | null | null | null | """This module implements the security group helper module for AWSHeet that is aimed at providing idempotent AWS EC2 security groups.
Currently, it only supports Security Groups that are in a VPC.
Rules are created using the SecurityGroupRule type and then they are collected together inside an iterable (usually a set is used).
This collection of rules is then passed along to the SecurityGroupHelper constructor which is also passed a name and a description.
Example 1 - give access to a specified list of IP addresses:
#create a new security group that gives access to the following ips to port 80
cidr_ips = ['192.168.0.1/32', '10.10.11.12/24', '155.246.0.0/16']
http_port = 80
rules_for_new_security_group = set()
for cidr_ip_x in cidr_ips:
rules_for_new_security_group.add(SecurityGroupRule(ip_protocol='tcp', from_port=http_port, to_port=http_port, cidr_ip=cidr_ip_x, src_group=None))
new_security_group = SecurityGroupHelper(name='New Test Group', description='just a simple example group', rules=rules_for_new_security_group)
Example 2 - give two seperate security groups mutual access to SSH / port 22:
sg1_rules = set() #- the set of rules for the first security group
sg2_rules = set() #- the set of rules for the second security group
#- a shared rule based on IP address and ICMP
all_icmp = SecurityGroupRule(ip_protocol='icmp', from_port=-1, to_port=-1, cidr_ip='0.0.0.0/0', src_group=None)
sg1_rules.add(all_icmp)
sg2_rules.add(all_icmp)
#- use an '@' symbol in the src_group name to specify a group by name, even if the group doesn't exist yet
sg1_rules.add(SecurityGroupRule(ip_protocol='tcp', from_port=22, to_port=22, cidr_ip=None, src_group='@securityGroup2'))
sg2_rules.add(SecurityGroupRule(ip_protocol='tcp', from_port=22, to_port=22, cidr_ip=None, src_group='@securityGroup1'))
#- create the actual groups
sg1 = SecurityGroupHelper(name='securityGroup1', description='example group 1', rules=sg1_rules)
sg2 = SecurityGroupHelper(name='securityGroup2', description='example group 2', rules=sg2_rules)
#- program exits
#- at program exit, the remaining dependencies will now be converged
#- this is an easy way of forward referencing when you create the rules so that the referenced groups
#- don't have to exist at that time, as long as they are created within the lifetime of the same program
"""
from .awshelper import AWSHelper
import time
import re
import os
import json
import subprocess
import tempfile
import argparse
import sys
import logging
import atexit
import boto
import boto.ec2
import boto.ec2.elb
import boto.cloudformation
import collections
import ipaddress
import boto.exception
import copy
#- used to wait between successive API calls
#- (seconds; passed to time.sleep between create/tag/authorize calls)
AWS_API_COOLDOWN_PERIOD = 1.0
#TODO: IMPLEMENT TAGGING
#- no need for a full class. These are simple tuples
#- TODO: actually having rules as immutables makes normalization more complex.
#- refactor this particular tuple into its own class and define rules of
#- interaction between security groups and rules they contain
#- as rules themselves do need access to the heet object and to the boto_sg
#- to perform some aspects of normalization
#- cidr_ip and src_group are mutually exclusive: exactly one should be set.
SecurityGroupRule = collections.namedtuple('SecurityGroupRule', ['ip_protocol', 'from_port', 'to_port', 'cidr_ip', 'src_group'])
#- rm_group: only try to delete the group, fail if the API call fails
#- rm_instances: delete all the instances in this group before attempting deletion of this security group
#- rm_enis: delete all of the Elastic Network Interfaces in this security group before attempting deletion of this security group
SecurityGroupDeleteMode = collections.namedtuple('SecurityGroupDeleteMode', ['rm_group', 'rm_instances', 'rm_enis'])
#- this defines the identity of the security group to Heet Code
#- as long as none of these change, we will converge the same AWS resource
#- VPC ID
#- Heet Project Name (Base Name / the name of the script)
#- Heet Environment (usually, testing, staging or production)
#- Security Group Name
SgTag = collections.namedtuple('SecurityGroupIDTag',[ 'environment', 'project_name', 'vpc_id', 'sg_name'])
class SecurityGroupHelper(AWSHelper):
"""modular and convergent security groups in VPC (and only in VPC)
Params"""
def normalize_aws_sg_rules(self, aws_sg):
    """Flatten an AWS security group's rule/grant pairs into SecurityGroupRule tuples.

    AWS has grants and rules, but we work with them as a logical unit: the rule
    carries ip_protocol/from_port/to_port while each grant carries the mutually
    exclusive group_id or cidr_ip. Each combined tuple is passed through
    self.normalize_rule so local and remote rules compare consistently.
    Returns a set of normalized SecurityGroupRule tuples (empty if aws_sg is None).
    """
    # Retained for its side effects (API lookup / caching self.aws_id) even
    # though the return value is unused here.
    self.get_resource_object()
    combined = set()
    if aws_sg is not None:
        for aws_rule in aws_sg.rules:
            for grant in aws_rule.grants:
                flattened = SecurityGroupRule(aws_rule.ip_protocol,
                                              aws_rule.from_port,
                                              aws_rule.to_port,
                                              grant.cidr_ip,
                                              grant.group_id)
                combined.add(self.normalize_rule(flattened))
    return combined
def get_resource_object(self):
    """Look this security group up in EC2 by its Heet identity tag.

    Returns the boto SecurityGroup object (also caching its id in
    self.aws_id), or None if no group carries the tag.
    """
    (tag_name, tag_value) = self.heet_id_tag
    candidates = self.conn.get_all_security_groups(filters={'tag-key' : tag_name, 'tag-value' :tag_value})
    if not candidates:
        return None
    # More than one match means duplicate groups in the same project and
    # environment, which is unexpected and worth a warning.
    if len(candidates) > 1:
        self.heet.logger.warn("multiple security groups returned!: search tag:[{}: {}]".format(tag_name, tag_value))
    group = candidates[0]
    self.aws_id = group.id
    return group
def get_or_create_resource_object(self):
"""Get or create the Boto Version of this security group from EC2 via API"""
(tag_name, tag_value) = self.heet_id_tag
boto_group = self.get_resource_object()
if not boto_group and not self.heet.args.destroy:
#- it doesn't exist yet
try:
self.heet.logger.debug('get_or_create_resource_object: creating new security group')
boto_group = self.conn.create_security_group(name=self.aws_name, description=self.description, vpc_id=self.vpc_id)
except boto.exception.EC2ResponseError as err:
print 'AWS EC2 API error: {} ({})'.format(err.message, err)
return None
self.heet.logger.debug('get_or_create_resource_object: successfully created new security group, waiting to tag')
time.sleep(AWS_API_COOLDOWN_PERIOD)
self.heet.logger.debug('get_or_create_resource_object: tagging new security group: [{}:{}]'.format(tag_name, tag_value))
try:
#- sometimes a short sleep isn't enough, and we really don't want to exit before tagging
#- as that makes the next convergence cycle fail until the group is deleted manually.
boto_group.add_tag(key=tag_name, value=tag_value)
self.heet.logger.debug('get_or_create_resource_object: successfully created new tagged group.')
self.aws_id = boto_group.id
except boto.exception.EC2ResponseError as err:
if err.code == 'InvalidGroup.NotFound':
self.heet.logger.debug('get_or_create_resource: setting ID tag failed. Waiting to try again...')
time.sleep(3)
boto_self.add_tag(key=tag_name, value=tag_value)
else:
raise err
return boto_group
def make_key_from_rule(self, rule):
    """Build a unique string key for a rule by joining its fields with '/'."""
    fields = (rule.ip_protocol, rule.from_port, rule.to_port, rule.cidr_ip, rule.src_group)
    return '/'.join(str(field) for field in fields)
def get_src_group_from_key(self, key):
    """Invert make_key_from_rule: the src_group is the final '/'-separated field."""
    return key.rsplit('/', 1)[-1]
def rule_fails_check(self, rule):
    """Checks that the rule has all the needed attributes.

    Returns a list of strings with error messages for each test the rule failed.
    If it passes, then the list will be empty.
    As well, this populates the self.src_group_references dict for any
    resolvable src_group, and registers unresolvable resource references as
    dependent rules to be converged at exit.
    NOTE(review): the 'self' src_group branch can 'return' None (not a list)
    when the group can't be created - callers treating the result as a list
    should be aware.
    """
    #- a list of all the ways that the rule has failed
    rule_status = []
    if str(rule.ip_protocol) not in ['tcp','udp', 'icmp', '-1']:
        rule_status.append('bad value for ip_protocol in rule {}'.format(str(rule)))
    #- try to convert to float to check if it is a valid port number
    #- (a manual TypeError is raised to reuse the except path for bad values)
    try:
        if rule.from_port is not None and rule.from_port < 0 and rule.from_port != -1:
            rule_status.append('rule from_port is a negative number that is not -1: [{}]'.format(rule.from_port))
            raise TypeError()
        float(rule.from_port)
    except TypeError as err:
        #- None is a legal "all ports" value, anything else non-numeric is not
        if rule.from_port is None:
            pass
        else:
            rule_status.append('rule from port is not a valid integer')
    try:
        if rule.to_port is not None and rule.to_port < 0 and rule.to_port != -1:
            rule_status.append('rule to_port is a negative number that is not -1: [{}]'.format(rule.to_port))
            raise TypeError()
        float(rule.to_port)
    except TypeError as err:
        if rule.to_port is None:
            pass
        else:
            rule_status.append('rule to port is not a valid integer')
    #- Check the (.cidr_ip, .src_group) pair compliance
    #- need to have exactly one of src_group, cidr_ip
    if rule.cidr_ip is not None:
        #self.heet.logger.debug(' ^^^ rule has cidr_ip')
        if rule.src_group is not None:
            self.heet.logger.debug(' ^^^ rule has both cidr_ip and src_group')
            rule_status.append('Can\'t have both cidr_ip and src_group set simultaneously: rule {}'.format(str(rule)))
        else:
            #self.heet.logger.debug(' ^^^ rule has only cidr_ip')
            #- test the cidr_ip
            #- NOTE: unicode() here means this module is Python 2 only
            try:
                ipaddress.IPv4Network(unicode(rule.cidr_ip))
            except ValueError as err:
                #self.heet.logger.debug(' ^^^ rule has invalid cidr_ip')
                rule_status.append('rule has an invalid cidr_ip value: [{}]'.format(rule.cidr_ip))
    elif rule.cidr_ip is None and rule.src_group is None:
        #self.heet.logger.debug(' ^^^ rule has neither cidr_ip nor src_group')
        rule_status.append('Must specify one or other of [cidr_ip, src_group]')
    else:
        #- src_group is set: it may be 'self', a raw sg-xxxx id, or an
        #- '@name' Heet resource reference
        if rule.src_group == 'self':
            #self.heet.logger.debug(' ^^^ rule src_group refers to "self"')
            boto_self = self.get_or_create_resource_object()
            if not boto_self:
                return
            self.src_group_references[boto_self.id] = boto_self
        elif rule.src_group != 'self' and not self.rule_has_dependent_reference(rule):
            #self.heet.logger.debug('^^^ rule that references AWS SG directly: {}'.format(rule.src_group))
            #- get the boto object for the reference security group so we
            #- can pass that object into boto's authorize() method
            src_group_resource = self.conn.get_all_security_groups(group_ids=rule.src_group)
            if len(src_group_resource) <= 0:
                #self.heet.logger.debug('^^^ rule references another security group ID [{}] that doesn\'t exist'.format(rule.src_group))
                rule_status.append('References another security group ID [{}] that doesn\'t exist'.format(rule.src_group))
            else:
                self.src_group_references[rule.src_group] = src_group_resource[0]
                self.heet.logger.debug('added src_group_references[{}]'.format(rule.src_group))
        elif self.heet.is_resource_ref(rule.src_group):
            #- this is a reference to another heet security group helper object
            #- we should make sure that this actually exists before saying its okay
            #- but we can only do that after we have a comprehensive list of all the
            #- security groups to be created, which we will only have at the end of the
            #- program.
            #- So here, we add this name to a list of things which will be done at exit.
            #self.heet.logger.debug('^^^ rule seems to be a new style resource reference.')
            key = self.make_key_from_rule(rule)
            if key not in self.dependent_rules:
                self.dependent_rules[key] = rule
                self.heet.add_dependent_resource(self, key)
    return rule_status
def is_aws_reference(self, src_group):
    """Check if src_group looks like a raw AWS security group ID.

    True when the value starts with 'sg-' and contains exactly one '-'
    (e.g. 'sg-0123abcd'). Falsy values return False.
    BUGFIX: the previous char-by-char check indexed src_group[2] and raised
    IndexError for strings shorter than three characters (e.g. 'sg');
    startswith() is safe for any length.
    """
    if not src_group:
        return False
    return src_group.startswith('sg-') and len(src_group.split('-')) == 2
def get_boto_src_group(self, src_group):
    """Resolve a src_group reference to a boto SecurityGroup object.

    src_group can be:
        * '@resource-reference' (a Heet resource reference)
        * 'sg-xxxxxxx' (a raw AWS security group ID)
    Returns a boto object that can be used in authorize / revoke, or None
    if the reference cannot be resolved.
    """
    boto_sg = None
    if self.heet.is_resource_ref(src_group):
        self.heet.logger.debug('get_boto_src_group: will try to look [{}] up as heet resource ref'.format(src_group))
        try:
            rr = self.heet.resource_refs[src_group]
            boto_sg = rr.get_resource_object()
        except KeyError as err:
            self.heet.logger.debug('get_boto_src_group: failed to lookup [{}] in heet resource refs table'.format(src_group))
            boto_sg = None
    elif self.is_aws_reference(src_group):
        self.heet.logger.debug('get_boto_src_group: will try to retrieve sg id [{}] from AWS API'.format(src_group))
        #XXX we should actually get it by tag
        # move create tag to be a utility function
        matching_groups = self.conn.get_all_security_groups(group_ids=[src_group])
        if not matching_groups:
            #- BUGFIX: this log line previously referenced the undefined names
            #- tag_name/tag_value (leftovers of commented-out code) and raised
            #- NameError whenever the lookup came back empty.
            self.heet.logger.debug('get_boto_src_group: aws returned no groups for id [{}]'.format(src_group))
            boto_sg = None
        else:
            self.heet.logger.debug('get_boto_src_group: aws returned matching group')
            boto_sg = matching_groups[0]
    else:
        self.heet.logger.debug('get_boto_src_group: can not tell what type of src_group format this is: [{}]'.format(src_group))
        boto_sg = None
    return boto_sg
def normalize_rule(self, rule):
    """Normalize SecurityGroupRule attributes that can have multiple values representing the same thing into one well-defined value.

    Normalizations performed:
        * src_group 'self' -> this group's own sg id
        * src_group '@ref' -> the referenced group's sg id (when resolvable now)
        * ip_protocol -1 (int) -> '-1' (string, as the API returns it)
        * from_port/to_port of -1 or None -> (None, None)
    Returns a new SecurityGroupRule; the input rule is not mutated.
    """
    #- make a mutable copy
    new_rule = {'ip_protocol' : rule.ip_protocol,
                'from_port' : rule.from_port,
                'to_port' : rule.to_port,
                'cidr_ip' : rule.cidr_ip,
                'src_group' : rule.src_group }
    #- just go through and normalize all the values one by one and make a new rule at the end
    #- out of all the stuff we collect throughout the normalization tests
    if new_rule['src_group'] == 'self':
        #- normalize_rule called from add_rules which is called from init, so we may not exist: call get_or_create.
        boto_self = self.get_or_create_resource_object()
        if not boto_self:
            #- can't resolve 'self' yet: hand back the rule unchanged
            return rule
        new_rule['src_group'] = boto_self.id
    if self.heet.is_resource_ref(new_rule['src_group']):
        try:
            #- try to look it up
            self.heet.logger.debug('Normalizing resource_reference: {}'.format(rule.src_group))
            boto_sg = self.get_boto_src_group(rule.src_group)
            if boto_sg:
                self.heet.logger.debug('*** resolved resource_reference: {}'.format(rule.src_group))
                self.heet.logger.debug('*** adding local resource_reference: {}'.format(rule.src_group))
                self.src_group_references[boto_sg.id] = boto_sg
                new_rule['src_group'] = boto_sg.id
            else:
                self.heet.logger.debug('normalize_rule: get_resource_object returned nothing for group: {}.'.format(rule.src_group))
        except KeyError as err:
            self.heet.logger.debug('*** normalize_rule: resource_reference not found: {}, will handle in 2nd pass'.format(rule.src_group))
            #- it wasn't in the reference table yet,
            #- we'll handle this in converge() and converge_dependency()
            pass
    if rule.ip_protocol == -1:
        self.heet.logger.debug('Normalizing ip_protocol: {} to str(-1)'.format(rule.ip_protocol))
        new_rule['ip_protocol'] = '-1'
    #- we check for None explicitly also to short-circuit else the int() will fail w/ TypeError and we want it to pass
    if new_rule['from_port'] is None or new_rule['to_port'] is None or int(new_rule['from_port']) == -1 or int(new_rule['to_port']) == -1:
        new_rule['from_port'] = None
        new_rule['to_port'] = None
    final_rule = SecurityGroupRule(new_rule['ip_protocol'], new_rule['from_port'], new_rule['to_port'], new_rule['cidr_ip'], new_rule['src_group'])
    return final_rule
def add_rule(self, rule):
    """Validate, normalize, and record a rule for this security group.

    The rule is normalized first so local and API-returned rules compare
    consistently, then sanity-checked. A rule that passes is added to
    self.rules (rules with unresolvable '@' references are registered for
    convergence at exit by the check itself); a failing rule is logged and
    discarded.
    """
    candidate = self.normalize_rule(rule)
    problems = self.rule_fails_check(candidate)
    if problems:
        for problem in problems:
            self.heet.logger.error('Security Group failed sanity checks: ')
            self.heet.logger.error(' : ' + problem)
        return
    self.rules.add(candidate)
def build_heet_id_tag(self):
    """Build the (tag_name, tag_value) pair that identifies this group to Heet.

    The tag value joins environment, project name, VPC id and group name with
    '/'. Consequences of this format:
        * you can change the id of a security group and still converge
        * you can not converge across projects, environments, group names, or VPCs
        * you can change the rules of an SG and converge
    """
    identity = SgTag(self.heet.get_environment(), self.heet.base_name, self.vpc_id, self.aws_name)
    return ('AWSHeet', '/'.join(identity))
def build_aws_name(self, base_name):
    """Build the AWS-visible group name: environment, project, then base name.

    NB: AWS only determines SG uniqueness by (VPC_ID, SG Name), so the
    environment and project prefixes keep groups from colliding when the same
    code runs in different environments.
    """
    parts = [self.heet.get_environment(), self.heet.base_name, base_name]
    return '-'.join(parts)
def rule_has_dependent_reference(self, rule):
    """True when the rule's src_group names another Heet object (an '@name' reference)."""
    src = rule.src_group
    return self.heet.is_resource_ref(src)
def base_name_to_ref(self, base_name):
    """Turn a Heet SG base name into a resource reference by prepending '@'."""
    return ''.join(('@', base_name))
def ref_to_base_name(self, base_name):
    """Strip the leading '@' from a resource reference to recover the base name.

    Logs an error and returns None if the argument is not actually a reference.
    """
    if base_name[0] == '@':
        return base_name[1:]
    self.heet.logger.error("Trying to dereference a SG name that isn't a reference: {}".format(base_name))
    return None
def converge(self):
    """Adds missing rules, revokes extra rules, creates entire group if necessary.

    If a rule can't be converged yet (due to an unresolvable resource
    reference) we register it with heet so converge_dependency() is called at
    module exit, when the full resource reference table exists.
    NOTE(review): this module is Python 2 (print statements below).
    """
    self.heet.logger.info("Converging security group: %s" % self.aws_name)
    boto_self = self.get_resource_object()
    if boto_self is None:
        #- group doesn't exist yet: create, then tag it with the Heet identity
        self.heet.logger.debug("Creating new group: %s" % self.aws_name)
        boto_self = self.conn.create_security_group(self.aws_name, self.description, self.vpc_id)
        self.aws_id = boto_self.id
        remote_rules = set()
        (tag_name,tag_value) = self.heet_id_tag
        try:
            boto_self.add_tag(key=tag_name, value=tag_value)
        except boto.exception.EC2ResponseError as err:
            if err.code == 'InvalidGroup.NotFound':
                #- wait for API consistency - sleep momentarily before adding tag
                self.heet.logger.debug('converge: set_tag failed due to SG not found. Waiting a moment then trying again.')
                time.sleep(3)
                boto_self.add_tag(key=tag_name, value=tag_value)
    else:
        self.heet.logger.debug("Using pre-existing group: %s" % self.aws_name)
        self.aws_id = boto_self.id
        remote_rules = set(self.normalize_aws_sg_rules(boto_self))
    #- make our own group resolvable under both 'self' and its sg id
    self.src_group_references['self'] = boto_self
    self.src_group_references[boto_self.id] = boto_self
    if self.rules:
        desired_rules = set(self.rules)
    else:
        desired_rules = set()
    for rule in desired_rules:
        #- if it isn't there, add it
        if rule in remote_rules:
            #- the rule we want to add is already there, so skip it
            self.heet.logger.debug("Already Authorized: %s on %s" % (rule, self))
        else:
            if rule.src_group:
                #- check if this rule can be converged now or later
                if self.rule_has_dependent_reference(rule):
                    self.heet.logger.debug("-- Rule refers to another Heet group. Will converge_dependency() atexit: {}".format(rule))
                    key = self.make_key_from_rule(rule)
                    if key not in self.dependent_rules:
                        self.dependent_rules[key] = rule
                        self.heet.add_dependent_resource(self, key)
                elif self.is_aws_reference(rule.src_group):
                    #- use the src_group object we already got when we checked the rule
                    self.heet.logger.info("Adding Authorization: %s on %s" % (rule, self))
                    try:
                        boto_self.authorize(rule.ip_protocol,rule.from_port, rule.to_port,rule.cidr_ip, self.src_group_references[rule.src_group])
                    except KeyError as err:
                        #- a missing reference here is unrecoverable: dump state and hard-exit
                        print ""
                        print ""
                        print 'FATAL ERROR: key error in src_group_references. looked for [{}] in:'.format(rule.src_group)
                        print self.src_group_references
                        print ""
                        print ""
                        os._exit(-1)
                else:
                    print "Unexpected Rule format: {}".format(rule)
                    raise AttributeError('Source Group reference can NOT be converged')
            else:
                #- plain cidr_ip rule: authorize directly
                boto_self.authorize(rule.ip_protocol,rule.from_port, rule.to_port,rule.cidr_ip)
    #- remove all the rules that we didn't explicitly declare we want in this group
    #- if they can currently be resolved (can only resolve names present in the resource reference table at the moment
    #- of execution. )
    #- any desired rule that is still in resource reference form because it couldn't be resolved yet will not match
    #- anything, so we remove all the resource reference rules from the desired rules before comparison
    desired_rules_copy = copy.copy(desired_rules)
    for rule in desired_rules_copy:
        if self.rule_has_dependent_reference(rule):
            desired_rules.discard(rule)
    for rule in remote_rules:
        if rule not in desired_rules:
            if self.is_aws_reference(rule.src_group):
                #- skip this rule for now
                self.heet.logger.debug('converge: skipping rule with aws sg id: [{}]'.format(rule))
                key = self.make_key_from_rule(rule)
                if key not in self.dependent_rules:
                    self.dependent_rules[key] = rule
                    self.heet.add_dependent_resource(self, key)
                #- continue looping, but skip this rule now that we've registered it for convergence at exit
                continue
            else:
                self.heet.logger.info("Removing remote rule not declared locally: {} in {}".format(rule, self))
                print ""
                print ""
                print "DEBUG: removing rule"
                print "remote: "
                print str(remote_rules)
                print ""
                print "current rule being tested: "
                print str(rule)
                print ""
                print "desired rules: "
                print str(desired_rules)
                print ""
                print ""
                #- boto-specific: get the referring security group boto-level object to delete this rule
                #- TODO: this may be redundant if normalization strips the boto object for the src_group
                #- as I'm resolving here. This isn't necessary if the pre-normalized rule has the object in it
                ref_sg = None
                if rule.src_group is not None:
                    if rule.src_group == 'self':
                        #- NOTE(review): this leaves ref_sg as a one-element
                        #- list, unlike the sg-id branch below which unwraps
                        #- it - looks inconsistent; confirm boto accepts it
                        ref_sg = [self.get_or_create_resource_object()]
                    elif self.is_aws_reference(rule.src_group):
                        ref_sg = self.conn.get_all_security_groups(group_ids=rule.src_group)
                        if len(ref_sg) >= 1:
                            ref_sg = ref_sg[0]
                        else:
                            self.heet.logger.error("Rule to delete references another Security Group that no longer exists. Will fail...")
                            #- NOTE(review): 'reg_sg' looks like a typo for
                            #- 'ref_sg'; as written this assignment has no effect
                            reg_sg = None
                if rule.src_group is not None and ref_sg is None:
                    #- if we didn't just find it, skip it for now
                    key = self.make_key_from_rule(rule)
                    if key not in self.dependent_rules:
                        self.dependent_rules[key] = rule
                        self.heet.add_dependent_resource(self, key)
                else:
                    boto_self.revoke(rule.ip_protocol, rule.from_port, rule.to_port, rule.cidr_ip, ref_sg)
    #- Post Converge Hook
    self.post_converge_hook()
    def converge_dependent_add_rule(self, init_rule):
        """Called from converge_dependency for the rules that needed to be added
        but used a resource reference that couldn't yet be resolved on first pass in converge()

        init_rule -- the rule exactly as parked in self.dependent_rules (src_group
        still in '@name' form); the referenced helper should be in heet.resource_refs now.
        """
        boto_self = self.get_resource_object()
        resource_name = init_rule.src_group
        #- resolve the '@name' reference to a live boto SecurityGroup object
        boto_src_group = self.heet.resource_refs[resource_name].get_resource_object()
        #- TODO: clean this up
        #- we need the ID for comparisons, but we need the object for the API call
        #- and we start with a resource reference
        new_rule = SecurityGroupRule(init_rule.ip_protocol,
                                     init_rule.from_port,
                                     init_rule.to_port,
                                     init_rule.cidr_ip,
                                     boto_src_group.id)
        #- id-form for comparison against the normalized remote rules
        normalized_rule = self.normalize_rule(new_rule)
        #- object-form for the boto authorize() call
        final_rule = SecurityGroupRule(normalized_rule.ip_protocol,
                                       normalized_rule.from_port,
                                       normalized_rule.to_port,
                                       normalized_rule.cidr_ip,
                                       boto_src_group)
        remote_rules = self.normalize_aws_sg_rules(boto_self)
        if normalized_rule not in remote_rules:
            boto_self.authorize(final_rule.ip_protocol, final_rule.from_port, final_rule.to_port, final_rule.cidr_ip, final_rule.src_group)
            #- pause to respect AWS API rate limits
            time.sleep(AWS_API_COOLDOWN_PERIOD)
        return
def converge_dependent_remove_test(self, remote_rule):
"""Take this rule that has an AWS SG ID and is an existing remote rule and now check if this rule is a desired rule or not."""
#- first take all the current desired rules and re-normalize them so the resource references will be looked up
boto_self = self.get_resource_object()
desired_rules = set()
for rule_x in self.rules:
desired_rules.add(self.normalize_rule(rule_x))
if remote_rule not in desired_rules:
self.heet.logger.debug('converge_dependent_remove_test: removing rule [{}]'.format(remote_rule))
boto_src_group = self.get_boto_src_group(remote_rule.src_group)
boto_self.revoke(remote_rule.ip_protocol, remote_rule.from_port, remote_rule.to_port, remote_rule.cidr_ip, boto_src_group)
return
    def converge_dependency(self, key):
        """converge_at_exit: this convergence pattern is different than the single-call of converge.
        converge_dependency will be called once for every rule that needed to be converged at exit.
        This is where we converge the rules that refer to other security groups that are declared in the same AWSHeet module
        Dependencies here is any security group rule that referenced another Heet group that is being declared in this script.
        If it is the first time the group is created, the referenced group will not exist yet, and so the rule will fail convergence.
        So, to keep it simple, any group that refers to another group in a Heet script will be put off to be converged after we are
        sure that the creation of the rule should not fail unless there has been an actual error.

        key -- either the sentinel 'DESTROY_AGAIN' (retry group deletion) or a
        make_key_from_rule() string whose trailing src_group field selects the action:
        '@ref' -> deferred authorize; 'sg-...' -> deferred removal test.
        """
        self.heet.logger.debug("----CONVERGE_DEPENDENCY() {}: {}---- {} of {} rules to process".format(self.base_name, key, self._num_converged_dependencies+1, len(self.dependent_rules)))
        self._num_converged_dependencies += 1
        boto_self = self.get_resource_object()
        if not boto_self:
            self.heet.logger.debug('converge_dependency: no boto_object found. returning without issuing any API calls')
            return
        #- lookup the rule as it was when we saved it
        init_rule = self.dependent_rules[key]
        #- grab the group we need from the resource references
        if key == 'DESTROY_AGAIN':
            #- the earlier destroy() failed (other groups still referenced us); retry now
            self.heet.logger.debug('converge_dependency: destroying 2nd round')
            self.destroy()
        else:
            src_group_name = self.get_src_group_from_key(key)
            if self.heet.is_resource_ref(src_group_name):
                #- a bit opaque, but resource references are only called for rules that are trying
                #- to be added, so we know if we see a resource reference here that this rule was
                #- trying to be added and failed due to a resource reference being unable to be resolved
                self.heet.logger.debug('converge_dependency: add_rule detected: [{}]'.format(init_rule))
                self.converge_dependent_add_rule(init_rule)
            elif self.is_aws_reference(src_group_name):
                #- equally opaque, the only other rules we register to be called back for are rules
                #- that existed remotely that referred to an AWS ID that we couldn't look up at the time
                #- that we needed to check if it should be removed or not
                self.heet.logger.debug('converge_dependency: remove_test detected: [{}]'.format(init_rule))
                self.converge_dependent_remove_test(init_rule)
        return
    def destroy(self):
        """Try to remove everything from existence.

        First revokes every rule whose grant references another security group
        (those cross-references block group deletion), then attempts to delete the
        group itself. If deletion fails (typically because other groups still
        reference this one), schedules a single 'DESTROY_AGAIN' retry to run after
        the other groups' rules have been removed.
        """
        boto_self = self.get_resource_object()
        if not boto_self:
            self.heet.logger.debug("destroy [{}]: no resource object found, returning without any API calls.".format(self.base_name))
            return
        #- Pre Destroy Hook
        self.pre_destroy_hook()
        self.heet.logger.info("deleting SecurityGroup [{}]".format(self.aws_name))
        #- first delete any src_group rules so the group can be deleted
        self.heet.logger.debug('destroy [{}]: testing [{}] rules to remove ones w/ src_groups'.format(self.aws_name, len(boto_self.rules)))
        #- deepcopy: we mutate the group's rules via revoke() while iterating
        rules_copy = copy.deepcopy(boto_self.rules)
        if isinstance(rules_copy, collections.Iterable) and len(rules_copy) > 0:
            for boto_rule in rules_copy:
                self.heet.logger.debug('destroy [{}]: testing rule for src_group: [{}]'.format(self.aws_name, boto_rule))
                for boto_grant in boto_rule.grants:
                    if boto_grant.group_id is not None:
                        self.heet.logger.debug('destroy [{}]: found rule with group_id: [{}]'.format(self.aws_name, boto_grant.group_id))
                        try:
                            src_group_ref = self.conn.get_all_security_groups(group_ids=[boto_grant.group_id])[0]
                            self.heet.logger.debug('destroy [{}]: removing rule with src_group to remove group.({}:{})'.format(self.aws_name, boto_grant.group_id, src_group_ref.name))
                            boto_self.revoke(boto_rule.ip_protocol, boto_rule.from_port, boto_rule.to_port, boto_grant.cidr_ip, src_group_ref)
                            #- pause to respect AWS API rate limits
                            time.sleep(AWS_API_COOLDOWN_PERIOD)
                        except boto.exception.EC2ResponseError as err:
                            #- best effort: a rule that can't be revoked is logged and skipped
                            self.heet.logger.debug('destroy [{}]: failed to remove rule: [{}]'.format(self.aws_name, err.message))
        self.heet.logger.debug('destroy [{}]: done removing rules.'.format(self.aws_name))
        try:
            time.sleep(AWS_API_COOLDOWN_PERIOD)
            boto_self.delete()
            self.heet.logger.info('Successfully deleted group {}.'.format(self.aws_name))
        except boto.exception.EC2ResponseError as err:
            if 'DESTROY_AGAIN' in self.dependent_rules:
                #- second attempt already failed; give up and report
                self.heet.logger.info("*** Unable to delete {}. {}".format(self.aws_name, err.message))
            else:
                #- try again after all the other groups rules are deleted
                self.heet.add_dependent_resource(self, 'DESTROY_AGAIN')
                self.dependent_rules['DESTROY_AGAIN'] = 'placeholder'
        return
| 49.90781 | 187 | 0.627841 | """This module implements the security group helper module for AWSHeet that is aimed at providing idempotent AWS EC2 security groups.
Currently, it only supports Security Groups that are in a VPC.
Rules are created using the SecurityGroupRule type and then they are collected together inside an iterable (usually a set is used).
This collection of rules is then passed along to the SecurityGroupHelper constructor which is also passed a name and a description.
Example 1 - give access to a specified list of IP addresses:
#create a new security group that gives access to the following ips to port 80
cidr_ips = ['192.168.0.1/32', '10.10.11.12/24', '155.246.0.0/16']
http_port = 80
rules_for_new_security_group = set()
for cidr_ip_x in cidr_ips:
rules_for_new_security_group.add(SecurityGroupRule(ip_protocol='tcp', from_port=http_port, to_port=http_port, cidr_ip=cidr_ip_x, src_group=None))
new_security_group = SecurityGroupHelper(name='New Test Group', description='just a simple example group', rules=rules_for_new_security_group)
Example 2 - give two separate security groups mutual access to SSH / port 22:
sg1_rules = set() #- the set of rules for the first security group
sg2_rules = set() #- the set of rules for the second security group
#- a shared rule based on IP address and ICMP
all_icmp = SecurityGroupRule(ip_protocol='icmp', from_port=-1, to_port=-1, cidr_ip='0.0.0.0/0', src_group=None)
sg1_rules.add(all_icmp)
sg2_rules.add(all_icmp)
#- use an '@' symbol in the src_group name to specify a group by name, even if the group doesn't exist yet
sg1_rules.add(SecurityGroupRule(ip_protocol='tcp', from_port=22, to_port=22, cidr_ip=None, src_group='@securityGroup2'))
sg2_rules.add(SecurityGroupRule(ip_protocol='tcp', from_port=22, to_port=22, cidr_ip=None, src_group='@securityGroup1'))
#- create the actual groups
sg1 = SecurityGroupHelper(name='securityGroup1', description='example group 1', rules=sg1_rules)
sg2 = SecurityGroupHelper(name='securityGroup2', description='example group 2', rules=sg2_rules)
#- program exits
#- at program exit, the remaining dependencies will now be converged
#- this is an easy way of forward referencing when you create the rules so that the referenced groups
#- don't have to exist at that time, as long as they are created within the lifetime of the same program
"""
from .awshelper import AWSHelper
import time
import re
import os
import json
import subprocess
import tempfile
import argparse
import sys
import logging
import atexit
import boto
import boto.ec2
import boto.ec2.elb
import boto.cloudformation
import collections
import ipaddress
import boto.exception
import copy
#- seconds to wait between successive API calls, to stay under AWS rate limits
AWS_API_COOLDOWN_PERIOD = 1.0
#TODO: IMPLEMENT TAGGING
#- no need for a full class. These are simple tuples
#- TODO: actually having rules as immutables makes normalization more complex.
#- refactor this particular tuple into its own class and define rules of
#- interaction between security groups and rules they contain
#- as rules themselves do need access to the heet object and to the boto_sg
#- to perform some aspects of normalization
#- cidr_ip and src_group are mutually exclusive; src_group may be an 'sg-...' id,
#- the literal string 'self', or an '@name' Heet resource reference
SecurityGroupRule = collections.namedtuple('SecurityGroupRule', ['ip_protocol', 'from_port', 'to_port', 'cidr_ip', 'src_group'])
#- rm_group: only try to delete the group, fail if the API call fails
#- rm_instances: delete all the instances in this group before attempting deletion of this security group
#- rm_enis: delete all of the Elastic Network Interfaces in this security group before attempting deletion of this security group
SecurityGroupDeleteMode = collections.namedtuple('SecurityGroupDeleteMode', ['rm_group', 'rm_instances', 'rm_enis'])
#- this defines the identity of the security group to Heet Code
#- as long as none of these change, we will converge the same AWS resource
#- VPC ID
#- Heet Project Name (Base Name / the name of the script)
#- Heet Environment (usually, testing, staging or production)
#- Security Group Name
SgTag = collections.namedtuple('SecurityGroupIDTag',[ 'environment', 'project_name', 'vpc_id', 'sg_name'])
class SecurityGroupHelper(AWSHelper):
"""modular and convergent security groups in VPC (and only in VPC)
Params"""
    def __init__(self, heet, base_name, description, rules=None, vpc_id=None, rm_group=True, rm_instances=False, rm_enis=False):
        """Create (or adopt) a convergent VPC security group.

        heet        -- the AWSHeet core object (logger, config, resource registry)
        base_name   -- logical name; the real AWS name comes from build_aws_name()
        description -- AWS security group description string
        rules       -- optional iterable of SecurityGroupRule tuples to converge
        vpc_id      -- NOTE(review): accepted but never used; the VPC id is always
                       read from heet.get_value('vpc_id') below -- confirm intent
        rm_group / rm_instances / rm_enis -- deletion flags (SecurityGroupDeleteMode)
        """
        self.heet = heet
        self.base_name = base_name
        self.description = description
        self.aws_name = self.build_aws_name(self.base_name)
        self.region = self.heet.get_region()
        self.vpc_id = self.heet.get_value('vpc_id', required=True)
        self._resource_object = None
        self.delete_modes = SecurityGroupDeleteMode(rm_group, rm_instances, rm_enis)
        self.aws_id = None
        #- helps to know how many we have done, how many left
        self._num_converged_dependencies = 0
        self.heet.logger.debug('^^^ SGH init: [{}]'.format(self.base_name))
        #- these are actually dependent on the above working
        #- (build_heet_id_tag reads self.vpc_id / self.aws_name set just above)
        self.heet_id_tag = self.build_heet_id_tag()
        self.conn = boto.ec2.connect_to_region(
            self.region,
            aws_access_key_id=heet.access_key_id,
            aws_secret_access_key=heet.secret_access_key)
        #- when we create a rule that references another group
        #- we have to check that group exists
        #- so, when we do that check, we cache the resulting objects
        #- here. Saves extra calls to the API, which can be throttled.
        self.src_group_references = dict()
        self.rules = set()
        #- this is where we put the rules that refer to other AWSHeet SGs that are also declared
        #- in this same module. Dict Key for each is the rule's src_group attribute
        self.dependent_rules = dict()
        #- this will actually make API calls
        #- to get the source group reference objects
        if rules is not None:
            for rule in rules:
                self.add_rule(rule)
        #- Post Init Hook
        self.post_init_hook()
        #- add ourselves to the heet dict so we are reachable by an '@' reference
        heet.add_resource_ref(self, self.base_name_to_ref(self.base_name))
        # this will callback the new instance's securitygrouphelper.converge()
        heet.add_resource(self)
def __str__(self):
return "SecurityGroup %s" % self.aws_name
def normalize_aws_sg_rules(self, aws_sg):
"""AWS has grants and rules, but we work with them as a logical unit.
The rules have the ip_protocol, from_port, to_port while the grants have the remaining parameters,
which are the mutually exclusive group_id or cidr_ip parameters
Also normalize sg-ids that are references to 'self'
and convert the security group IDs to resource references for SGs in this module"""
boto_self = self.get_resource_object()
normalized_rules = set()
if aws_sg is not None:
for rule in aws_sg.rules:
for grant in rule.grants:
normalized_group_id = grant.group_id
rule = SecurityGroupRule(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, normalized_group_id)
#- be sure that we are always comparing similarly normalized rules
#- apply self.normalize_rule to API returned rules as well
normalized_rules.add(self.normalize_rule(rule))
return normalized_rules
def get_resource_object(self):
"""Get or create the Boto Version of this security group from EC2 via API"""
boto_group = None
#- build the tag and find it by tag
(tag_name, tag_value) = self.heet_id_tag
matching_groups = self.conn.get_all_security_groups(filters={'tag-key' : tag_name, 'tag-value' :tag_value})
if matching_groups:
#- if there's more than one security group in the same project and environment with the same name,
#- this is worthy of logging an error as it isn't expected
if len(matching_groups) > 1:
self.heet.logger.warn("multiple security groups returned!: search tag:[{}: {}]".format(tag_name, tag_value))
boto_group = matching_groups[0]
self.aws_id = boto_group.id
return boto_group
def get_or_create_resource_object(self):
"""Get or create the Boto Version of this security group from EC2 via API"""
(tag_name, tag_value) = self.heet_id_tag
boto_group = self.get_resource_object()
if not boto_group and not self.heet.args.destroy:
#- it doesn't exist yet
try:
self.heet.logger.debug('get_or_create_resource_object: creating new security group')
boto_group = self.conn.create_security_group(name=self.aws_name, description=self.description, vpc_id=self.vpc_id)
except boto.exception.EC2ResponseError as err:
print 'AWS EC2 API error: {} ({})'.format(err.message, err)
return None
self.heet.logger.debug('get_or_create_resource_object: successfully created new security group, waiting to tag')
time.sleep(AWS_API_COOLDOWN_PERIOD)
self.heet.logger.debug('get_or_create_resource_object: tagging new security group: [{}:{}]'.format(tag_name, tag_value))
try:
#- sometimes a short sleep isn't enough, and we really don't want to exit before tagging
#- as that makes the next convergence cycle fail until the group is deleted manually.
boto_group.add_tag(key=tag_name, value=tag_value)
self.heet.logger.debug('get_or_create_resource_object: successfully created new tagged group.')
self.aws_id = boto_group.id
except boto.exception.EC2ResponseError as err:
if err.code == 'InvalidGroup.NotFound':
self.heet.logger.debug('get_or_create_resource: setting ID tag failed. Waiting to try again...')
time.sleep(3)
boto_self.add_tag(key=tag_name, value=tag_value)
else:
raise err
return boto_group
def make_key_from_rule(self, rule):
"""Just join all the things together to make a unique string"""
key = '/'.join([str(rule.ip_protocol), str(rule.from_port), str(rule.to_port), str(rule.cidr_ip), str(rule.src_group)])
return key
def get_src_group_from_key(self, key):
"""Just undo make_key_from_rule to get the source group"""
return key.split('/')[-1]
    def rule_fails_check(self, rule):
        """Checks that the rule has all the needed attributes
        Returns a list of strings with error messages for each test the rule failed.
        If it passes, then the list will be empty.
        As well, this populates self.src_group_references dict

        NOTE(review): one code path (src_group == 'self' with no boto object)
        returns None rather than the list -- callers treat None as "no failures";
        confirm that is intended.
        """
        #- a list of all the ways that the rule has failed
        rule_status = []
        if str(rule.ip_protocol) not in ['tcp','udp', 'icmp', '-1']:
            rule_status.append('bad value for ip_protocol in rule {}'.format(str(rule)))
        #- try to convert to float to check if it is a valid port number
        try:
            if rule.from_port is not None and rule.from_port < 0 and rule.from_port != -1:
                rule_status.append('rule from_port is a negative number that is not -1: [{}]'.format(rule.from_port))
                #- deliberate: funnel bad negatives into the except handler below
                #- (so a bad negative port collects both error messages)
                raise TypeError()
            float(rule.from_port)
        except TypeError as err:
            if rule.from_port is None:
                #- None is a legal "no port" value (matches what AWS returns for -1)
                pass
            else:
                rule_status.append('rule from port is not a valid integer')
        try:
            if rule.to_port is not None and rule.to_port < 0 and rule.to_port != -1:
                rule_status.append('rule to_port is a negative number that is not -1: [{}]'.format(rule.to_port))
                raise TypeError()
            float(rule.to_port)
        except TypeError as err:
            if rule.to_port is None:
                pass
            else:
                rule_status.append('rule to port is not a valid integer')
        #- Check the (.cidr_ip, .src_group) pair compliance
        #- need to have exactly one of src_group, cidr_ip
        if rule.cidr_ip is not None:
            #self.heet.logger.debug(' ^^^ rule has cidr_ip')
            if rule.src_group is not None:
                self.heet.logger.debug(' ^^^ rule has both cidr_ip and src_group')
                rule_status.append('Can\'t have both cidr_ip and src_group set simultaneously: rule {}'.format(str(rule)))
            else:
                #self.heet.logger.debug(' ^^^ rule has only cidr_ip')
                #- test the cidr_ip
                try:
                    ipaddress.IPv4Network(unicode(rule.cidr_ip))
                except ValueError as err:
                    #self.heet.logger.debug(' ^^^ rule has invalid cidr_ip')
                    rule_status.append('rule has an invalid cidr_ip value: [{}]'.format(rule.cidr_ip))
        elif rule.cidr_ip is None and rule.src_group is None:
            #self.heet.logger.debug(' ^^^ rule has neither cidr_ip nor src_group')
            rule_status.append('Must specify one or other of [cidr_ip, src_group]')
        else:
            #- src_group-only rule: resolve and cache the referenced group object
            if rule.src_group == 'self':
                #self.heet.logger.debug(' ^^^ rule src_group refers to "self"')
                boto_self = self.get_or_create_resource_object()
                if not boto_self:
                    #- NOTE(review): bare return -> caller receives None, not rule_status
                    return
                self.src_group_references[boto_self.id] = boto_self
            elif rule.src_group != 'self' and not self.rule_has_dependent_reference(rule):
                #self.heet.logger.debug('^^^ rule that references AWS SG directly: {}'.format(rule.src_group))
                #- get the boto object for the reference security group so we
                #- can pass that object into boto's authorize() method
                src_group_resource = self.conn.get_all_security_groups(group_ids=rule.src_group)
                if len(src_group_resource) <= 0:
                    #self.heet.logger.debug('^^^ rule references another security group ID [{}] that doesn\'t exist'.format(rule.src_group))
                    rule_status.append('References another security group ID [{}] that doesn\'t exist'.format(rule.src_group))
                else:
                    self.src_group_references[rule.src_group] = src_group_resource[0]
                    self.heet.logger.debug('added src_group_references[{}]'.format(rule.src_group))
            elif self.heet.is_resource_ref(rule.src_group):
                #- this is a reference to another heet security group helper object
                #- we should make sure that this actually exists before saying its okay
                #- but we can only do that after we have a comprehensive list of all the
                #- security groups to be created, which we will only have at the end of the
                #- program.
                #- So here, we add this name to a list of things which will be done at exit.
                #self.heet.logger.debug('^^^ rule seems to be a new style resource reference.')
                key = self.make_key_from_rule(rule)
                if key not in self.dependent_rules:
                    self.dependent_rules[key] = rule
                    self.heet.add_dependent_resource(self, key)
        return rule_status
def is_aws_reference(self, src_group):
"""Check if the src_group argument looks like an AWS security group ID
Just means the first three characters are 'sg-'"""
is_ref = False
if src_group and src_group[0] == 's' and src_group[1] == 'g' and src_group[2] == '-' and len(src_group.split('-')) == 2:
is_ref = True
return is_ref
def get_boto_src_group(self, src_group):
"""src_group can be:
* @resource-reference
* 'sg-xxxxxxx'
Return a boto object that can be used in authorize / revoke"""
boto_sg = None
if self.heet.is_resource_ref(src_group):
self.heet.logger.debug('get_boto_src_group: will try to look [{}] up as heet resource ref'.format(src_group))
try:
rr = self.heet.resource_refs[src_group]
boto_sg = rr.get_resource_object()
except KeyError as err:
self.heet.logger.debug('get_boto_src_group: failed to lookup [{}] in heet resource refs table'.format(src_group))
boto_sg = None
elif self.is_aws_reference(src_group):
self.heet.logger.debug('get_boto_src_group: will try to retrieve sg id [{}] from AWS API'.format(src_group))
#XXX we should actually get it by tag
# move create tag to be a utility function
#(tag_name, tag_value) = self.heet_id_tag
#matching_groups = self.conn.get_all_security_groups(filters={'tag-key' : tag_name, 'tag-value' :tag_value})
matching_groups = self.conn.get_all_security_groups(group_ids=[src_group])
if not matching_groups:
self.heet.logger.debug('get_boto_src_group: aws returned no groups with tag ([{}],[{}])'.format(tag_name, tag_value))
boto_sg = None
else:
self.heet.logger.debug('get_boto_src_group: aws returned matching group')
boto_sg = matching_groups[0]
else:
self.heet.logger.debug('get_boto_src_group: can not tell what type of src_group format this is: [{}]'.format(src_group))
boto_sg = None
return boto_sg
    def normalize_rule(self, rule):
        """Normalize SecurityGroupRule attributes that can have multiple values representing the same thing into one well-defined value

        Normalizations performed:
          * src_group 'self'          -> this group's own sg-id
          * src_group '@resource-ref' -> the referenced group's sg-id (when resolvable now;
                                         unresolved refs are left as-is for converge_dependency)
          * ip_protocol -1 (int)      -> '-1' (string, as the API returns it)
          * from_port/to_port None/-1 -> None .. None (the API's representation)
        Returns a new SecurityGroupRule; the input tuple is never mutated.
        """
        #- make a mutable copy (namedtuples are immutable)
        new_rule = {'ip_protocol' : rule.ip_protocol,
                    'from_port' : rule.from_port,
                    'to_port' : rule.to_port,
                    'cidr_ip' : rule.cidr_ip,
                    'src_group' : rule.src_group }
        #- just go through and normalize all the values one by one and make a new rule at the end
        #- out of all the stuff we collect throughout the normalization tests
        if new_rule['src_group'] == 'self':
            #- normalize_rule called from add_rules which is called from init, so we may not exist: call get_or_create.
            boto_self = self.get_or_create_resource_object()
            if not boto_self:
                #- cannot resolve 'self' yet (e.g. --destroy mode); hand back the rule untouched
                return rule
            new_rule['src_group'] = boto_self.id
        if self.heet.is_resource_ref(new_rule['src_group']):
            try:
                #- try to look it up
                self.heet.logger.debug('Normalizing resource_reference: {}'.format(rule.src_group))
                #boto_sg = self.heet.resource_refs[new_rule['src_group']].get_resource_object()
                boto_sg = self.get_boto_src_group(rule.src_group)
                if boto_sg:
                    self.heet.logger.debug('*** resolved resource_reference: {}'.format(rule.src_group))
                    self.heet.logger.debug('*** adding local resource_reference: {}'.format(rule.src_group))
                    self.src_group_references[boto_sg.id] = boto_sg
                    new_rule['src_group'] = boto_sg.id
                else:
                    self.heet.logger.debug('normalize_rule: get_resource_object returned nothing for group: {}.'.format(rule.src_group))
            except KeyError as err:
                self.heet.logger.debug('*** normalize_rule: resource_reference not found: {}, will handle in 2nd pass'.format(rule.src_group))
                #- it wasn't in the reference table yet,
                #- we'll handle this in converge() and converge_dependency()
                pass
        if rule.ip_protocol == -1:
            self.heet.logger.debug('Normalizing ip_protocol: {} to str(-1)'.format(rule.ip_protocol))
            new_rule['ip_protocol'] = '-1'
        #- we check for None explicitly also to short-circuit else the int() will fail w/ TypeError and we want it to pass
        if new_rule['from_port'] is None or new_rule['to_port'] is None or int(new_rule['from_port']) == -1 or int(new_rule['to_port']) == -1:
            #self.heet.logger.debug('Normalizing port range: {} .. {} to [None .. None]'.format(rule.from_port, rule.to_port))
            new_rule['from_port'] = None
            new_rule['to_port'] = None
        final_rule = SecurityGroupRule(new_rule['ip_protocol'], new_rule['from_port'], new_rule['to_port'], new_rule['cidr_ip'], new_rule['src_group'])
        return final_rule
def add_rule(self, rule):
"""Print out why a rule fails to be added, else add a rule to this security group
Rule will be normalized and added to one of two lists of rules:
One group is for rules that can be converged immediately
(those ones have no src_group resource references)
The other group is for rules that will be converged after the resource
reference table has been built
"""
normalized_rule = self.normalize_rule(rule)
failures = self.rule_fails_check(normalized_rule)
if not failures:
self.rules.add(normalized_rule)
else:
for err in failures:
self.heet.logger.error('Security Group failed sanity checks: ')
self.heet.logger.error(' : ' + err)
return
def build_heet_id_tag(self):
"""The tag is what defines a security group as a unique component of heet code
This format has the following consequences:
* you can change the id of a security group and still converge
* you can not converge across projects, environments or sgs with different names, or different VPCs
* you can change the rules of an SG and converge"""
sg_tag = SgTag(self.heet.get_environment(), self.heet.base_name, self.vpc_id, self.aws_name)
tag_value = '/'.join(sg_tag)
tag_name = 'AWSHeet'
return (tag_name, tag_value)
def build_aws_name(self, base_name):
"""The name of the security group is basically the Tag concatenated in order, minus the vpc id
NB: AWS only determines SG uniqueness by (VPC_ID, SG Name), so if you want the same code for different environments,
you have to add some additional environment-specific info to the name"""
return '-'.join([self.heet.get_environment(), self.heet.base_name, base_name])
def rule_has_dependent_reference(self, rule):
"""Check if the rule refers to a security group that is another Heet object
For now, we do that by passing in the heet base_name of the group prefixed with an '@'"""
return self.heet.is_resource_ref(rule.src_group)
def base_name_to_ref(self, base_name):
"""Converts the Heet Script's SG base name into a name reference.
Currently, this just means that it is prepended with an '@'"""
return '@' + base_name
def ref_to_base_name(self,base_name):
"""The opposite of the above."""
if base_name[0] == '@':
return base_name[1:]
else:
self.heet.logger.error("Trying to dereference a SG name that isn't a reference: {}".format(base_name))
return None
def converge(self):
"""Adds missing rules, revokes extra rules, creates entire group if necessary
if the rule can't be converged yet (due to an unresolveable resource reference,
we'll let heet know to call us at the module exit time and re-try via converge_dependency()
when we have the full module resource reference table"""
self.heet.logger.info("Converging security group: %s" % self.aws_name)
boto_self = self.get_resource_object()
if boto_self is None:
self.heet.logger.debug("Creating new group: %s" % self.aws_name)
boto_self = self.conn.create_security_group(self.aws_name, self.description, self.vpc_id)
self.aws_id = boto_self.id
remote_rules = set()
(tag_name,tag_value) = self.heet_id_tag
try:
boto_self.add_tag(key=tag_name, value=tag_value)
except boto.exception.EC2ResponseError as err:
if err.code == 'InvalidGroup.NotFound':
#- wait for API consistency - sleep momentarily before adding tag
self.heet.logger.debug('converge: set_tag failed due to SG not found. Waiting a moment then trying again.')
time.sleep(3)
boto_self.add_tag(key=tag_name, value=tag_value)
else:
self.heet.logger.debug("Using pre-existing group: %s" % self.aws_name)
self.aws_id = boto_self.id
remote_rules = set(self.normalize_aws_sg_rules(boto_self))
self.src_group_references['self'] = boto_self
self.src_group_references[boto_self.id] = boto_self
if self.rules:
desired_rules = set(self.rules)
else:
desired_rules = set()
for rule in desired_rules:
#- if it isn't there, add it
if rule in remote_rules:
#- the rule we want to add is already there, so skip it
self.heet.logger.debug("Already Authorized: %s on %s" % (rule, self))
else:
if rule.src_group:
#- check if this rule can be converged now or later
if self.rule_has_dependent_reference(rule):
self.heet.logger.debug("-- Rule refers to another Heet group. Will converge_dependency() atexit: {}".format(rule))
key = self.make_key_from_rule(rule)
if key not in self.dependent_rules:
self.dependent_rules[key] = rule
self.heet.add_dependent_resource(self, key)
elif self.is_aws_reference(rule.src_group):
#- use the src_group object we already got when we checked the rule
self.heet.logger.info("Adding Authorization: %s on %s" % (rule, self))
try:
boto_self.authorize(rule.ip_protocol,rule.from_port, rule.to_port,rule.cidr_ip, self.src_group_references[rule.src_group])
except KeyError as err:
print ""
print ""
print 'FATAL ERROR: key error in src_group_references. looked for [{}] in:'.format(rule.src_group)
print self.src_group_references
print ""
print ""
os._exit(-1)
else:
print "Unexpected Rule format: {}".format(rule)
raise AttributeError('Source Group reference can NOT be converged')
else:
boto_self.authorize(rule.ip_protocol,rule.from_port, rule.to_port,rule.cidr_ip)
#- remove all the rules that we didn't explicitly declare we want in this group
#- if they can currently be resolved (can only resolve names present in the resource reference table at the moment
#- of execution. )
#- any desired rule that is still in resource reference form because it couldn't be resolved yet will not match
#- anything, so we remove all the resource reference rules from the desired rules before comparison
desired_rules_copy = copy.copy(desired_rules)
for rule in desired_rules_copy:
if self.rule_has_dependent_reference(rule):
desired_rules.discard(rule)
for rule in remote_rules:
if rule not in desired_rules:
if self.is_aws_reference(rule.src_group):
#- skip this rule for now
self.heet.logger.debug('converge: skipping rule with aws sg id: [{}]'.format(rule))
key = self.make_key_from_rule(rule)
if key not in self.dependent_rules:
self.dependent_rules[key] = rule
self.heet.add_dependent_resource(self, key)
#- continue looping, but skip this rule now that we've registered it for convergence at exit
continue
else:
self.heet.logger.info("Removing remote rule not declared locally: {} in {}".format(rule, self))
print ""
print ""
print "DEBUG: removing rule"
print "remote: "
print str(remote_rules)
print ""
print "current rule being tested: "
print str(rule)
print ""
print "desired rules: "
print str(desired_rules)
print ""
print ""
#- boto-specific: get the referring security group boto-level object to delete this rule
#- TODO: this may be redundant if normalization strips the boto object for the src_group
#- as I'm resolving here. This isn't necessary if the pre-normalized rule has the object in it
ref_sg = None
if rule.src_group is not None:
if rule.src_group == 'self':
ref_sg = [self.get_or_create_resource_object()]
elif self.is_aws_reference(rule.src_group):
ref_sg = self.conn.get_all_security_groups(group_ids=rule.src_group)
if len(ref_sg) >= 1:
ref_sg = ref_sg[0]
else:
self.heet.logger.error("Rule to delete references another Security Group that no longer exists. Will fail...")
reg_sg = None
if rule.src_group is not None and ref_sg is None:
#- if we didn't just find it, skip it for now
key = self.make_key_from_rule(rule)
if key not in self.dependent_rules:
self.dependent_rules[key] = rule
self.heet.add_dependent_resource(self, key)
else:
boto_self.revoke(rule.ip_protocol, rule.from_port, rule.to_port, rule.cidr_ip, ref_sg)
#- Post Converge Hook
self.post_converge_hook()
    def converge_dependent_add_rule(self, init_rule):
        """Called from converge_dependency for the rules that needed to be added
        but used a resource reference that couldn't yet be resolved on first pass in converge()

        init_rule -- the rule exactly as parked in self.dependent_rules (src_group
        still in '@name' form). By the time this runs, the referenced helper is
        expected to be present in heet.resource_refs.
        """
        boto_self = self.get_resource_object()
        resource_name = init_rule.src_group
        #- the referenced group must exist by now; resolve it to a live boto object
        boto_src_group = self.heet.resource_refs[resource_name].get_resource_object()
        #- TODO: clean this up
        #- we need the ID for comparisons, but we need the object for the API call
        #- and we start with a resource reference
        new_rule = SecurityGroupRule(init_rule.ip_protocol,
                                     init_rule.from_port,
                                     init_rule.to_port,
                                     init_rule.cidr_ip,
                                     boto_src_group.id)
        #- id-form rule is used for membership tests against the normalized remote set
        normalized_rule = self.normalize_rule(new_rule)
        #- object-form rule is what boto's authorize() needs
        final_rule = SecurityGroupRule(normalized_rule.ip_protocol,
                                       normalized_rule.from_port,
                                       normalized_rule.to_port,
                                       normalized_rule.cidr_ip,
                                       boto_src_group)
        remote_rules = self.normalize_aws_sg_rules(boto_self)
        if normalized_rule not in remote_rules:
            boto_self.authorize(final_rule.ip_protocol, final_rule.from_port, final_rule.to_port, final_rule.cidr_ip, final_rule.src_group)
            #- pause to respect AWS API rate limits
            time.sleep(AWS_API_COOLDOWN_PERIOD)
        return
    def converge_dependent_remove_test(self, remote_rule):
        """Take this rule that has an AWS SG ID and is an existing remote rule and now check if this rule is a desired rule or not."""
        #- first take all the current desired rules and re-normalize them so the resource references will be looked up
        boto_self = self.get_resource_object()
        desired_rules = set()
        for rule_x in self.rules:
            desired_rules.add(self.normalize_rule(rule_x))
        if remote_rule not in desired_rules:
            # The remote rule is no longer declared locally: revoke it via the boto API.
            self.heet.logger.debug('converge_dependent_remove_test: removing rule [{}]'.format(remote_rule))
            boto_src_group = self.get_boto_src_group(remote_rule.src_group)
            boto_self.revoke(remote_rule.ip_protocol, remote_rule.from_port, remote_rule.to_port, remote_rule.cidr_ip, boto_src_group)
        return
    def converge_dependency(self, key):
        """converge_at_exit: this convergence pattern is different than the single-call of converge.
        converge_dependency will be called once for every rule that needed to be converged at exit.
        This is where we converge the rules that refer to other security groups that are declared in the same AWSHeet module
        Dependencies here is any security group rule that referenced another Heet group that is being declared in this script.
        If it is the first time the group is created, the referenced group will not exist yet, and so the rule will fail convergence.
        So, to keep it simple, any group that refers to another group in a Heet script will be put off to be converged after we are
        sure that the creation of the rule should not fail unless there has been an actual error."""
        self.heet.logger.debug("----CONVERGE_DEPENDENCY() {}: {}---- {} of {} rules to process".format(self.base_name, key, self._num_converged_dependencies+1, len(self.dependent_rules)))
        self._num_converged_dependencies += 1
        boto_self = self.get_resource_object()
        if not boto_self:
            self.heet.logger.debug('converge_dependency: no boto_object found. returning without issuing any API calls')
            return
        #- lookup the rule as it was when we saved it
        init_rule = self.dependent_rules[key]
        #- grab the group we need from the resource references
        # 'DESTROY_AGAIN' is a sentinel key registered by destroy() when the
        # first deletion attempt failed; retry the deletion instead of
        # converging a rule.
        if key == 'DESTROY_AGAIN':
            self.heet.logger.debug('converge_dependency: destroying 2nd round')
            self.destroy()
        else:
            # The kind of deferred work is encoded in the src_group embedded in
            # the key: a Heet resource ref means "add", an AWS ID means "remove test".
            src_group_name = self.get_src_group_from_key(key)
            if self.heet.is_resource_ref(src_group_name):
                #- a bit opaque, but resource references are only called for rules that are trying
                #- to be added, so we know if we see a resource reference here that this rule was
                #- trying to be added and failed due to a resource reference being unable to be resolved
                self.heet.logger.debug('converge_dependency: add_rule detected: [{}]'.format(init_rule))
                self.converge_dependent_add_rule(init_rule)
            elif self.is_aws_reference(src_group_name):
                #- equally opaque, the only other rules we register to be called back for are rules
                #- that existed remotely that referred to an AWS ID that we couldn't look up at the time
                #- that we needed to check if it should be removed or not
                self.heet.logger.debug('converge_dependency: remove_test detected: [{}]'.format(init_rule))
                self.converge_dependent_remove_test(init_rule)
        return
    def destroy(self):
        """Try to remove everything from existence."""
        boto_self = self.get_resource_object()
        if not boto_self:
            self.heet.logger.debug("destroy [{}]: no resource object found, returning without any API calls.".format(self.base_name))
            return
        #- Pre Destroy Hook
        self.pre_destroy_hook()
        self.heet.logger.info("deleting SecurityGroup [{}]".format(self.aws_name))
        #- first delete any src_group rules so the group can be deleted
        self.heet.logger.debug('destroy [{}]: testing [{}] rules to remove ones w/ src_groups'.format(self.aws_name, len(boto_self.rules)))
        # Deep-copy so revoking rules does not mutate the list being iterated.
        rules_copy = copy.deepcopy(boto_self.rules)
        # NOTE(review): collections.Iterable is a deprecated alias that was
        # removed in Python 3.10; collections.abc.Iterable is the portable
        # spelling -- confirm the supported Python versions before changing.
        if isinstance(rules_copy, collections.Iterable) and len(rules_copy) > 0:
            for boto_rule in rules_copy:
                self.heet.logger.debug('destroy [{}]: testing rule for src_group: [{}]'.format(self.aws_name, boto_rule))
                for boto_grant in boto_rule.grants:
                    if boto_grant.group_id is not None:
                        self.heet.logger.debug('destroy [{}]: found rule with group_id: [{}]'.format(self.aws_name, boto_grant.group_id))
                        try:
                            src_group_ref = self.conn.get_all_security_groups(group_ids=[boto_grant.group_id])[0]
                            self.heet.logger.debug('destroy [{}]: removing rule with src_group to remove group.({}:{})'.format(self.aws_name, boto_grant.group_id, src_group_ref.name))
                            boto_self.revoke(boto_rule.ip_protocol, boto_rule.from_port, boto_rule.to_port, boto_grant.cidr_ip, src_group_ref)
                            time.sleep(AWS_API_COOLDOWN_PERIOD)
                        except boto.exception.EC2ResponseError as err:
                            # Best effort: log and continue with the remaining grants.
                            self.heet.logger.debug('destroy [{}]: failed to remove rule: [{}]'.format(self.aws_name, err.message))
        self.heet.logger.debug('destroy [{}]: done removing rules.'.format(self.aws_name))
        try:
            time.sleep(AWS_API_COOLDOWN_PERIOD)
            boto_self.delete()
            self.heet.logger.info('Successfully deleted group {}.'.format(self.aws_name))
        except boto.exception.EC2ResponseError as err:
            # Deletion fails while other groups still reference this one; retry
            # exactly once via the DESTROY_AGAIN sentinel after other groups'
            # rules have been removed, then give up with a message.
            if 'DESTROY_AGAIN' in self.dependent_rules:
                self.heet.logger.info("*** Unable to delete {}. {}".format(self.aws_name, err.message))
            else:
                #- try again after all the other groups rules are deleted
                self.heet.add_dependent_resource(self, 'DESTROY_AGAIN')
                self.dependent_rules['DESTROY_AGAIN'] = 'placeholder'
        return
| 2,115 | 0 | 54 |
bfe16f1db07ccbc826f95d2ab993112ac10101ca | 766 | py | Python | hacker_rank/special_multiple/special_multiple.py | chidioguejiofor/algorithms | a44fe57b697bf6c2faeb63cce51ba193a886b960 | [
"MIT"
] | null | null | null | hacker_rank/special_multiple/special_multiple.py | chidioguejiofor/algorithms | a44fe57b697bf6c2faeb63cce51ba193a886b960 | [
"MIT"
] | null | null | null | hacker_rank/special_multiple/special_multiple.py | chidioguejiofor/algorithms | a44fe57b697bf6c2faeb63cce51ba193a886b960 | [
"MIT"
] | null | null | null | from urllib.request import urlopen
if __name__ == '__main__':
    # Self-check: run solve() over every n in input.txt and compare against the
    # expected answers in output.txt, reporting any mismatch.
    output_list = read('output')
    for index,n in enumerate(read('input')):
        special_number = solve(int(n))
        if output_list[index] != special_number:
            print('Failed for value n = {}'.format(n) )
| 23.9375 | 85 | 0.557441 | from urllib.request import urlopen
def read(file_name = 'input'):
    """Read hacker_rank/special_multiple/<file_name>.txt and return its lines
    (with the trailing newline stripped) as a list of strings.

    NOTE: the path is hard-coded relative to the repository root, so this
    only works when run from there.
    """
    numbers_list =[]
    with open('hacker_rank/special_multiple/{0}.txt'.format(file_name), 'r') as file:
        for line in file:
            # split('\n')[0] drops the newline while keeping other whitespace
            numbers_list.append(line.split('\n')[0])
    return numbers_list
def solve(n):
    """Return (as a string) the smallest positive multiple of ``n`` whose
    decimal digits are only 9s and 0s.

    Candidates are enumerated in increasing order by reinterpreting each
    integer's binary representation as a decimal number and scaling by 9:
    1 -> 9, 2 -> 90, 3 -> 99, 4 -> 900, ...
    """
    seed = 0
    while True:
        seed += 1
        candidate = int(format(seed, 'b')) * 9
        if candidate >= n and candidate % n == 0:
            return str(candidate)
if __name__ == '__main__':
    # Self-check: run solve() over every n in input.txt and compare against the
    # expected answers in output.txt, reporting any mismatch.
    output_list = read('output')
    for index,n in enumerate(read('input')):
        special_number = solve(int(n))
        if output_list[index] != special_number:
            print('Failed for value n = {}'.format(n) )
| 430 | 0 | 46 |
04eb8cc9f37ad2aae4dc6486fa6965adebe8bc15 | 22,383 | py | Python | softalignments/functions.py | M4t1ss/sAliViz | b955443275ad47fbb172bb006aa2da43fdd7ebd9 | [
"MIT"
] | 72 | 2017-10-20T05:13:35.000Z | 2022-01-26T15:19:12.000Z | softalignments/functions.py | M4t1ss/sAliViz | b955443275ad47fbb172bb006aa2da43fdd7ebd9 | [
"MIT"
] | 25 | 2017-07-06T18:25:17.000Z | 2019-08-27T09:06:38.000Z | softalignments/functions.py | M4t1ss/sAliViz | b955443275ad47fbb172bb006aa2da43fdd7ebd9 | [
"MIT"
] | 18 | 2017-07-04T11:10:56.000Z | 2021-07-21T12:38:53.000Z | # coding: utf-8
from __future__ import division
import unicodedata, math, re, sys, string, os, ntpath, numpy as np
from time import gmtime, strftime
from io import open, StringIO
from imp import reload
from difflib import SequenceMatcher
try:
from itertools import izip
except ImportError:
izip = zip
WORD = re.compile(r'\w+') | 48.764706 | 176 | 0.406022 | # coding: utf-8
from __future__ import division
import unicodedata, math, re, sys, string, os, ntpath, numpy as np
from time import gmtime, strftime
from io import open, StringIO
from imp import reload
from difflib import SequenceMatcher
try:
from itertools import izip
except ImportError:
izip = zip
WORD = re.compile(r'\w+')
def getCP(ali, w = 6):
    """Coverage penalty over the rows of an attention matrix.

    Each row is expected to distribute a total mass of 1; a row whose mass
    deviates from 1 is penalised via 1 / (1 + |1 - mass|**w).  Returns the
    mean log-penalty: 0.0 for perfect coverage, negative otherwise.  Empty
    input yields 0.0.
    """
    row_count = len(ali) or 1
    total = 0.0
    for row in ali:
        deviation = abs(1 - sum(row))
        total += math.log(1 / (1 + deviation ** w))
    return total / row_count
def getEnt(ali):
    """Return the negated mean entropy of the attention rows in ``ali``.

    Each row is normalised to a probability distribution and its Shannon
    entropy (natural log) is subtracted from the accumulator, so sharp
    attention yields values near 0 and diffuse attention more negative
    values.  Rows with no probability mass contribute nothing; an empty
    ``ali`` returns 0.0.
    """
    l = len(ali)
    if l == 0:
        l = 1
    res = 0.0
    for pd in ali:
        norm = sum(pd)
        # Fix: the previous code reset ``res`` to 0 whenever a zero-mass row
        # appeared, silently discarding the entropy accumulated from all
        # earlier rows.  A massless row should simply contribute nothing.
        if norm > 0:
            normPd = [p / norm for p in pd]
            entr = -sum((p * math.log(p) if p else 0) for p in normPd)
            res -= entr
    return res / l
def getRevEnt(ali, w = 0.1):
    """Entropy score of the transposed attention matrix, i.e. per target
    token instead of per source token (delegates to getEnt).

    NOTE(review): the ``w`` parameter is accepted but never used --
    presumably kept for signature symmetry; confirm before removing.
    """
    return getEnt(list(zip(*ali)))
def printHelp():
    """Print CLI usage for the standalone alignment-processing script."""
    print ('process_alignments.py -i <input_file> [-o <output_type>] [-f <from_system>] [-s <source_sentence_file>] [-t <target_sentence_file>]')
    print ('input_file is the file with alignment weights (required)')
    print ('source_sentence_file and target_sentence_file are required only for NeuralMonkey')
    print ('output_type can be web (default), block, block2 or color')
    print ('from_system can be Nematus, Marian, Sockeye, OpenNMT or NeuralMonkey (default)')
def printColor(value):
colors = [
'[48;5;232m[K [m[K',
'[48;5;233m[K [m[K',
'[48;5;234m[K [m[K',
'[48;5;235m[K [m[K',
'[48;5;236m[K [m[K',
'[48;5;237m[K [m[K',
'[48;5;238m[K [m[K',
'[48;5;239m[K [m[K',
'[48;5;240m[K [m[K',
'[48;5;240m[K [m[K',
'[48;5;241m[K [m[K',
'[48;5;242m[K [m[K',
'[48;5;243m[K [m[K',
'[48;5;244m[K [m[K',
'[48;5;245m[K [m[K',
'[48;5;246m[K [m[K',
'[48;5;247m[K [m[K',
'[48;5;248m[K [m[K',
'[48;5;249m[K [m[K',
'[48;5;250m[K [m[K',
'[48;5;251m[K [m[K',
'[48;5;252m[K [m[K',
'[48;5;253m[K [m[K',
'[48;5;254m[K [m[K',
'[48;5;255m[K [m[K',
'[48;5;255m[K [m[K',
]
num = int(math.floor((value-0.01)*25))
if num<0: num = 0
sys.stdout.write(colors[num])
def printBlock2(value):
    """Write a two-character block whose fill level encodes ``value``
    (roughly 0..1) to stdout, using eight shading steps from a full block
    down to blank.  Values below the first step are clamped to the darkest
    block; no newline is written.
    """
    blocks2 = ['██', '▉▉', '▊▊', '▋▋', '▌▌', '▍▍', '▎▎', '▏▏', '  ',]
    num = int(math.floor((value-0.01)*8))
    if num<0: num = 0
    sys.stdout.write(blocks2[num])
def printBlock(value):
    """Write a two-character shading block for ``value`` (roughly 0..1) to
    stdout: dark blocks for low values, blank for high ones.  No newline."""
    shades = ['██', '▓▓', '▒▒', '░░', '  ']
    index = max(int(math.floor((value - 0.01) * 4)), 0)
    sys.stdout.write(shades[index])
def readSnts(filename):
    """Read a tokenized sentence file: one line per sentence, returned as a
    list of HTML-quote-escaped token lists (see escape())."""
    with open(filename, 'r', encoding='utf-8') as fh:
        return [escape(line).strip().split() for line in fh]
def deBPE(srcs, tgts, ali, sources, targets):
    """Merge BPE subword pieces (tokens ending in '@@') back into full
    tokens and collapse the attention matrix ``ali`` accordingly.

    Per the axis handling below, ``ali`` columns correspond to source
    tokens (merged columns are *summed*) and rows to target tokens (merged
    rows are *averaged*).  ``sources`` and ``targets`` are mutated in place
    and then written back as the last entry of ``srcs``/``tgts``.
    Returns the updated (srcs, tgts, ali).
    """
    slen = len(sources)
    for i in range(slen):
        # the list shrinks while merging, so re-check the bound each pass
        if i > len(sources)-1:
            break;
        while len(sources[i]) > 2 and sources[i][-2:] == "@@":
            # glue the next piece onto this token and drop it from the list
            sources[i] = sources[i].replace("@@","") + sources[i+1]
            del sources[i+1]
            slen = len(sources)
            #Now sum the alignments
            newLength = ali.shape[1]-1
            result = np.zeros((ali.shape[0],newLength))
            for x in range(newLength):
                if x == i:
                    # merged column = sum of the two subword columns; delete the
                    # extra column so the remaining indices stay aligned
                    result[:,x] = np.sum(ali[:,x:x+2],axis=1)
                    ali = np.delete(ali, x+1, 1)
                else:
                    result[:,x] = ali[:,x]
            ali = result
    srcs[-1] = sources
    tlen = len(targets)
    for i in range(tlen):
        if i > len(targets)-1:
            break;
        # n counts how many following pieces were merged into targets[i]
        n = 0
        while len(targets[i]) > 2 and targets[i][-2:] == "@@":
            n+=1
            targets[i] = targets[i].replace("@@","") + targets[i+1]
            del targets[i+1]
            tlen = len(targets)
        if n>0:
            #Now average the alignments
            newLength = ali.shape[0]-n
            result = np.zeros((newLength, ali.shape[1]))
            for x in range(newLength):
                if x == i:
                    # merged row = average over the n+1 subword rows
                    result[x,:] = np.average(ali[x:x+n+1,:],axis=0)
                    for c in range(x+n, x, -1):
                        ali = np.delete(ali, c, 0)
                else:
                    result[x,:] = ali[x,:]
            ali = result
    tgts[-1] = targets
    return srcs, tgts, ali
def readNematus(filename, from_system = "Nematus", de_bpe = False):
    """Parse a Nematus-style alignment dump: ' ||| '-separated header lines
    (target at index 1, source at index 3) followed by a blank-line-terminated
    attention matrix.  Returns (srcs, tgts, alis) where alis are numpy arrays.

    NOTE(review): the transpose condition inside the loop lists
    "OpenNMT" while the identical trailing-matrix branch below lists
    "Sockeye" -- one of the two is almost certainly wrong; confirm.
    """
    with open(filename, 'r', encoding='utf-8') as fh:
        alis = []
        tgts = []
        srcs = []
        wasNew = True
        aliTXT = ''
        for line in fh:
            # Reads the first line that contains a translation and it's source sentence
            if wasNew:
                # flush the matrix accumulated for the *previous* sentence
                if len(aliTXT) > 0:
                    c = StringIO(aliTXT)
                    ali = np.loadtxt(c)
                    # Now we probably have source and target tokens + attentions
                    if de_bpe == True:
                        # In case we want to combine subword units and the respective attentions (by summing columns and averaging rows)
                        sources = escape(lineparts[3]).strip().split()
                        targets = escape(lineparts[1]).strip().split()
                        (srcs, tgts, ali) = deBPE(srcs, tgts, ali, sources, targets)
                    if from_system == "Nematus" or from_system == "OpenNMT" or from_system == "Marian-Dev":
                        ali = ali.transpose()
                    alis.append(ali)
                    aliTXT = ''
                lineparts = line.split(' ||| ')
                if from_system == "Nematus":
                    lineparts[1] += ' <EOS>'
                    lineparts[3] += ' <EOS>'
                tgts.append(escape(lineparts[1]).strip().split())
                srcs.append(escape(lineparts[3]).strip().split())
                wasNew = False
                continue
            # Reads the attention matrix into "aliTXT"
            if line != '\n' and line != '\r\n':
                aliTXT += line
            else:
                wasNew = True
        # flush the matrix of the final sentence (no trailing blank line)
        if len(aliTXT) > 0:
            c = StringIO(aliTXT)
            ali = np.loadtxt(c)
            if de_bpe == True:
                # In case we want to combine subword units and the respective attentions (by summing columns and averaging rows)
                sources = escape(lineparts[3]).strip().split()
                targets = escape(lineparts[1]).strip().split()
                (srcs, tgts, ali) = deBPE(srcs, tgts, ali, sources, targets)
            if from_system == "Nematus" or from_system == "Sockeye" or from_system == "Marian-Dev":
                ali = ali.transpose()
            alis.append(ali)
            aliTXT = ''
    return srcs, tgts, alis
def escape(string):
    """Encode double and single quotes as HTML entities for safe embedding
    in the generated web output."""
    for raw, entity in (('"', '&quot;'), ("'", '&apos;')):
        string = string.replace(raw, entity)
    return string
def readAmu(in_file, src_file):
    """Parse amuNMT-style output: translations plus comma-separated attention
    weights in ``in_file`` (' ||| '-separated), source sentences in
    ``src_file``.  Returns (srcs, tgts, alis) with alis as transposed numpy
    arrays and '<EOS>' appended to each source sentence."""
    with open(src_file, 'r', encoding='utf-8') as fi:
        with open(in_file, 'r', encoding='utf-8') as fh:
            alis = []
            tgts = []
            srcs = []
            aliTXT = ''
            for src_line, out_line in izip(fi, fh):
                lineparts = out_line.split(' ||| ')
                src_line = src_line.strip() + ' <EOS>'
                tgts.append(escape(lineparts[0]).strip().split())
                srcs.append(escape(src_line).split())
                #alignment weights
                # one whitespace-separated weight row per target token;
                # commas within a row become spaces so np.loadtxt can parse it
                weightparts = lineparts[1].split(' ')
                for weightpart in weightparts:
                    aliTXT += weightpart.replace(',',' ') + '\n'
                if len(aliTXT) > 0:
                    c = StringIO(aliTXT)
                    ali = np.loadtxt(c)
                    ali = ali.transpose()
                    alis.append(ali)
                    aliTXT = ''
    return srcs, tgts, alis
def compare(srcs1, srcs2):
    """Return True when the two tokenized sentence lists are equal after
    ensuring every sentence ends with '<EOS>'.

    Note: both arguments are mutated in place (the '<EOS>' padding sticks).
    """
    for i in range(len(srcs1)):
        for sentence in (srcs1[i], srcs2[i]):
            if sentence[-1] != '<EOS>':
                sentence.append('<EOS>')
    return srcs1 == srcs2
def synchData(data1,data2):
    """Pad the token lists at index 1 of paired entries with empty strings
    until both have the same length, then return (data1, data2).

    Both arguments are mutated in place.
    """
    for i in range(len(data1)):
        gap = len(data1[i][1]) - len(data2[i][1])
        if gap > 0:
            data2[i][1].extend([u''] * gap)
        elif gap < 0:
            data1[i][1].extend([u''] * (-gap))
    return data1, data2
def longestCommonSubstring(s1, s2):
    """Return the longest contiguous substring shared by ``s1`` and ``s2``
    (classic O(len(s1)*len(s2)) dynamic-programming table).  Returns '' when
    nothing is shared; on ties the match found first wins."""
    lengths = [[0] * (len(s2) + 1) for _ in range(len(s1) + 1)]
    best_len = 0
    best_end = 0
    for x in range(1, len(s1) + 1):
        for y in range(1, len(s2) + 1):
            if s1[x - 1] == s2[y - 1]:
                lengths[x][y] = lengths[x - 1][y - 1] + 1
                if lengths[x][y] > best_len:
                    best_len = lengths[x][y]
                    best_end = x
    return s1[best_end - best_len: best_end]
def processAlignments(data, folder, inputfile, outputType, num, refs=False):
    """Write the web-visualisation JS files (.ali/.src/.trg/.con/.sc) for every
    (source_tokens, target_tokens, attention_matrix) triple in ``data`` and,
    for console output types, render the alignment matrix and confidence
    scores to stdout.

    data:       list of (src tokens, tgt tokens, attention ndarray) triples
    folder:     output directory for the generated *.js files
    inputfile:  original alignment file name (basis for the output file names)
    outputType: 'web', 'compare', 'color', 'block' or 'block2'
    num:        1-based sentence number to restrict processing to; values
                below 1 process every sentence
    refs:       optional tokenized reference sentences; enables BLEU via NLTK
    """
    with open(folder + "/" + ntpath.basename(inputfile) + '.ali.js', 'w', encoding='utf-8') as out_a_js:
        with open(folder + "/" + ntpath.basename(inputfile) + '.src.js', 'w', encoding='utf-8') as out_s_js:
            with open(folder + "/" + ntpath.basename(inputfile) + '.trg.js', 'w', encoding='utf-8') as out_t_js:
                with open(folder + "/" + ntpath.basename(inputfile) + '.con.js', 'w', encoding='utf-8') as out_c_js:
                    with open(folder + "/" + ntpath.basename(inputfile) + '.sc.js', 'w', encoding='utf-8') as out_sc_js:
                        out_a_js.write(u'var alignments = [\n')
                        out_s_js.write(u'var sources = [\n')
                        out_t_js.write(u'var targets = [\n')
                        out_c_js.write(u'var confidences = [\n')
                        out_sc_js.write(u'var sentence_confidences = [\n')
                        num = int(num) - 1
                        if num > -1 and (num < len(data)):
                            data = [data[num]]
                        elif num >= len(data):
                            print ('The selected sentence number is higher than the sentence count!\n')
                            printHelp()
                            sys.exit()
                        for i in range(0, len(data)):
                            (src, tgt, rawAli) = data[i]
                            #In case the source string is empty
                            if rawAli.ndim == 1:
                                rawAli = np.array([rawAli])
                            # trim the matrix to the non-empty target tokens and the source length
                            ali = [l[:len(list(filter(None, tgt)))] for l in rawAli[:len(src)]]
                            # per-token confidence scores for the .sc.js file
                            srcTotal = []
                            trgTotal = []
                            tali = np.array(ali).transpose()
                            for a in range(0, len(ali)):
                                srcTotal.append(str(math.pow(math.e, -0.05 * math.pow((getCP([ali[a]]) + getEnt([ali[a]]) + getRevEnt([ali[a]])), 2))))
                            for a in range(0, len(tali)):
                                trgTotal.append(str(math.pow(math.e, -0.05 * math.pow((getCP([tali[a]]) + getEnt([tali[a]]) + getRevEnt([tali[a]])), 2))))
                            JoinedSource = " ".join(src)
                            JoinedTarget = " ".join(tgt)
                            StrippedSource = ''.join(c for c in JoinedSource if unicodedata.category(c).startswith('L')).replace('EOS','').replace('quot','').replace('apos','')
                            StrippedTarget = ''.join(c for c in JoinedTarget if unicodedata.category(c).startswith('L')).replace('EOS','').replace('quot','').replace('apos','')
                            #Get the confidence metrics
                            CDP = round(getCP(ali), 10)
                            APout = round(getEnt(ali), 10)
                            APin = round(getRevEnt(ali), 10)
                            Total = round(CDP + APout + APin, 10)
                            #Can we calculate BLEU?
                            bleuNumber = -1
                            if(refs):
                                try:
                                    #NLTK requires Python versions 3.5, 3.6, 3.7, or 3.8
                                    version = sys.version_info
                                    if version.major == 3 and version.minor > 4:
                                        from nltk.translate import bleu
                                        from nltk.translate.bleu_score import SmoothingFunction
                                        sm = SmoothingFunction()
                                        refNumber = i if num < 0 else num
                                        deBpeRef = " ".join(refs[refNumber]).replace('@@ ','')
                                        deBpeHyp = JoinedTarget.replace('@@ ','').replace('<EOS>','').strip()
                                        bleuNumber = round(bleu([deBpeRef.split()], deBpeHyp.split(), smoothing_function=sm.method3)*100, 2)
                                        bleuScore = u', ' + repr(bleuNumber)
                                    else:
                                        refs = False
                                        bleuScore = u''
                                except ImportError:
                                    sys.stdout.write('NLTK not found! BLEU will not be calculated\n')
                                    refs = False
                                    bleuScore = u''
                            else:
                                bleuScore = u''
                            # undo BPE/HTML escaping before measuring string similarity
                            jls = JoinedSource.replace('@@ ','').replace('<EOS>','').replace('&quot;','"').replace("&apos;","'").replace("&amp;","&").replace("@-@","-").strip()
                            jlt = JoinedTarget.replace('@@ ','').replace('<EOS>','').replace('&quot;','"').replace("&apos;","'").replace("&amp;","&").replace("@-@","-").strip()
                            longest = longestCommonSubstring(jls, jlt).strip()
                            similarity = len(longest)/len(jlt)
                            #Penalize sentences with more than 4 tokens
                            if (len(tgt) > 4) and (similarity > 0.3):
                                #The more similar, the higher penalty
                                #It's worse to have more words with a higher similarity
                                #Let's make it between 0.7 and about 1.5 for veeeery long sentences
                                multiplier = ((0.8+(len(tgt)*0.01)) * (3-((1-similarity)*5)) * (0.7 + similarity) * math.tan(similarity))
                                Total = round(CDP + APout + APin - multiplier, 10)
                            # e^(-1(x^2))
                            CDP_pr = round(math.pow(math.e, -1 * math.pow(CDP, 2)) * 100, 2)
                            # e^(-0.05(x^2))
                            APout_pr = round(math.pow(math.e, -0.05 * math.pow(APout, 2)) * 100, 2)
                            APin_pr = round(math.pow(math.e, -0.05 * math.pow(APin, 2)) * 100, 2)
                            Total_pr = round(math.pow(math.e, -0.05 * math.pow(Total, 2)) * 100, 2)
                            # 1-e^(-0.0001(x^2))
                            Len = round((1-math.pow(math.e, -0.0001 * math.pow(len(JoinedSource), 2))) * 100, 2)
                            out_s_js.write('["'+ JoinedSource.replace(' ','", "') +'"], \n')
                            out_t_js.write('["'+ JoinedTarget.replace(' ','", "') +'"], \n')
                            out_c_js.write(u'['+ repr(CDP_pr) + u', '+ repr(APout_pr) + u', '+ repr(APin_pr) + u', '+ repr(Total_pr)
                                + u', '+ repr(Len) + u', '+ repr(len(JoinedSource)) + u', '
                                + repr(round(similarity*100, 2))
                                + bleuScore
                                + u'], \n')
                            out_sc_js.write(u'[[' + ", ".join(srcTotal) + u'], ' + u'[' + ", ".join(trgTotal) + u'], ' + u'], \n')
                            # emit [word_index, weight, target_index] triples for the web view,
                            # and render the matrix to the console for the other output types
                            word = 0
                            out_a_js.write(u'[')
                            for ali_i in ali:
                                linePartC=0
                                for ali_j in ali_i:
                                    # Maybe worth playing around with this for transformer (and convolutional) NMT output
                                    # if ali_j < 0.15:
                                        # ali_j = 0
                                    out_a_js.write(u'['+repr(word)+u', ' + str(np.round(ali_j, 8)) + u', '+repr(linePartC)+u'], ')
                                    linePartC+=1
                                    if outputType == 'color':
                                        printColor(ali_j)
                                    elif outputType == 'block':
                                        printBlock(ali_j)
                                    elif outputType == 'block2':
                                        printBlock2(ali_j)
                                if outputType != 'web' and outputType != 'compare':
                                    sys.stdout.write(src[word].encode('utf-8', errors='replace').decode('utf-8'))
                                    word+=1
                            if outputType != 'web' and outputType != 'compare':
                                sys.stdout.write('\n')
                            # write target sentences
                            #build 2d array
                            # staircase layout: each target word starts at column 2*tw; a word
                            # drops to the first row whose columns at that offset are free
                            occupied_to = []
                            outchars = []
                            outchars.append([])
                            tw = 0
                            for tword in tgt:
                                columns = len(tgt)
                                # Some characters use multiple symbols. Need to decode and then encode...
                                twchars = list(tword)
                                twlen = len(twchars)
                                xpos = tw * 2
                                emptyline = 0
                                for el in range(0, len(occupied_to)):
                                    # if occupied, move to a new line!
                                    if occupied_to[el] < xpos:
                                        emptyline = el
                                        if len(outchars) < emptyline+1:
                                            # add a new row
                                            outchars.append([])
                                        break
                                    if el == len(occupied_to)-1:
                                        emptyline=el+1
                                        if len(outchars) < emptyline+1:
                                            outchars.append([])
                                for column in range(0, xpos):
                                    if len(outchars[emptyline]) <= column:
                                        outchars[emptyline].append(' ')
                                for charindex in range(0, twlen):
                                    if xpos+charindex == len(outchars[emptyline]):
                                        outchars[emptyline].append(twchars[charindex])
                                    else:
                                        # NOTE(review): this writes at [charindex] rather than
                                        # [xpos+charindex]; looks like an off-by-xpos slot -- confirm.
                                        outchars[emptyline][charindex] = twchars[charindex]
                                if len(occupied_to) <= emptyline:
                                    occupied_to.append(xpos+twlen+1)
                                else:
                                    occupied_to[emptyline]=xpos+twlen+1;
                                tw+=1
                            #print 2d array
                            if outputType != 'web' and outputType != 'compare':
                                for liline in outchars:
                                    sys.stdout.write(''.join(liline).encode('utf-8', errors='replace').decode('utf-8') + '\n')
                                # print scores
                                sys.stdout.write('\nCoverage Deviation Penalty: \t\t' + repr(round(CDP, 8)) + ' (' + repr(CDP_pr) + '%)' + '\n')
                                sys.stdout.write('Input Absentmindedness Penalty: \t' + repr(round(APin, 8)) + ' (' + repr(APin_pr) + '%)' + '\n')
                                sys.stdout.write('Output Absentmindedness Penalty: \t' + repr(round(APout, 8)) + ' (' + repr(APout_pr) + '%)' + '\n')
                                sys.stdout.write('Confidence: \t\t\t\t' + repr(round(Total, 8)) + ' (' + repr(Total_pr) + '%)' + '\n')
                                sys.stdout.write('Similarity: \t\t\t\t' + repr(round(similarity*100, 2)) + '%' + '\n')
                                if bleuNumber > -1:
                                    sys.stdout.write('BLEU: \t\t\t\t\t' + repr(bleuNumber) + '\n')
                            # write target sentences
                            word = 0
                            out_a_js.write(u'], \n')
                            if outputType != 'web' and outputType != 'compare':
                                sys.stdout.write('\n')
                        out_a_js.write(u'\n]')
                        out_s_js.write(u']')
                        out_t_js.write(u']')
                        out_c_js.write(u']')
                        out_sc_js.write(u']')
fd0c73d36a9eef136cedf435bbbaf020ebd6db4f | 1,455 | py | Python | packages/monomanage/src/monomanage/draw/api.py | 0mars/graphx | 8f58df3979b5fd96e4183811c9d8339c92367d00 | [
"Apache-2.0"
] | null | null | null | packages/monomanage/src/monomanage/draw/api.py | 0mars/graphx | 8f58df3979b5fd96e4183811c9d8339c92367d00 | [
"Apache-2.0"
] | null | null | null | packages/monomanage/src/monomanage/draw/api.py | 0mars/graphx | 8f58df3979b5fd96e4183811c9d8339c92367d00 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2019 Simon Biggs
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version (the "AGPL-3.0+").
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License and the additional terms for more
# details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ADDITIONAL TERMS are also included as allowed by Section 7 of the GNU
# Affero General Public License. These additional terms are Sections 1, 5,
# 6, 7, 8, and 9 from the Apache License, Version 2.0 (the "Apache-2.0")
# where all references to the definition "License" are instead defined to
# mean the AGPL-3.0+.
# You should have received a copy of the Apache-2.0 along with this
# program. If not, see <http://www.apache.org/licenses/LICENSE-2.0>.
from .packages import draw_packages
from .directories import draw_directory_modules
from .files import draw_file_modules
| 40.416667 | 74 | 0.768385 | # Copyright (C) 2019 Simon Biggs
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version (the "AGPL-3.0+").
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License and the additional terms for more
# details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ADDITIONAL TERMS are also included as allowed by Section 7 of the GNU
# Affero General Public License. These additional terms are Sections 1, 5,
# 6, 7, 8, and 9 from the Apache License, Version 2.0 (the "Apache-2.0")
# where all references to the definition "License" are instead defined to
# mean the AGPL-3.0+.
# You should have received a copy of the Apache-2.0 along with this
# program. If not, see <http://www.apache.org/licenses/LICENSE-2.0>.
from .packages import draw_packages
from .directories import draw_directory_modules
from .files import draw_file_modules
def draw_all(save_directory):
    """Render every diagram type (packages, directory modules, file modules)
    into ``save_directory``."""
    draw_packages(save_directory)
    draw_directory_modules(save_directory)
    draw_file_modules(save_directory)
| 123 | 0 | 23 |
2a890cc6ac3eba416edc773bb131c3af8c69287d | 5,589 | py | Python | src/ingest_validation_tools/error_report.py | pecan88/ingest-validation-tools | f26b08ca4e0ef7bcdc504032850e2b26470199a7 | [
"MIT"
] | 6 | 2020-04-02T19:35:52.000Z | 2021-03-17T20:48:41.000Z | src/ingest_validation_tools/error_report.py | pecan88/ingest-validation-tools | f26b08ca4e0ef7bcdc504032850e2b26470199a7 | [
"MIT"
] | 601 | 2020-03-13T19:34:22.000Z | 2022-03-31T17:35:09.000Z | src/ingest_validation_tools/error_report.py | pecan88/ingest-validation-tools | f26b08ca4e0ef7bcdc504032850e2b26470199a7 | [
"MIT"
] | 12 | 2020-09-23T18:48:54.000Z | 2022-01-06T00:28:13.000Z | from datetime import datetime
from yaml import Dumper, dump
from webbrowser import open_new_tab
from pathlib import Path
from yattag import Doc, indent
from ingest_validation_tools.message_munger import munge
# Force dump not to use alias syntax.
# https://stackoverflow.com/questions/13518819/avoid-references-in-pyyaml
Dumper.ignore_aliases = lambda *args: True
def _build_list(anything, path=None):
'''
>>> flat = _build_list({
... 'nested dict': {
... 'like': 'this'
... },
... 'nested array': [
... 'like',
... 'this'
... ],
... 'string': 'like this',
... 'number': 42
... })
>>> print('\\n'.join(flat))
nested dict: like: this
nested array: like
nested array: this
string: like this
number: 42
'''
prefix = f'{path}: ' if path else ''
if isinstance(anything, dict):
if all(isinstance(v, (float, int, str)) for v in anything.values()):
return [f'{prefix}{k}: {v}' for k, v in anything.items()]
else:
to_return = []
for k, v in anything.items():
to_return += _build_list(v, path=f'{prefix}{k}')
return to_return
elif isinstance(anything, list):
if all(isinstance(v, (float, int, str)) for v in anything):
return [f'{prefix}{v}' for v in anything]
else:
to_return = []
for v in anything:
to_return += _build_list(v, path=path)
return to_return
else:
return [f'{prefix}{anything}']
def _build_doc(tag, line, anything):
'''
>>> doc, tag, text, line = Doc().ttl()
>>> _build_doc(tag, line, {
... 'nested dict': {
... 'like': 'this'
... },
... 'nested array': [
... 'like',
... 'this'
... ]
... })
>>> print(indent(doc.getvalue()))
<details>
<summary>nested dict</summary>
<dl>
<dt>like</dt>
<dd>this</dd>
</dl>
</details>
<details>
<summary>nested array</summary>
<ul>
<li>like</li>
<li>this</li>
</ul>
</details>
'''
if isinstance(anything, dict):
if all(isinstance(v, (float, int, str)) for v in anything.values()):
with tag('dl'):
for k, v in anything.items():
line('dt', k)
line('dd', v)
else:
for k, v in anything.items():
with tag('details'):
line('summary', k)
_build_doc(tag, line, v)
elif isinstance(anything, list):
if all(isinstance(v, (float, int, str)) for v in anything):
with tag('ul'):
for v in anything:
line('li', v)
else:
for v in anything:
_build_doc(tag, line, v)
else:
line('div', anything)
| 29.571429 | 86 | 0.509572 | from datetime import datetime
from yaml import Dumper, dump
from webbrowser import open_new_tab
from pathlib import Path
from yattag import Doc, indent
from ingest_validation_tools.message_munger import munge
# Force dump not to use alias syntax.
# https://stackoverflow.com/questions/13518819/avoid-references-in-pyyaml
Dumper.ignore_aliases = lambda *args: True
class ErrorReport:
    """Formats a (possibly nested) errors dict as a flat list, YAML, plain
    text, Markdown, an HTML fragment/document, or a page opened in the
    browser.  An empty dict renders as "No errors!"."""
    def __init__(self, errors_dict):
        self.errors = errors_dict
        # A whitespace hint is appended to every non-empty report (note:
        # this mutates the caller's dict).
        if self.errors:
            self.errors['Hint'] = \
                'If validation fails because of extra whitespace in the TSV, try:\n' \
                'src/cleanup_whitespace.py --tsv_in original.tsv --tsv_out clean.tsv'
    def _as_list(self):
        """Flatten the errors into munged 'path: value' strings."""
        return [munge(m) for m in _build_list(self.errors)]
    def as_text_list(self):
        """One flattened error per line; 'No errors!' when empty."""
        return '\n'.join(self._as_list()) or 'No errors!\n'
    def as_yaml(self):
        """Raw YAML dump of the errors dict (insertion order preserved)."""
        return dump(self.errors, sort_keys=False)
    def as_text(self):
        """YAML dump, or 'No errors!' when there is nothing to report."""
        if not self.errors:
            return 'No errors!\n'
        else:
            return self.as_yaml()
    def as_md(self):
        """The text report wrapped in a Markdown code fence."""
        return f'```\n{self.as_text()}```'
    def as_html_fragment(self):
        '''
        >>> print(ErrorReport({}).as_html_fragment())
        No errors!
        >>> report = ErrorReport({'really': 'simple'})
        >>> print(report.as_html_fragment())
        <dl>
          <dt>really</dt>
          <dd>simple</dd>
          <dt>Hint</dt>
          <dd>If validation fails because of extra whitespace in the TSV, try:
        src/cleanup_whitespace.py --tsv_in original.tsv --tsv_out clean.tsv</dd>
        </dl>
        '''
        if not self.errors:
            return 'No errors!'
        doc, tag, _, line = Doc().ttl()
        _build_doc(tag, line, self.errors)
        return indent(doc.getvalue())
    def as_html_doc(self):
        """Full standalone HTML page: the fragment plus CSS and open/close-all
        buttons driving the <details> elements."""
        doc, tag, text, line = Doc().ttl()
        for_each = "Array.from(document.getElementsByTagName('details')).forEach"
        with tag('html'):
            with tag('head'):
                with tag('style', type='text/css'):
                    text('''
                        details {
                            padding-left: 1em;
                        }
                        ul {
                            margin: 0;
                        }''')
            with tag('body'):
                line(
                    'button', 'Open all',
                    onclick=f"{for_each}((node)=>{{node.setAttribute('open','')}})")
                line(
                    'button', 'Close all',
                    onclick=f"{for_each}((node)=>{{node.removeAttribute('open')}})")
                _build_doc(tag, line, self.errors)
        return '<!DOCTYPE html>\n' + indent(doc.getvalue())
    def as_browser(self):
        """Write the HTML report to a timestamped file under error-reports/
        and open it in a new browser tab; returns the file URL (or the plain
        text report when there are no errors)."""
        if not self.errors:
            return self.as_text()
        html = self.as_html_doc()
        filename = f"{str(datetime.now()).replace(' ', '_')}.html"
        path = Path(__file__).parent / 'error-reports' / filename
        path.write_text(html)
        url = f'file://{path.resolve()}'
        open_new_tab(url)
        return f'See {url}'
def _build_list(anything, path=None):
'''
>>> flat = _build_list({
... 'nested dict': {
... 'like': 'this'
... },
... 'nested array': [
... 'like',
... 'this'
... ],
... 'string': 'like this',
... 'number': 42
... })
>>> print('\\n'.join(flat))
nested dict: like: this
nested array: like
nested array: this
string: like this
number: 42
'''
prefix = f'{path}: ' if path else ''
if isinstance(anything, dict):
if all(isinstance(v, (float, int, str)) for v in anything.values()):
return [f'{prefix}{k}: {v}' for k, v in anything.items()]
else:
to_return = []
for k, v in anything.items():
to_return += _build_list(v, path=f'{prefix}{k}')
return to_return
elif isinstance(anything, list):
if all(isinstance(v, (float, int, str)) for v in anything):
return [f'{prefix}{v}' for v in anything]
else:
to_return = []
for v in anything:
to_return += _build_list(v, path=path)
return to_return
else:
return [f'{prefix}{anything}']
def _build_doc(tag, line, anything):
'''
>>> doc, tag, text, line = Doc().ttl()
>>> _build_doc(tag, line, {
... 'nested dict': {
... 'like': 'this'
... },
... 'nested array': [
... 'like',
... 'this'
... ]
... })
>>> print(indent(doc.getvalue()))
<details>
<summary>nested dict</summary>
<dl>
<dt>like</dt>
<dd>this</dd>
</dl>
</details>
<details>
<summary>nested array</summary>
<ul>
<li>like</li>
<li>this</li>
</ul>
</details>
'''
if isinstance(anything, dict):
if all(isinstance(v, (float, int, str)) for v in anything.values()):
with tag('dl'):
for k, v in anything.items():
line('dt', k)
line('dd', v)
else:
for k, v in anything.items():
with tag('details'):
line('summary', k)
_build_doc(tag, line, v)
elif isinstance(anything, list):
if all(isinstance(v, (float, int, str)) for v in anything):
with tag('ul'):
for v in anything:
line('li', v)
else:
for v in anything:
_build_doc(tag, line, v)
else:
line('div', anything)
| 1,708 | 887 | 23 |
ad02b9fe84b16f1f139083b8fd2109fe31a4c960 | 9,705 | py | Python | src/guesswhat/eval/interactive_dialogue.py | devineproject/guesswhat | 512e136c868ceccf047cdba243cf46037d4037fe | [
"Apache-2.0"
] | 72 | 2017-07-07T04:40:32.000Z | 2021-10-05T13:00:02.000Z | src/guesswhat/eval/interactive_dialogue.py | devineproject/guesswhat | 512e136c868ceccf047cdba243cf46037d4037fe | [
"Apache-2.0"
] | 25 | 2017-06-30T18:35:24.000Z | 2021-11-21T12:01:09.000Z | src/guesswhat/eval/interactive_dialogue.py | devineproject/guesswhat | 512e136c868ceccf047cdba243cf46037d4037fe | [
"Apache-2.0"
] | 40 | 2017-06-30T12:13:17.000Z | 2021-09-25T07:34:54.000Z |
import argparse
import os
from multiprocessing import Pool
import logging
import random
import copy
import tensorflow as tf
from generic.data_provider.iterator import BasicIterator
from generic.tf_utils.evaluator import Evaluator
from generic.data_provider.image_loader import get_img_builder
from guesswhat.models.oracle.oracle_network import OracleNetwork
from guesswhat.models.qgen.qgen_lstm_network import QGenNetworkLSTM
from guesswhat.models.guesser.guesser_network import GuesserNetwork
from guesswhat.models.looper.basic_looper import BasicLooper
from guesswhat.models.qgen.qgen_wrapper import QGenWrapper, QGenUserWrapper
from guesswhat.models.oracle.oracle_wrapper import OracleWrapper, OracleUserWrapper
from guesswhat.models.guesser.guesser_wrapper import GuesserWrapper, GuesserUserWrapper
from guesswhat.data_provider.guesswhat_dataset import Dataset
from guesswhat.data_provider.looper_batchifier import LooperBatchifier
from guesswhat.data_provider.guesswhat_tokenizer import GWTokenizer
from generic.utils.config import load_config, get_config_from_xp
if __name__ == '__main__':
    # Interactive GuessWhat?! dialogue runner: each of the three agents
    # (Oracle, QGen, Guesser) is either a pretrained network restored from a
    # checkpoint or the human user, depending on whether an identifier was
    # supplied on the command line.
    parser = argparse.ArgumentParser('Question generator (policy gradient baseline))')
    parser.add_argument("-data_dir", type=str, required=True, help="Directory with data")
    parser.add_argument("-img_dir", type=str, help='Directory with images to feed networks')
    parser.add_argument("-img_raw_dir", type=str, help='Directory with images to display')
    parser.add_argument("-crop_dir", type=str, help='Directory with crops')
    parser.add_argument("-exp_dir", type=str, required=False, help="Directory to output dialogue")
    parser.add_argument("-config", type=str, required=True, help='Config file')
    parser.add_argument("-dict_file", type=str, default="dict.json", help="Dictionary file name")
    parser.add_argument("-networks_dir", type=str, help="Directory with pretrained networks")
    parser.add_argument("-oracle_identifier", type=str, default="156cb3d352b97ba12ffd6cf547281ae2", required=False , help='Oracle identifier - if none: user must be the oracle') # Use checkpoint id instead?
    parser.add_argument("-qgen_identifier", type=str, default="7b24d8b68f94bde9774cd9555584fd93", required=False, help='Qgen identifier - if none: user must be the Qgen')
    parser.add_argument("-guesser_identifier", type=str, required=False, help='Guesser identifier - if none: user must be the guesser')
    parser.add_argument("-gpu_ratio", type=float, default=0.95, help="How many GPU ram is required? (ratio)")
    args = parser.parse_args()
    eval_config, exp_identifier, save_path = load_config(args.config, args.exp_dir)
    # Load all networks configs
    logger = logging.getLogger()
    ###############################
    #  LOAD DATA
    #############################
    # Load image
    logger.info('Loading images..')
    image_builder = get_img_builder(eval_config['image'], args.img_dir)
    crop_builder = None
    if eval_config.get('crop', False):
        logger.info('Loading crops..')
        crop_builder = get_img_builder(eval_config['crop'], args.crop_dir, is_crop=True)
    # Load data
    logger.info('Loading data..')
    trainset = Dataset(args.data_dir, "train", image_builder, crop_builder)
    validset = Dataset(args.data_dir, "valid", image_builder, crop_builder)
    testset = Dataset(args.data_dir, "test", image_builder, crop_builder)
    # Merge all splits into one pool of games to pick from interactively;
    # dummy_dataset later holds the single game currently being played.
    dataset, dummy_dataset = trainset, validset
    dataset.games = trainset.games + validset.games + testset.games
    dummy_dataset.games = []
    # hack dataset to only keep one game by image
    image_id_set = {}
    games = []
    for game in dataset.games:
        if game.image.id not in image_id_set:
            games.append(game)
            image_id_set[game.image.id] = 1
    dataset.games = games
    # Load dictionary
    logger.info('Loading dictionary..')
    tokenizer = GWTokenizer(os.path.join(args.data_dir, args.dict_file))
    ###############################
    #  START TRAINING
    #############################
    # CPU/GPU option
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_ratio)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        ###############################
        #  LOAD NETWORKS
        #############################
        # Each agent restores only its own variables (filtered by name prefix)
        # from its experiment checkpoint; otherwise the human plays that role.
        if args.oracle_identifier is not None:
            oracle_config = get_config_from_xp(os.path.join(args.networks_dir, "oracle"), args.oracle_identifier)
            oracle_network = OracleNetwork(oracle_config, num_words=tokenizer.no_words)
            oracle_var = [v for v in tf.global_variables() if "oracle" in v.name]
            oracle_saver = tf.train.Saver(var_list=oracle_var)
            oracle_saver.restore(sess, os.path.join(args.networks_dir, 'oracle', args.oracle_identifier, 'params.ckpt'))
            oracle_wrapper = OracleWrapper(oracle_network, tokenizer)
        else:
            oracle_wrapper = OracleUserWrapper(tokenizer)
            logger.info("No Oracle was registered >>> use user input")
        if args.guesser_identifier is not None:
            guesser_config = get_config_from_xp(os.path.join(args.networks_dir, "guesser"), args.guesser_identifier)
            guesser_network = GuesserNetwork(guesser_config["model"], num_words=tokenizer.no_words)
            guesser_var = [v for v in tf.global_variables() if "guesser" in v.name]
            guesser_saver = tf.train.Saver(var_list=guesser_var)
            guesser_saver.restore(sess, os.path.join(args.networks_dir, 'guesser', args.guesser_identifier, 'params.ckpt'))
            guesser_wrapper = GuesserWrapper(guesser_network)
        else:
            guesser_wrapper = GuesserUserWrapper(tokenizer, img_raw_dir=args.img_raw_dir)
            logger.info("No Guesser was registered >>> use user input")
        if args.qgen_identifier is not None:
            qgen_config = get_config_from_xp(os.path.join(args.networks_dir, "qgen"), args.qgen_identifier)
            qgen_network = QGenNetworkLSTM(qgen_config["model"], num_words=tokenizer.no_words, policy_gradient=False)
            qgen_var = [v for v in tf.global_variables() if "qgen" in v.name]  # and 'rl_baseline' not in v.name
            qgen_saver = tf.train.Saver(var_list=qgen_var)
            qgen_saver.restore(sess, os.path.join(args.networks_dir, 'qgen', args.qgen_identifier, 'params.ckpt'))
            qgen_network.build_sampling_graph(qgen_config["model"], tokenizer=tokenizer, max_length=eval_config['loop']['max_depth'])
            qgen_wrapper = QGenWrapper(qgen_network, tokenizer,
                                       max_length=eval_config['loop']['max_depth'],
                                       k_best=eval_config['loop']['beam_k_best'])
        else:
            qgen_wrapper = QGenUserWrapper(tokenizer)
            logger.info("No QGen was registered >>> use user input")
        # The looper drives the full question/answer/guess cycle for one game.
        looper_evaluator = BasicLooper(eval_config,
                                       oracle_wrapper=oracle_wrapper,
                                       guesser_wrapper=guesser_wrapper,
                                       qgen_wrapper=qgen_wrapper,
                                       tokenizer=tokenizer,
                                       batch_size=1)
        # NOTE(review): `logs` and `final_val_score` are never used below —
        # presumably leftovers from the training script this was derived from.
        logs = []
        # Start training
        final_val_score = 0.
        batchifier = LooperBatchifier(tokenizer, generate_new_games=False)
        while True:
            # Start new game
            while True:
                id_str = input('Do you want to play a new game? (Yes/No) --> ').lower()
                if id_str == "y" or id_str == "yes": break
                elif id_str == "n" or id_str == "no": exit(0)
            # Pick id image
            image_id = 0
            while True:
                id_str = int(input('What is the image id you want to select? (-1 for random id) --> '))
                if id_str in image_id_set:
                    image_id = id_str
                    break
                elif id_str == -1:
                    image_id = random.choice(list(image_id_set.keys()))
                    break
                else:
                    print("Could not find the following image id: {}".format(id_str))
            # Deep-copy so that per-game mutations (object_id) never touch the pool.
            game = [g for g in dataset.games if g.image.id == image_id][0]
            game = copy.deepcopy(game)
            print("Selecting image {}".format(game.image.filename))
            # Pick id object
            print("Available objects")
            for i, obj in enumerate(game.objects):
                print(" -", i, ":", obj.category, "\t", obj.bbox)
            print("Type '(S)how' to display the image with the object")
            while True:
                id_str = input('Which object id do you want to select? (-1 for random id) --> ')
                if id_str == "S" or id_str.lower() == "show":
                    game.show(img_raw_dir=args.img_raw_dir, display_index=True)
                    continue
                id_str = int(id_str)
                if 0 <= id_str < len(game.objects):
                    object_index = id_str
                    object_id = game.objects[object_index].id
                    break
                elif id_str == -1:
                    object_id = random.choice(game.objects).id
                    break
                else:
                    print("Could not find the following object index: {}".format(id_str))
            game.object_id = object_id
            # Play exactly this one game through the looper.
            dummy_dataset.games = [game]
            iterator = BasicIterator(dummy_dataset, batch_size=1, batchifier=batchifier)
            success = looper_evaluator.process(sess, iterator, mode="greedy")
| 41.474359 | 207 | 0.636064 |
import argparse
import os
from multiprocessing import Pool
import logging
import random
import copy
import tensorflow as tf
from generic.data_provider.iterator import BasicIterator
from generic.tf_utils.evaluator import Evaluator
from generic.data_provider.image_loader import get_img_builder
from guesswhat.models.oracle.oracle_network import OracleNetwork
from guesswhat.models.qgen.qgen_lstm_network import QGenNetworkLSTM
from guesswhat.models.guesser.guesser_network import GuesserNetwork
from guesswhat.models.looper.basic_looper import BasicLooper
from guesswhat.models.qgen.qgen_wrapper import QGenWrapper, QGenUserWrapper
from guesswhat.models.oracle.oracle_wrapper import OracleWrapper, OracleUserWrapper
from guesswhat.models.guesser.guesser_wrapper import GuesserWrapper, GuesserUserWrapper
from guesswhat.data_provider.guesswhat_dataset import Dataset
from guesswhat.data_provider.looper_batchifier import LooperBatchifier
from guesswhat.data_provider.guesswhat_tokenizer import GWTokenizer
from generic.utils.config import load_config, get_config_from_xp
if __name__ == '__main__':
parser = argparse.ArgumentParser('Question generator (policy gradient baseline))')
parser.add_argument("-data_dir", type=str, required=True, help="Directory with data")
parser.add_argument("-img_dir", type=str, help='Directory with images to feed networks')
parser.add_argument("-img_raw_dir", type=str, help='Directory with images to display')
parser.add_argument("-crop_dir", type=str, help='Directory with crops')
parser.add_argument("-exp_dir", type=str, required=False, help="Directory to output dialogue")
parser.add_argument("-config", type=str, required=True, help='Config file')
parser.add_argument("-dict_file", type=str, default="dict.json", help="Dictionary file name")
parser.add_argument("-networks_dir", type=str, help="Directory with pretrained networks")
parser.add_argument("-oracle_identifier", type=str, default="156cb3d352b97ba12ffd6cf547281ae2", required=False , help='Oracle identifier - if none: user must be the oracle') # Use checkpoint id instead?
parser.add_argument("-qgen_identifier", type=str, default="7b24d8b68f94bde9774cd9555584fd93", required=False, help='Qgen identifier - if none: user must be the Qgen')
parser.add_argument("-guesser_identifier", type=str, required=False, help='Guesser identifier - if none: user must be the guesser')
parser.add_argument("-gpu_ratio", type=float, default=0.95, help="How many GPU ram is required? (ratio)")
args = parser.parse_args()
eval_config, exp_identifier, save_path = load_config(args.config, args.exp_dir)
# Load all networks configs
logger = logging.getLogger()
###############################
# LOAD DATA
#############################
# Load image
logger.info('Loading images..')
image_builder = get_img_builder(eval_config['image'], args.img_dir)
crop_builder = None
if eval_config.get('crop', False):
logger.info('Loading crops..')
crop_builder = get_img_builder(eval_config['crop'], args.crop_dir, is_crop=True)
# Load data
logger.info('Loading data..')
trainset = Dataset(args.data_dir, "train", image_builder, crop_builder)
validset = Dataset(args.data_dir, "valid", image_builder, crop_builder)
testset = Dataset(args.data_dir, "test", image_builder, crop_builder)
dataset, dummy_dataset = trainset, validset
dataset.games = trainset.games + validset.games + testset.games
dummy_dataset.games = []
# hack dataset to only keep one game by image
image_id_set = {}
games = []
for game in dataset.games:
if game.image.id not in image_id_set:
games.append(game)
image_id_set[game.image.id] = 1
dataset.games = games
# Load dictionary
logger.info('Loading dictionary..')
tokenizer = GWTokenizer(os.path.join(args.data_dir, args.dict_file))
###############################
# START TRAINING
#############################
# CPU/GPU option
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_ratio)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
###############################
# LOAD NETWORKS
#############################
if args.oracle_identifier is not None:
oracle_config = get_config_from_xp(os.path.join(args.networks_dir, "oracle"), args.oracle_identifier)
oracle_network = OracleNetwork(oracle_config, num_words=tokenizer.no_words)
oracle_var = [v for v in tf.global_variables() if "oracle" in v.name]
oracle_saver = tf.train.Saver(var_list=oracle_var)
oracle_saver.restore(sess, os.path.join(args.networks_dir, 'oracle', args.oracle_identifier, 'params.ckpt'))
oracle_wrapper = OracleWrapper(oracle_network, tokenizer)
else:
oracle_wrapper = OracleUserWrapper(tokenizer)
logger.info("No Oracle was registered >>> use user input")
if args.guesser_identifier is not None:
guesser_config = get_config_from_xp(os.path.join(args.networks_dir, "guesser"), args.guesser_identifier)
guesser_network = GuesserNetwork(guesser_config["model"], num_words=tokenizer.no_words)
guesser_var = [v for v in tf.global_variables() if "guesser" in v.name]
guesser_saver = tf.train.Saver(var_list=guesser_var)
guesser_saver.restore(sess, os.path.join(args.networks_dir, 'guesser', args.guesser_identifier, 'params.ckpt'))
guesser_wrapper = GuesserWrapper(guesser_network)
else:
guesser_wrapper = GuesserUserWrapper(tokenizer, img_raw_dir=args.img_raw_dir)
logger.info("No Guesser was registered >>> use user input")
if args.qgen_identifier is not None:
qgen_config = get_config_from_xp(os.path.join(args.networks_dir, "qgen"), args.qgen_identifier)
qgen_network = QGenNetworkLSTM(qgen_config["model"], num_words=tokenizer.no_words, policy_gradient=False)
qgen_var = [v for v in tf.global_variables() if "qgen" in v.name] # and 'rl_baseline' not in v.name
qgen_saver = tf.train.Saver(var_list=qgen_var)
qgen_saver.restore(sess, os.path.join(args.networks_dir, 'qgen', args.qgen_identifier, 'params.ckpt'))
qgen_network.build_sampling_graph(qgen_config["model"], tokenizer=tokenizer, max_length=eval_config['loop']['max_depth'])
qgen_wrapper = QGenWrapper(qgen_network, tokenizer,
max_length=eval_config['loop']['max_depth'],
k_best=eval_config['loop']['beam_k_best'])
else:
qgen_wrapper = QGenUserWrapper(tokenizer)
logger.info("No QGen was registered >>> use user input")
looper_evaluator = BasicLooper(eval_config,
oracle_wrapper=oracle_wrapper,
guesser_wrapper=guesser_wrapper,
qgen_wrapper=qgen_wrapper,
tokenizer=tokenizer,
batch_size=1)
logs = []
# Start training
final_val_score = 0.
batchifier = LooperBatchifier(tokenizer, generate_new_games=False)
while True:
# Start new game
while True:
id_str = input('Do you want to play a new game? (Yes/No) --> ').lower()
if id_str == "y" or id_str == "yes": break
elif id_str == "n" or id_str == "no": exit(0)
# Pick id image
image_id = 0
while True:
id_str = int(input('What is the image id you want to select? (-1 for random id) --> '))
if id_str in image_id_set:
image_id = id_str
break
elif id_str == -1:
image_id = random.choice(list(image_id_set.keys()))
break
else:
print("Could not find the following image id: {}".format(id_str))
game = [g for g in dataset.games if g.image.id == image_id][0]
game = copy.deepcopy(game)
print("Selecting image {}".format(game.image.filename))
# Pick id object
print("Available objects")
for i, obj in enumerate(game.objects):
print(" -", i, ":", obj.category, "\t", obj.bbox)
print("Type '(S)how' to display the image with the object")
while True:
id_str = input('Which object id do you want to select? (-1 for random id) --> ')
if id_str == "S" or id_str.lower() == "show":
game.show(img_raw_dir=args.img_raw_dir, display_index=True)
continue
id_str = int(id_str)
if 0 <= id_str < len(game.objects):
object_index = id_str
object_id = game.objects[object_index].id
break
elif id_str == -1:
object_id = random.choice(game.objects).id
break
else:
print("Could not find the following object index: {}".format(id_str))
game.object_id = object_id
dummy_dataset.games = [game]
iterator = BasicIterator(dummy_dataset, batch_size=1, batchifier=batchifier)
success = looper_evaluator.process(sess, iterator, mode="greedy")
| 0 | 0 | 0 |
50ef71ed0fe508f68ab133a1aa5e9959294d900c | 953 | py | Python | setup.py | rlipperts/osm-rasterize | d022d52875f38662ee2f9c2fad2a809ab013befc | [
"MIT"
] | null | null | null | setup.py | rlipperts/osm-rasterize | d022d52875f38662ee2f9c2fad2a809ab013befc | [
"MIT"
] | null | null | null | setup.py | rlipperts/osm-rasterize | d022d52875f38662ee2f9c2fad2a809ab013befc | [
"MIT"
] | null | null | null | import setuptools
# Packaging configuration for the osm_rasterize distribution.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
# Extra dependencies needed only for the test/lint toolchain.
test_deps = [
    'pytest',
    'flake8',
    'pylint',
    'mypy',
]
# Installable via `pip install osm_rasterize[test]`.
extras = {
    'test': test_deps
}
setuptools.setup(
    name="osm_rasterize",
    version="0.0.0",
    author="Ruben Lipperts",
    author_email="",
    description="Map OSM data onto a grid",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/rlipperts/osm-rasterize",
    package_dir={'': 'src'},
    packages=['osm_rasterize'],
    package_data={'osm_rasterize': ['py.typed']},  # ship PEP 561 typing marker
    tests_require=test_deps,
    extras_require=extras,
    install_requires=[
    ],
    classifiers=[
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Games/Entertainment",
    ],
    python_requires='~=3.9',
)
| 23.825 | 53 | 0.623295 | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
test_deps = [
'pytest',
'flake8',
'pylint',
'mypy',
]
extras = {
'test': test_deps
}
setuptools.setup(
name="osm_rasterize",
version="0.0.0",
author="Ruben Lipperts",
author_email="",
description="Map OSM data onto a grid",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/rlipperts/osm-rasterize",
package_dir={'': 'src'},
packages=['osm_rasterize'],
package_data={'osm_rasterize': ['py.typed']},
tests_require=test_deps,
extras_require=extras,
install_requires=[
],
classifiers=[
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Games/Entertainment",
],
python_requires='~=3.9',
)
| 0 | 0 | 0 |
240690a6d09f6ca766d8aaf6d39f2c8a0eca4604 | 1,539 | py | Python | aleph/index/records.py | gazeti/aleph | f6714c4be038471cfdc6408bfe88dc9e2ed28452 | [
"MIT"
] | 1 | 2017-07-28T12:54:09.000Z | 2017-07-28T12:54:09.000Z | aleph/index/records.py | gazeti/aleph | f6714c4be038471cfdc6408bfe88dc9e2ed28452 | [
"MIT"
] | 7 | 2017-08-16T12:49:23.000Z | 2018-02-16T10:22:11.000Z | aleph/index/records.py | gazeti/aleph | f6714c4be038471cfdc6408bfe88dc9e2ed28452 | [
"MIT"
] | 6 | 2017-07-26T12:29:53.000Z | 2017-08-18T09:35:50.000Z | import six
import time
import logging
from elasticsearch.helpers import BulkIndexError
from aleph.core import es_index, db
from aleph.index.mapping import TYPE_RECORD
from aleph.model import DocumentRecord
from aleph.index.util import bulk_op, query_delete
from aleph.text import index_form
log = logging.getLogger(__name__)
def clear_records(document_id):
    """Delete all records associated with the given document."""
    query_delete({'term': {'document_id': document_id}}, doc_type=TYPE_RECORD)
def generate_records(document):
    """Generate index records, based on document rows or pages."""
    query = db.session.query(DocumentRecord)
    query = query.filter(DocumentRecord.document_id == document.id)
    for record in query.yield_per(1000):
        # Index the record's own text plus any structured cell values.
        texts = [record.text]
        if record.data is not None:
            texts.extend(record.data.values())
        source = {
            'document_id': document.id,
            'collection_id': document.collection_id,
            'index': record.index,
            'sheet': record.sheet,
            'text': index_form(texts)
        }
        yield {
            '_id': record.id,
            '_type': TYPE_RECORD,
            '_index': six.text_type(es_index),
            '_source': source
        }
| 29.037736 | 66 | 0.630929 | import six
import time
import logging
from elasticsearch.helpers import BulkIndexError
from aleph.core import es_index, db
from aleph.index.mapping import TYPE_RECORD
from aleph.model import DocumentRecord
from aleph.index.util import bulk_op, query_delete
from aleph.text import index_form
log = logging.getLogger(__name__)
def clear_records(document_id):
"""Delete all records associated with the given document."""
q = {'term': {'document_id': document_id}}
query_delete(q, doc_type=TYPE_RECORD)
def generate_records(document):
"""Generate index records, based on document rows or pages."""
q = db.session.query(DocumentRecord)
q = q.filter(DocumentRecord.document_id == document.id)
for record in q.yield_per(1000):
texts = [record.text]
if record.data is not None:
texts.extend(record.data.values())
yield {
'_id': record.id,
'_type': TYPE_RECORD,
'_index': six.text_type(es_index),
'_source': {
'document_id': document.id,
'collection_id': document.collection_id,
'index': record.index,
'sheet': record.sheet,
'text': index_form(texts)
}
}
def index_records(document):
    """Rebuild the search index entries for *document*'s records.

    Stale entries are deleted first, then fresh ones are bulk-inserted.
    """
    clear_records(document.id)
    while True:
        try:
            bulk_op(generate_records(document))
            return
        except BulkIndexError as exc:
            # NOTE(review): retries forever with a fixed 10s back-off —
            # presumably to ride out temporary ES overload; confirm that an
            # upper retry bound is not needed here.
            log.warning('Indexing error: %s', exc)
            time.sleep(10)
36b609325bdfe89b7a470ae135cd6540b944d807 | 3,397 | py | Python | adb/linux/platform-tools/systrace/catapult/telemetry/telemetry/web_perf/metrics/startup_unittest.py | llaske/sugarizer-deployment-tool-desktop | 34df1a56b68b15b6771671f87ab66586d60c514a | [
"Apache-2.0"
] | 1 | 2019-01-17T19:03:17.000Z | 2019-01-17T19:03:17.000Z | adb/MACOS/platform-tools/systrace/catapult/telemetry/telemetry/web_perf/metrics/startup_unittest.py | llaske/sugarizer-deployment-tool-desktop | 34df1a56b68b15b6771671f87ab66586d60c514a | [
"Apache-2.0"
] | 2 | 2017-09-08T20:26:05.000Z | 2017-09-08T20:29:07.000Z | adb/windows/platform-tools/systrace/catapult/telemetry/telemetry/web_perf/metrics/startup_unittest.py | llaske/sugarizer-deployment-tool-desktop | 34df1a56b68b15b6771671f87ab66586d60c514a | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import telemetry.timeline.event as timeline_event
from telemetry.testing import test_page_test_results
from telemetry.web_perf.metrics import startup
# Attributes defined outside __init__
# pylint: disable=attribute-defined-outside-init
| 33.633663 | 80 | 0.71622 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import telemetry.timeline.event as timeline_event
from telemetry.testing import test_page_test_results
from telemetry.web_perf.metrics import startup
class StartupTimelineMetricTest(unittest.TestCase):
  """Tests for startup.StartupTimelineMetric over hand-built timeline events."""

  def setUp(self):
    # Events accumulated by AddEvent() and consumed by ComputeStartupMetrics().
    self.events = []

  def AddEvent(self, event_name, start, duration=None):
    """Append a TimelineEvent with the given name/start/duration."""
    event = timeline_event.TimelineEvent('my_category', event_name,
                                         start, duration)
    self.events.append(event)

  # Attributes defined outside __init__
  # pylint: disable=attribute-defined-outside-init
  def ComputeStartupMetrics(self):
    """Run StartupTimelineMetric over self.events and return the results."""
    results = test_page_test_results.TestPageTestResults(self)

    # Create a mock model usable by
    # StartupTimelineMetric.AddWholeTraceResults().
    def IterateEvents(event_predicate):
      for event in self.events:
        if event_predicate(event):
          yield event

    class MockClass(object):
      pass

    # BUG FIX: the previous code assigned to an undefined name
    # (`domain.model = MockClass()`), which raised NameError at runtime;
    # a plain local `model` is what AddWholeTraceResults() needs.
    model = MockClass()
    model.browser_process = MockClass()
    model.browser_process.parent = MockClass()
    model.browser_process.parent.IterAllEvents = IterateEvents

    startup.StartupTimelineMetric().AddWholeTraceResults(model, results)
    return results

  def testUntrackedvents(self):  # (sic) name kept for test-runner compatibility
    # Code coverage for untracked events
    self.AddEvent('uknown_event_0', 0)
    self.AddEvent('uknown_event_1', 1)
    self.ComputeStartupMetrics()

  def testInstantEventsBasedValue(self):
    # Test case with instant events to measure the duration between the first
    # occurrences of two distinct events.
    START0 = 7
    START1 = 8
    DURATION0 = 17
    DURATION1 = 18

    # Generate duplicated events to make sure we consider only the first one.
    self.AddEvent(startup._MAIN_ENTRY_POINT, START0)
    self.AddEvent(startup._MAIN_ENTRY_POINT, START1)
    self.AddEvent('loadEventEnd', START0 + DURATION0)
    self.AddEvent('loadEventEnd', START1 + DURATION1)
    self.AddEvent('requestStart', START0 + DURATION0 * 2)
    self.AddEvent('requestStart', START1 + DURATION1 * 2)

    results = self.ComputeStartupMetrics()
    results.AssertHasPageSpecificScalarValue('foreground_tab_load_complete',
                                             'ms', DURATION0)
    results.AssertHasPageSpecificScalarValue('foreground_tab_request_start',
                                             'ms', DURATION0 * 2)

  def testDurationEventsBasedValues(self):
    DURATION_EVENTS = set([
        'messageloop_start_time',
        'window_display_time',
        'open_tabs_time',
        'first_non_empty_paint_time',
        'first_main_frame_load_time'])

    # Test case to get the duration of the first occurrence of a duration event.
    i = 1
    for display_name in DURATION_EVENTS:
      self.assertEqual(len(startup._METRICS[display_name]), 1)
      event_name = startup._METRICS[display_name][0]
      duration = 13 * i
      i += 1
      # Generate duplicated events to make sure only the first event is
      # considered.
      self.AddEvent(event_name, 5, duration)
      self.AddEvent(event_name, 6, duration + 2)

    results = self.ComputeStartupMetrics()
    i = 1
    for display_name in DURATION_EVENTS:
      duration = 13 * i
      i += 1
      results.AssertHasPageSpecificScalarValue(display_name, 'ms', duration)
| 2,772 | 30 | 172 |
980fb7650b8d4bbe41c230f09be0aa88510e3537 | 169 | py | Python | launch_app.py | LuxembourgTechSchool/lts-fitbitmonitor-service | 1ad6d401f0593569987e8b5f394366f72d3de5ea | [
"MIT"
] | null | null | null | launch_app.py | LuxembourgTechSchool/lts-fitbitmonitor-service | 1ad6d401f0593569987e8b5f394366f72d3de5ea | [
"MIT"
] | null | null | null | launch_app.py | LuxembourgTechSchool/lts-fitbitmonitor-service | 1ad6d401f0593569987e8b5f394366f72d3de5ea | [
"MIT"
] | null | null | null | from gevent.pywsgi import WSGIServer
import app
# Build the Flask application and serve it with gevent's production WSGI server.
app_instance = app.create_app()
# NOTE(review): bound to localhost:8080 only — presumably fronted by a reverse
# proxy in deployment; confirm.
http_server = WSGIServer(('127.0.0.1', 8080), app_instance)
http_server.serve_forever() | 24.142857 | 59 | 0.786982 | from gevent.pywsgi import WSGIServer
import app
app_instance = app.create_app()
http_server = WSGIServer(('127.0.0.1', 8080), app_instance)
http_server.serve_forever() | 0 | 0 | 0 |
69366283c195dbb9b44eba0e90d333e8a30d304d | 39 | py | Python | bottleneck/slow/__init__.py | fhal/bottleneck | 7147ad85fadbc9c6ffccb05224efa7c380ded4ee | [
"BSD-2-Clause"
] | 1 | 2015-01-30T19:49:12.000Z | 2015-01-30T19:49:12.000Z | bottleneck/slow/__init__.py | fhal/bottleneck | 7147ad85fadbc9c6ffccb05224efa7c380ded4ee | [
"BSD-2-Clause"
] | null | null | null | bottleneck/slow/__init__.py | fhal/bottleneck | 7147ad85fadbc9c6ffccb05224efa7c380ded4ee | [
"BSD-2-Clause"
] | null | null | null |
from func import *
from move import *
| 9.75 | 18 | 0.717949 |
from func import *
from move import *
| 0 | 0 | 0 |
bbd7018fd7f54e3c658bdc186fb2c31dab663594 | 27,454 | py | Python | Main.py | RoboticMind/Gridcoin-Slack-Tipbot | 19ca409b23d3d6463ed916698cf2955c48cbcf47 | [
"MIT"
] | 1 | 2022-03-07T07:49:42.000Z | 2022-03-07T07:49:42.000Z | Main.py | RoboticMind/Gridcoin-Slack-Tipbot | 19ca409b23d3d6463ed916698cf2955c48cbcf47 | [
"MIT"
] | null | null | null | Main.py | RoboticMind/Gridcoin-Slack-Tipbot | 19ca409b23d3d6463ed916698cf2955c48cbcf47 | [
"MIT"
] | null | null | null | from RPC import *
from Slack_Connection import *
from flask import Flask, request, Response
from decimal import *
from fractions import *
from Python_Hash import *
import os
import time
app = Flask(__name__)
@app.route("/" ,methods=["POST"])
@app.route("/Button_Pressed",methods=["POST"])
@app.route("/", methods=['GET'])
@app.route("/new_transaction",methods=["POST"])
if __name__ == "__main__":
global main_json
app.run(debug=False,threaded=True)
| 40.915052 | 451 | 0.665477 | from RPC import *
from Slack_Connection import *
from flask import Flask, request, Response
from decimal import *
from fractions import *
from Python_Hash import *
import os
import time
app = Flask(__name__)
def get_minimum_transaction_size():
    """Return the smallest allowed tip amount, read from config as a Fraction."""
    with open("Config/min-transaction-size.txt") as config_file:
        raw_value = config_file.read()
    return Fraction(raw_value)
def get_balances_json():
    """Refresh the module-level ``main_json`` cache from Users.json on disk."""
    global main_json
    import json
    with open("Users.json", "r") as user_file:
        main_json = json.loads(user_file.read())
def find_user_attribute(user_id, type_of_input):
    """Reload Users.json and return attribute *type_of_input* of *user_id*.

    Returns None when the user id is unknown. Replaces the fragile
    ``"return_value" in locals()`` introspection with direct returns.
    """
    global main_json
    get_balances_json()
    # reversed() preserves the old "last matching record wins" behaviour in the
    # (unexpected) case of duplicate user ids.
    for user in reversed(main_json["Users"]):
        if user["User_ID"] == user_id:
            return user[type_of_input]
    return None
def save_user_lists():
    """Persist the in-memory ``main_json`` user table to Users.json."""
    global main_json
    import json
    with open("Users.json", "w") as user_file:
        user_file.write(json.dumps(main_json))
def set_user_attribute(user_id, type_of_input, value):
    """Set attribute *type_of_input* of *user_id* to *value* and persist.

    Returns 0 on success and 1 when the user id is unknown. Replaces the
    fragile ``"return_value" in locals()`` introspection with an explicit
    flag. Like the original, it updates (and saves after) every matching
    record if duplicate user ids exist.
    """
    global main_json
    get_balances_json()
    updated = False
    for user in main_json["Users"]:
        if user["User_ID"] == user_id:
            user[type_of_input] = value
            save_user_lists()
            updated = True
    return 0 if updated else 1
def add_user(User_ID, Balance):
    """Append a fresh user record (empty credentials) and persist the table."""
    global main_json
    record = {
        "User_ID": User_ID,
        "Balance": str(Balance),
        "Password": "",
        "Salt": "",
        "Wallet-Addr": "",
        "Faucet_Time": ""
    }
    main_json["Users"].append(record)
    save_user_lists()
def find_user_balance(User_ID):
    """Return the stored balance string for *User_ID*, or None if unregistered.

    Replaces the fragile ``"return_value" in locals()`` introspection with
    direct returns; reversed() keeps the old last-match-wins behaviour for
    duplicate user ids.
    """
    global main_json
    for user in reversed(main_json["Users"]):
        if user["User_ID"] == User_ID:
            return user["Balance"]
    return None
def find_user_password(User_ID):
    """Return the stored (hashed) password for *User_ID*, or None if unknown.

    Replaces the fragile ``"return_value" in locals()`` introspection with
    direct returns; reversed() keeps the old last-match-wins behaviour for
    duplicate user ids.
    """
    global main_json
    for user in reversed(main_json["Users"]):
        if user["User_ID"] == User_ID:
            return user["Password"]
    return None
def change_user_balance(user_id, value_to_change_by):
    """Add *value_to_change_by* (Fraction-compatible) to a user's balance.

    Creates the account with a zero balance first if it does not exist yet.
    Returns 0 on success, 1 when the update failed (the error is printed).
    Fixes the ``== None`` comparison (PEP 8: use ``is None``) and makes the
    old implicit NameError-on-missing-user failure path explicit.
    """
    global main_json
    if find_user_balance(user_id) is None:  # adds the user to the json file if they arn't there already
        add_user(user_id, 0)
    # Keep scanning to mirror the original's last-match behaviour for
    # duplicate user ids.
    target = None
    for user in main_json["Users"]:
        if user["User_ID"] == user_id:
            target = user
    try:
        target["Balance"] = str(Fraction(target["Balance"]) + Fraction(value_to_change_by))
        save_user_lists()
        return 0
    except Exception as t:
        # A missing user (target is None) or an unparsable amount lands here.
        print("AN ERROR OCCURED:\n"+str(t))
        return 1
def transfer_money(User_ID_to_take_from, User_ID_to_give_to, value,channel_id):
    """Move *value* GRC from one user to another, refunding on failure.

    Returns 0 on success and 1 on any error; the sender is notified on
    *channel_id* when a transfer fails.
    """
    if change_user_balance(User_ID_to_take_from, -1*value) == 1:
        # Debit failed outright: nothing to roll back.
        send_message_to_one_user("An Error Occurred While Transferring Please Contact Roboticmind",channel_id,User_ID_to_take_from)
        return 1
    elif change_user_balance(User_ID_to_give_to,value) == 1:
        # Credit failed after the debit succeeded: refund the sender.
        change_user_balance(User_ID_to_take_from, value)
        send_message_to_one_user("An Error Occurred While Transferring. Please Contact Roboticmind",channel_id,User_ID_to_take_from)
        return 1
    else:
        return 0
def Check_Valid_Addr(Address):
    """Return True if *Address* looks like a valid Gridcoin address.

    Checks: exactly 34 characters, starts with 'S' or 'R', and contains none
    of the characters excluded from the Base58 alphabet after the first.

    BUG FIXES vs the previous version:
    - wrong-length addresses returned True (accepted) instead of False;
    - the check loop used range(1, 35), indexing Address[34] and raising
      IndexError on every address that passed the earlier character checks;
    - the forbidden set checked uppercase "L" (which Base58 allows) instead
      of the actually-excluded lowercase "l".
    """
    if len(Address) != 34:
        return False
    if Address[0] != "S" and Address[0] != "R":
        return False
    # Base58 excludes 0, O, I and l to avoid visual ambiguity.
    forbidden = {"0", "O", "I", "l"}
    return all(ch not in forbidden for ch in Address[1:])
def run_command(command, text, user_id, channel_id, code, trigger_id):
    """Dispatch one Slack slash command.

    Parameters:
        command    -- the slash command (e.g. "/tip", "/withdraw", "/faucet")
        text       -- raw comma-separated argument string from Slack
        user_id    -- Slack ID of the invoking user
        channel_id -- channel the command was issued from
        code       -- GUI round-trip state: "" on a fresh invocation, or
                      "<marker>|<json>" when re-entered from a dialog/button
                      callback (the JSON carries the user's selections)
        trigger_id -- Slack trigger for opening dialogs (may be None)

    Commands that arrive without enough arguments open a Slack dialog (GUI /
    GUI_no_popup) and re-enter this function via the callback with `code` set.
    """
    global main_json
    get_balances_json()
    if command == "/tip": #tip user amount password
        try:
            text = text.split(",")
        except:
            send_message_to_one_user("*Error:* Please Enter a Valid Command",channel_id,user_id)
            return
        if find_user_balance(user_id) == None:
            send_message_to_one_user("*Error:* This account is not registered use /register",channel_id,user_id)
            return
        elif len(text) <= 1:
            # not enough arguments: open a dialog, or consume its callback
            if code == "":
                code="Info_Sent"
                command_info=json.dumps({"0":command,"1":",".join(text),"2":user_id,"3":channel_id,"4":code})
                selection=[{
                    "label":"Who Do You Want To Tip?",
                    "name":"user","text":"select a user",
                    "type":"select",
                    "data_source":"users"
                },
                {
                    "label":"How Many Gridcoin?",
                    "name":"amount",
                    "text":"type an amount",
                    "type":"text",
                    "subtext":"number"
                }
                ]
                if find_user_password(user_id) != "":
                    selection.append({
                        "label":"Your Password",
                        "name":"password",
                        "text":"type an amount",
                        "type":"text",
                        "subtext":"number"
                    })
                GUI("Tip A User Gridcoin!",selection,command_info,trigger_id)
                return
            else:
                info=json.loads(code.split("|")[1])
                if info.get("password") != None:
                    password = "," + info["password"]
                else:
                    password = ""
                run_command("/tip", info["user"] + "," + info["amount"] + password,user_id,channel_id,"",None)
                return
        elif text[0][0] == "@": #this looks at the starting char in the input
            send_message_to_one_user("*Error:* Please Don't Use Tagged Usernames (I.E: @User)",channel_id,user_id)
            return
        password=find_user_password(user_id)
        if password != "":
            # password-protected account: last arg is the password, second to
            # last the amount, everything before that is recipient names
            if len(text) < 3:
                send_message_to_one_user("*Error:* Please Enter a Command with a user to tip, an amount to tip, and this account requires a password",channel_id,user_id)
                return
            amount=text[len(text)-2]
            users=text[0:len(text)-2]
            user_ids = get_multiple_user_ids(text[0:len(text)-2])
            inputted_password=text[len(text)-1]
            salt=find_user_attribute(user_id,"Salt")
            if not checkpassword(inputted_password,password,salt):
                send_message_to_one_user("*Error:* Incorrect Password",channel_id,user_id)
                return
        else:
            amount=text[len(text)-1]
            user_ids = get_multiple_user_ids(text[0:len(text)-1])
            users=text[0:len(text)-1]
        users_list_text=""
        try:
            Fraction(amount)
        except:
            send_message_to_one_user("*Error:* Please Enter a Valid Command with a number for the amount",channel_id,user_id)
            return
        if Fraction(find_user_balance(user_id)) < Fraction(amount):
            send_message_to_one_user("*Error:* Account Balance Is Lower Than the Amount Attempted to be Transferred",channel_id,user_id)
            return
        elif Decimal(amount).as_tuple().exponent < -8:
            send_message_to_one_user("*Error:* Only 8 Decimal Places Are Supported",channel_id,user_id)
            return
        elif Fraction(amount) < get_minimum_transaction_size():
            send_message_to_one_user("*Error:* Transaction size is under the minimum transaction size of " + str(float(get_minimum_transaction_size())) + " GRC",channel_id,user_id)
            return
        elif user_id in user_ids:
            send_message_to_one_user("*Error:* You Can Not Send Gridcoin To Yourself",channel_id,user_id)
            return
        elif len(user_ids) < len(users):
            send_message_to_one_user("*Error:* One Of The Usernames Inputted Is Invalid",channel_id,user_id)
            return
        elif Fraction(amount) >= 25 and code != "CONFIRMED|Yes":
            # large tips require an explicit confirmation button round-trip
            Confirm("Are you sure you want to tip " + amount + "GRC?",channel_id,user_id,json.dumps({"0":command,"1":",".join(text),"2":user_id,"3":channel_id,"4":"CONFIRMED"}))
            return
        for x in range(0,len(user_ids)):
            if transfer_money(user_id,user_ids[x],Fraction(amount)/len(user_ids),channel_id) == 1:
                send_message_to_one_user("*Error Transferring Gridcoin*",channel_id,user_id)
                # refund recipients already paid (fixed: the original re-sent
                # the tip to them instead of reversing it)
                for l in range(0,x):
                    transfer_money(user_ids[l],user_id,Fraction(amount)/len(user_ids),channel_id)
                return
        for x in range(0,len(user_ids)):
            users_list_text+= " |<@" + user_ids[x] + ">|"
        if can_bot_post(channel_id):
            send_message_by_id(channel_id,"<@" + user_id + ">" + " tipped " + str(round(float(Fraction(amount)/Fraction(len(user_ids))),8)) + " GRC to" + users_list_text)
            return
        else:
            # fixed: float() was called with two arguments here (TypeError)
            PM_User(user_id,"<@" + user_id + ">" + " tipped " + str(round(float(Fraction(amount)/Fraction(len(user_ids))),8)) + " GRC to" + users_list_text)
            for x in range(0,len(user_ids)):
                # fixed: text[amount] indexed the arg list with a string
                PM_User(user_ids[x],"<@" + user_id + ">" + " tipped " + str(round(float(Fraction(amount)/Fraction(len(user_ids))),8)) + " GRC to" + users_list_text)
            return
    elif command == "/withdraw": #withdraw address amount password
        try:
            text = text.split(",")
        except:
            send_message_to_one_user("*Error:* Please Enter a Valid Command",channel_id,user_id)
            return
        if len(text) < 2 or len(text) > 3:
            if code == "":
                code="Info_Sent"
                command_info=json.dumps({"0":command,"1":",".join(text),"2":user_id,"3":channel_id,"4":code})
                selection=[{
                    "label":"Your Gridcoin address",
                    "name":"address",
                    "text":"enter an address",
                    "type":"text",
                },
                {
                    "label":"How Many Gridcoin?",
                    "name":"amount",
                    "text":"type an amount",
                    "type":"text",
                    "subtext":"number"
                }
                ]
                if find_user_password(user_id) != "":
                    selection.append({
                        "label":"Your Password",
                        "name":"password",
                        "text":"type an amount",
                        "type":"text",
                        "subtext":"number"
                    })
                GUI("Withdrawl Your Gridcoin",selection,command_info,trigger_id)
                return
            else:
                info=json.loads(code.split("|")[1])
                if info.get("password") != None:
                    password = "," + info["password"]
                else:
                    password = ""
                run_command("/withdraw", info["address"] + "," + info["amount"] + password,user_id,channel_id,"",None)
                return
            send_message_to_one_user("*Error:* Invalid Command",channel_id,user_id)
            return
        try:
            Fraction(text[1])
        except:
            send_message_to_one_user("*Error:* Invalid Input",channel_id,user_id)
            return
        if find_user_attribute(user_id,"User_ID") == None:
            send_message_to_one_user("*Error:* Your Account Is Not Registered Yet\n Use /register to register your account",channel_id,user_id)
            return  # fixed: previously fell through to the withdrawal below
        elif Fraction(text[1]) < get_minimum_transaction_size() or Fraction(text[1]) < 0.6:
            send_message_to_one_user("*Error:* Transaction too small",channel_id,user_id)
            return  # fixed: was the typo `retur` (NameError at runtime)
        elif Fraction(text[1]) > Fraction(find_user_attribute(user_id,"Balance")):
            send_message_to_one_user("*Error:* Your Balance Is Less Than The Amount You Are Trying To Transfer",channel_id,user_id)
            return
        elif not Check_Valid_Addr(text[0]):
            send_message_to_one_user("*Error* Invalid Address",channel_id,user_id)
            return  # fixed: previously fell through to the withdrawal below
        elif find_user_password(user_id) != "":
            if len(text) != 3:
                send_message_to_one_user("*Error:* Please Enter a password\n This account requires a password",channel_id,user_id)
                return
            else:
                password = find_user_password(user_id)
                salt = find_user_attribute(user_id,"Salt")
                if not checkpassword(text[2],password,salt):
                    send_message_to_one_user("*Error:* incorrect password",channel_id,user_id)
                    return
        # all checks passed (no password set, or password verified above)
        output = withdraw(user_id,Fraction(text[1]),text[0])
        if output != 1:
            send_message_to_one_user("successful withdrawal of " + text[1]+ " Gridcoin (With a Fee of 0.5 GRC)\nTransaction ID:" + str(output),channel_id,user_id)
        else:
            send_message_to_one_user("An Error Occurred",channel_id,user_id)
    elif command == "/deposit":
        for x in range(0,len(main_json["Users"])):
            if main_json["Users"][x]["User_ID"] == user_id:
                number = x
        try:
            # NameError on `number` (unregistered user) lands in the except
            if main_json["Users"][number]["Wallet-Addr"] != "":
                Address=main_json["Users"][number]["Wallet-Addr"]
            else:
                Address=generate_new_address(user_id)
            send_message_to_one_user("Deposit Your Gridcoins To This Address:\n*`"+Address+"`*\nOnce You Send a Transaction To That Address, Don't Send Any More To It Until You Receive Confirmation That Your Transaction Was Received",channel_id,user_id)
        except:
            send_message_to_one_user("*Error:* Your account isn't registered yet, use /register",channel_id,user_id)
    elif command == "/password": # /password add [password] or /password change Old_Password New_password or /password remove [password] or /password help
        try:
            text = text.split(",")
        except:
            send_message_to_one_user("*Error:* Please Enter a Valid Command",channel_id,user_id)
            return
        if text[0] == "help":
            send_message_to_one_user("The /password command can add, change, or remove an extra layer of protection\nTo add a password use:\n/password add,[Password]\nTo change your password use:\n/password change,[Old_Password],[New_Password]\nTo Remove Your Password Use:\n/password remove,[Password]\nDon't use spaces in your password\nIf you have forgotten your password please contact Roboticmind ",channel_id,user_id)
            return
        elif text[0] == "add":
            password = find_user_password(user_id)
            salt = find_user_attribute(user_id,"Salt")  # fixed: args were swapped
            if password != None and password != "":
                send_message_to_one_user("*Error:* there already is a password on this account",channel_id,user_id)
                return  # fixed: previously continued on to the dialog below
            if len(text) == 1:
                if code == "":
                    code="Info_Sent"
                    command_info=json.dumps({"0":command,"1":",".join(text),"2":user_id,"3":channel_id,"4":code})
                    selection=[{
                        "label":"Your New Password",
                        "name":"password",
                        "text":"enter a password",
                        "type":"text",
                    }]
                    GUI("Set a Password",selection,command_info,trigger_id)
                else:
                    info=json.loads(code.split("|")[1])
                    run_command("/password", "add," + info["password"],user_id,channel_id,"",None)
                return
            elif len(text) > 2:
                send_message_to_one_user("*Error:* Please Don't Use Commas In Your Password",channel_id,user_id)
                return
            elif password == "":
                for x in range(0,len(main_json["Users"])):
                    if main_json["Users"][x]["User_ID"] == user_id:
                        password, salt = newpassword(text[1])
                        main_json["Users"][x]["Password"] = password
                        main_json["Users"][x]["Salt"] = salt
                save_user_lists()
                send_message_to_one_user("Your Password Has Been Added",channel_id,user_id)
                return
            elif password == None:
                # no record yet: create one, then set the password on it
                add_user(user_id,0)
                for x in range(0,len(main_json["Users"])):
                    if main_json["Users"][x]["User_ID"] == user_id:
                        new_password, new_salt = newpassword(text[1])
                        main_json["Users"][x]["Password"] = new_password
                        main_json["Users"][x]["Salt"] = new_salt
                save_user_lists()
                send_message_to_one_user("Your Account Has Been Created And Your Password Has Been Added",channel_id,user_id)
                return
        elif text[0] == "change":
            password = find_user_password(user_id)
            salt = find_user_attribute(user_id,"Salt")
            if len(text) <= 2:
                if code == "":
                    code="Info_Sent"
                    command_info=json.dumps({"0":command,"1":",".join(text),"2":user_id,"3":channel_id,"4":code})
                    selection=[
                    {
                        "label":"Your Old Password",
                        "name":"old-password",
                        "text":"enter your current password",
                        "type":"text"
                    },
                    {
                        "label":"Your New Password",
                        "name":"new-password",
                        "text":"enter a new password",
                        "type":"text",
                    }]
                    GUI("Change Your Password",selection,command_info,trigger_id)
                else:
                    info=json.loads(code.split("|")[1])
                    run_command("/password", "change," + info["old-password"] + ","+info["new-password"],user_id,channel_id,"",None)
                return
            elif len(text) > 3:
                send_message_to_one_user("*Error:* Please Don't Use Commas In Your Password",channel_id,user_id)
                return
            elif password == None or password == "":
                send_message_to_one_user("*Error:* There is no password set",channel_id,user_id)
                return
            elif checkpassword(text[1],password,salt):
                for x in range(0,len(main_json["Users"])):
                    if main_json["Users"][x]["User_ID"] == user_id:
                        new_password, new_salt = newpassword(text[2])
                        main_json["Users"][x]["Password"] = new_password
                        main_json["Users"][x]["Salt"] = new_salt
                save_user_lists()
                send_message_to_one_user("Your Password Has Been Changed",channel_id,user_id)
                return
            else:
                send_message_to_one_user("*Error:* Incorrect Password\n Contact Roboticmind if you have forgotten your password",channel_id,user_id)
                return
        elif text[0] == "remove":
            password = find_user_password(user_id)
            salt = find_user_attribute(user_id,"Salt")
            if password == None or password == "":
                send_message_to_one_user("*Error:* No Password Registered On This Account",channel_id,user_id)
                return
            elif len(text) != 2:
                if code == "":
                    code="Info_Sent"
                    command_info=json.dumps({"0":command,"1":",".join(text),"2":user_id,"3":channel_id,"4":code})
                    selection=[
                    {
                        "label":"Your Current Password",
                        "name":"current-password",
                        "text":"enter your current password",
                        "type":"text"
                    }]
                    GUI("Remove a Password",selection,command_info,trigger_id)
                else:
                    info=json.loads(code.split("|")[1])
                    run_command("/password", "remove," + info["current-password"],user_id,channel_id,"",None)
                return
            elif not checkpassword(text[1],password,salt):
                send_message_to_one_user("*Error* Incorrect Password",channel_id,user_id)
                return
            else:
                for x in range(0,len(main_json["Users"])):
                    if main_json["Users"][x]["User_ID"] == user_id:
                        main_json["Users"][x]["Password"] = ""
                        main_json["Users"][x]["Salt"] = ""
                save_user_lists()
                send_message_to_one_user("Your Password Has Been Removed",channel_id,user_id)
                return
        elif text[0] == "":
            # bare /password: show a menu of the three sub-commands
            if code == "":
                code="Info_Sent"
                command_info=json.dumps({"0":command,"1":",".join(text),"2":user_id,"3":channel_id,"4":code})
                selection=[{
                    "name":"option",
                    "text":"Select a Password Change",
                    "type":"select",
                    "options":[
                    {
                        "text":"Add A Password",
                        "value":"add"
                    },
                    {
                        "text":"Change A Password",
                        "value":"change"
                    },
                    {
                        "text":"Remove A Password",
                        "value":"remove"
                    },
                    ]}]
                GUI_no_popup("Password Settings",selection,channel_id,user_id,command_info)
                return
            else:
                info=json.loads(code.split("|")[1])
                run_command("/password", info,user_id,channel_id,"",trigger_id)
                return
        else:
            send_message_to_one_user("*Error:* Invalid Command",channel_id,user_id)
            return
    elif command == "/balance": #/balance or /balance password
        return_value = find_user_attribute(user_id,"Balance")
        if return_value == None:
            send_message_to_one_user("*Error:* this account is not registered yet\n Use /register to register your account",channel_id,user_id)
            return
        else:
            send_message_to_one_user("Your Balance Is:\n" + str(round(float(Fraction(return_value)),8)) + " GRC",channel_id,user_id)
    elif command == "/register": #/register or /register password
        for x in range(0,len(main_json["Users"])):
            if main_json["Users"][x]["User_ID"] == user_id:
                number = x
        if "number" in locals():
            send_message_to_one_user("*Error:* Your account is already registered, someone may have sent you Gridcoin already",channel_id,user_id)
            return
        else:
            if text == "":
                add_user(user_id,0)
                send_message_to_one_user("Your account has been registered!",channel_id,user_id)
            elif len(text) >= 1:
                # register with a password in one step
                add_user(user_id,0)
                for x in range(0,len(main_json["Users"])):
                    if main_json["Users"][x]["User_ID"] == user_id:
                        new_password, new_salt = newpassword(text)
                        main_json["Users"][x]["Password"] = new_password
                        main_json["Users"][x]["Salt"] = new_salt
                save_user_lists()
                send_message_to_one_user("Your account has been registered!",channel_id,user_id)
            else:
                send_message_to_one_user("*Error:* Invalid Command. Commas Are Not Allowed In Passwords",channel_id,user_id)
    elif command == "/test":
        send_message_to_one_user("the bot is working",channel_id,user_id)
    elif command == "/attribution":
        send_message_to_one_user("The bot icon picure can be found at https://www.shareicon.net/gridcoin-grc-117383",channel_id,user_id)
    elif command == "/faucet": #faucet give amount password or faucet receive
        try:
            text = text.split(",")
        except:
            send_message_to_one_user("*Error:* Invalid Input",channel_id,user_id)
            return
        if len(text) > 3 or text[0] == "":
            if code == "":
                code="Info_Sent"
                command_info=json.dumps({"0":command,"1":",".join(text),"2":user_id,"3":channel_id,"4":code})
                selection=[{
                    "name":"option",
                    "text":"Select a Password Change",
                    "type":"select",
                    "options":[
                    {
                        "text":"Recieve Gridcoin from the faucet",
                        "value":"receive"
                    },
                    {
                        "text":"Donate to the faucet",
                        "value":"give"
                    }
                    ]}]
                GUI_no_popup("Fuacet Options",selection,channel_id,user_id,command_info)
                return
            else:
                info=json.loads(code.split("|")[1])
                run_command("/faucet", info,user_id,channel_id,"",trigger_id)
                return
        if text[0] == "give":
            if len(text) == 1:
                if code == "":
                    code="Info_Sent"
                    command_info=json.dumps({"0":command,"1":",".join(text),"2":user_id,"3":channel_id,"4":code})
                    selection=[{
                        "label":"Amount To Be Donated",
                        "name":"amount",
                        "text":"type an amount",
                        "type":"text",
                        "subtext":"number"
                    }]
                    if find_user_password(user_id) != "":
                        selection.append({
                            "label":"Your Password",
                            "name":"password",
                            "text":"type your password",
                            "type":"text",
                            "subtext":"number"
                        })
                    GUI("Donate To The Faucet!",selection,command_info,trigger_id)
                    return
                else:
                    info=json.loads(code.split("|")[1])
                    if info.get("password") != None:
                        password = "," + info["password"]
                    else:
                        password = ""
                    # fixed: command and text were fused into a single argument
                    run_command("/faucet", "give," + info["amount"] + password,user_id,channel_id,"",None)
                    return
            try:
                Fraction(text[1])
            except:
                # fixed: channel_id/user_id were missing from this call
                send_message_to_one_user("*Error:* Please Enter a Valid Number",channel_id,user_id)
                return
            password=find_user_password(user_id)
            salt=find_user_attribute(user_id,"Salt")
            if password == None:
                send_message_to_one_user("*Error:* Your Account Is Not Registered Yet, Use /register",channel_id,user_id)
                return
            elif Fraction(find_user_balance(user_id)) < Fraction(text[1]):
                send_message_to_one_user("*Error:* Account Balance Is Lower Than the Amount Attempted to be Transferred",channel_id,user_id)
                return
            elif Decimal(text[1]).as_tuple().exponent < -8:
                # fixed: channel_id/user_id were missing from this call
                send_message_to_one_user("*Error:* Only Eight Decimal Places Are Supported",channel_id,user_id)
                return
            elif Fraction(text[1]) < get_minimum_transaction_size():
                send_message_to_one_user("*Error:* Transaction Too Small*",channel_id,user_id)
                return
            if password != "":
                if len(text) != 3:
                    send_message_to_one_user("*Error:* Invalid Input, Please Make Sure You Have a Password and Have Two Commas",channel_id,user_id)
                    return
                elif not checkpassword(text[2],password,salt):
                    send_message_to_one_user("*Error* Incorrect Password",channel_id,user_id)
                    return
            if transfer_money(user_id,"FAUCET-BALANCE",Fraction(text[1]),channel_id) == 0:
                if can_bot_post(channel_id):
                    send_message_by_id(channel_id,"<@" + user_id + ">" + " tipped " + text[1].replace(" ","") + " GRC to the Faucet")
                    return
                else:
                    PM_User(user_id,"Your Deposit Was Sucessful")
                    return
        elif text[0] == "receive":
            if Fraction(find_user_attribute("FAUCET-BALANCE","Balance")) <= Fraction(0):
                send_message_to_one_user("*Error:* The Faucet Is Currently Empty",channel_id,user_id)
                return
            elif find_user_attribute(user_id,"Faucet_Time") != None and find_user_attribute(user_id,"Faucet_Time") != "" and int(time.time()) - int(find_user_attribute(user_id,"Faucet_Time")) < 86400:
                # fixed: remaining-wait arithmetic was inverted (negative time)
                send_message_to_one_user("*Error:* Please Wait " + time.strftime("%H hours, %M minutes, and %S seconds",time.gmtime(int(find_user_attribute(user_id,"Faucet_Time"))+86400-int(time.time()))),channel_id,user_id)
                return
            elif Fraction(find_user_attribute("FAUCET-BALANCE","Balance")) < Fraction(0.5):
                # faucet nearly empty: hand out whatever is left
                amount=Fraction(find_user_attribute("FAUCET-BALANCE","Balance"))
            else:
                amount=Fraction(0.5)
            transfer_money("FAUCET-BALANCE",user_id,amount,channel_id)
            if set_user_attribute(user_id,"Faucet_Time",int(time.time())) == 1:
                transfer_money("FAUCET-BALANCE",user_id,-amount,channel_id)
                send_message_to_one_user("An Error Occured",channel_id,user_id)
                return  # fixed: don't also report success after a failure
            send_message_to_one_user("You have sucessfully received" + str(round(float(amount),8)) +" GRC",channel_id,user_id)
        else:
            run_command("/faucet", "",user_id,channel_id,"",trigger_id)
            return
    elif command == "/helpgrctip":
        if text == "":
            text="1"
        if not text.isdigit():
            # security: reject non-numeric names so user input can't traverse
            # outside Config/Help_Messages (e.g. "../../secret")
            send_message_to_one_user("*Error:* No Help Menu Exists For That Number",channel_id,user_id)
            return
        try:
            with open("Config/Help_Messages/" + text + ".txt","r") as file:
                send_message_to_one_user(file.read(),channel_id,user_id)
        except:
            send_message_to_one_user("*Error:* No Help Menu Exists For That Number",channel_id,user_id)
    elif command == "/tos":
        send_message_to_one_user("By using this bot you are agreeing to these terms of service listed as follows: You are agreeing to not use this bot to scam, launder money or do anything illegal. This bot may temporarily go offline at any point where you might not be able to withdraw Gridcoin until it is back online. Any violation of the terms of service may result in account cancelltion and or reported to the proper authorities.",channel_id,user_id)
@app.route("/" ,methods=["POST"])
def inbound():
    """Slack slash-command webhook: forward the posted form fields to run_command."""
    form = request.form
    run_command(
        form.get("command"),
        form.get("text"),
        form.get("user_id"),
        form.get("channel_id"),
        "",
        form.get("trigger_id"),
    )
    return Response(), 200
@app.route("/Button_Pressed",methods=["POST"])
def reaction():
    """Slack interactive-component webhook (buttons, menus, dialog submits).

    Verifies the Slack token, then re-dispatches the originating command —
    packed as JSON into callback_id — through run_command, appending the
    user's selection to the code argument as "<marker>|<json value>".
    """
    response = json.loads(request.form.get("payload"))
    # reject payloads that don't carry our verification token
    if response["token"] != get_verification_token():
        return Response(),400
    if response["type"] == "interactive_message":
        # button press / menu selection attached to a message
        if response["actions"][0].get("value") == "No":
            # "No" on a confirmation button aborts the pending command
            return Response("*Request Cancelled*"), 200
        else:
            print(response["actions"][0])
            try:
                # menu selections arrive under selected_options…
                value=response["actions"][0]["selected_options"][0]["value"]
            except:
                # …plain buttons carry a direct value
                value=response["actions"][0].get("value")
            inputs=json.loads(response["callback_id"])
            run_command(inputs["0"],inputs["1"],inputs["2"],inputs["3"],inputs["4"] + "|" + json.dumps(value),response["trigger_id"])
            return Response("*Input Confirmed*"), 200
    # anything else is a dialog submission; its fields are under "submission"
    value=response["submission"]
    inputs=json.loads(response["callback_id"])
    run_command(inputs["0"],inputs["1"],inputs["2"],inputs["3"],inputs["4"] + "|" + json.dumps(value),None)
    return Response(""), 200
@app.route("/", methods=['GET'])
def test():
    """Health-check endpoint: always answers 200 with an empty body."""
    empty_response = Response()
    return empty_response, 200
@app.route("/new_transaction",methods=["POST"])
def check():
    """Wallet-notify webhook: credit an incoming deposit transaction.

    Requires the shared verification token; mismatches are logged and
    rejected with a 400.
    """
    supplied_token = request.form.get("Token")
    if supplied_token != get_verification_token():
        print("ALERT: NEW TRANSACTION WITH INCORRECT TOKEN")
        return Response(), 400
    check_incoming_transactions(request.form.get("TX_ID"))
    return Response(), 200
if __name__ == "__main__":
    # NOTE(review): `global` at module level is a no-op — main_json is already
    # module scope; this line can be removed safely.
    global main_json
    # threaded=True lets Flask handle overlapping Slack webhook calls
    app.run(debug=False,threaded=True)
| 26,590 | 0 | 388 |