| hexsha (string, 40 chars) | size (int64, 4 to 1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, 4 to 209 chars) | max_stars_repo_name (string, 5 to 121 chars) | max_stars_repo_head_hexsha (string, 40 chars) | max_stars_repo_licenses (list, 1 to 10 items) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24 chars, nullable) | max_stars_repo_stars_event_max_datetime (string, 24 chars, nullable) | max_issues_repo_path (string, 4 to 209 chars) | max_issues_repo_name (string, 5 to 121 chars) | max_issues_repo_head_hexsha (string, 40 chars) | max_issues_repo_licenses (list, 1 to 10 items) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24 chars, nullable) | max_issues_repo_issues_event_max_datetime (string, 24 chars, nullable) | max_forks_repo_path (string, 4 to 209 chars) | max_forks_repo_name (string, 5 to 121 chars) | max_forks_repo_head_hexsha (string, 40 chars) | max_forks_repo_licenses (list, 1 to 10 items) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24 chars, nullable) | max_forks_repo_forks_event_max_datetime (string, 24 chars, nullable) | content (string, 4 to 1.02M chars) | avg_line_length (float64, 1.07 to 66.1k) | max_line_length (int64, 4 to 266k) | alphanum_fraction (float64, 0.01 to 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 55820c4e9bc3610f9e11e13031b4f14985df3508 | 7,955 | py | Python | pysnmp/ITOUCH-EVENT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/ITOUCH-EVENT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/ITOUCH-EVENT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z |
#
# PySNMP MIB module ITOUCH-EVENT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ITOUCH-EVENT-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:46:50 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint")
iTouch, DateTime = mibBuilder.importSymbols("ITOUCH-MIB", "iTouch", "DateTime")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Bits, MibIdentifier, IpAddress, Counter32, ObjectIdentity, Counter64, Unsigned32, TimeTicks, iso, Gauge32, ModuleIdentity, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Bits", "MibIdentifier", "IpAddress", "Counter32", "ObjectIdentity", "Counter64", "Unsigned32", "TimeTicks", "iso", "Gauge32", "ModuleIdentity", "Integer32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
xEvent = MibIdentifier((1, 3, 6, 1, 4, 1, 33, 33))
class EventGroup(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58))
namedValues = NamedValues(("appleTalk", 1), ("appleTalkArps", 2), ("appleTalkRtmp", 3), ("appleTalkZip", 4), ("appleTalkNbp", 5), ("appleTalkTraffic", 6), ("atm", 7), ("backup", 8), ("pcmcia", 9), ("chassis", 10), ("circuit", 11), ("clns", 12), ("decNet", 13), ("decNetTraffic", 14), ("egp", 15), ("esis", 16), ("fddi", 17), ("fddiTraffic", 18), ("frame", 19), ("frameRelay", 20), ("hubManagement", 21), ("interface", 22), ("ip", 23), ("ipRip", 24), ("ipRoutes", 25), ("ipTraffic", 26), ("ipx", 27), ("ipxRip", 28), ("ipxSap", 29), ("isdn", 30), ("isdnQ931", 31), ("isdnTrace", 32), ("isis", 33), ("isisHello", 34), ("isisLsp", 35), ("link", 36), ("lmb", 37), ("lqm", 38), ("ospf", 39), ("ospfHello", 40), ("ospfLsaPacket", 41), ("ospfSpf", 42), ("param", 43), ("ppp", 44), ("session", 45), ("spanningTree", 46), ("snmp", 47), ("switchForwarding", 48), ("switchLoopDetect", 49), ("switchManagement", 50), ("system", 51), ("tcp", 52), ("time", 53), ("tokenRingManagement", 54), ("udp", 55), ("ui", 56), ("vlmp", 57), ("x25", 58))
eventTableSize = MibScalar((1, 3, 6, 1, 4, 1, 33, 33, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(20, 800)).clone(100)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eventTableSize.setStatus('mandatory')
eventSeverity = MibScalar((1, 3, 6, 1, 4, 1, 33, 33, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("discard", 1), ("low", 2), ("medium", 3), ("high", 4))).clone('low')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eventSeverity.setStatus('mandatory')
eventTimestamp = MibScalar((1, 3, 6, 1, 4, 1, 33, 33, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("date", 2), ("time", 3), ("datetime", 4))).clone('datetime')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eventTimestamp.setStatus('mandatory')
eventLanguage = MibScalar((1, 3, 6, 1, 4, 1, 33, 33, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("english", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventLanguage.setStatus('mandatory')
eventClearLog = MibScalar((1, 3, 6, 1, 4, 1, 33, 33, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ready", 1), ("execute", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eventClearLog.setStatus('mandatory')
eventEnableAll = MibScalar((1, 3, 6, 1, 4, 1, 33, 33, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ready", 1), ("execute", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eventEnableAll.setStatus('mandatory')
eventDisableAll = MibScalar((1, 3, 6, 1, 4, 1, 33, 33, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ready", 1), ("execute", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eventDisableAll.setStatus('mandatory')
eventGroupTable = MibTable((1, 3, 6, 1, 4, 1, 33, 33, 8), )
if mibBuilder.loadTexts: eventGroupTable.setStatus('mandatory')
eventGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 33, 33, 8, 1), ).setIndexNames((0, "ITOUCH-EVENT-MIB", "eventGroupIndex"))
if mibBuilder.loadTexts: eventGroupEntry.setStatus('mandatory')
eventGroupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 33, 8, 1, 1), EventGroup()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventGroupIndex.setStatus('mandatory')
eventGroupState = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 33, 8, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: eventGroupState.setStatus('mandatory')
eventTextTable = MibTable((1, 3, 6, 1, 4, 1, 33, 33, 9), )
if mibBuilder.loadTexts: eventTextTable.setStatus('mandatory')
eventTextEntry = MibTableRow((1, 3, 6, 1, 4, 1, 33, 33, 9, 1), ).setIndexNames((0, "ITOUCH-EVENT-MIB", "eventTextGroupIndex"), (0, "ITOUCH-EVENT-MIB", "eventTextEventIndex"))
if mibBuilder.loadTexts: eventTextEntry.setStatus('mandatory')
eventTextGroupIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 33, 9, 1, 1), EventGroup()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventTextGroupIndex.setStatus('mandatory')
eventTextEventIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 33, 9, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventTextEventIndex.setStatus('mandatory')
eventTextText = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 33, 9, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventTextText.setStatus('mandatory')
eventTextDateTime = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 33, 9, 1, 4), DateTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventTextDateTime.setStatus('mandatory')
eventTextSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 33, 33, 9, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4))).clone(namedValues=NamedValues(("low", 2), ("medium", 3), ("high", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventTextSeverity.setStatus('mandatory')
mibBuilder.exportSymbols("ITOUCH-EVENT-MIB", eventTextText=eventTextText, eventGroupState=eventGroupState, eventEnableAll=eventEnableAll, eventClearLog=eventClearLog, eventGroupIndex=eventGroupIndex, xEvent=xEvent, eventTextEntry=eventTextEntry, eventGroupTable=eventGroupTable, eventTimestamp=eventTimestamp, eventTableSize=eventTableSize, eventTextTable=eventTextTable, eventTextGroupIndex=eventTextGroupIndex, eventGroupEntry=eventGroupEntry, eventSeverity=eventSeverity, EventGroup=EventGroup, eventDisableAll=eventDisableAll, eventTextDateTime=eventTextDateTime, eventTextSeverity=eventTextSeverity, eventLanguage=eventLanguage, eventTextEventIndex=eventTextEventIndex)
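The module above never defines `mibBuilder`; pysnmp supplies it when it loads the file from a MIB source directory. A minimal loading sketch, assuming the generated file sits in a local `pysnmp/` directory as in the repository path above (the directory name is an assumption):

from pysnmp.smi import builder

# Build a MIB loader and point it at the directory holding ITOUCH-EVENT-MIB.py (assumed path).
mib_builder = builder.MibBuilder()
mib_builder.addMibSources(builder.DirMibSource('pysnmp'))
mib_builder.loadModules('ITOUCH-EVENT-MIB')

# Read one of the exported symbols back out of the loaded module.
(event_table_size,) = mib_builder.importSymbols('ITOUCH-EVENT-MIB', 'eventTableSize')
print(event_table_size.getName())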
| 139.561404 | 1,032 | 0.726838 |
| 6983708261accaee0fb9b705386f68b97acf8514 | 498 | py | Python | veripy/steps/navigation/actions/press_keyboard_key.py | m-martinez/veripy | 993bb498e4cdac44d76284a624d306aaf2e2215a | ["MIT"] | null | null | null | veripy/steps/navigation/actions/press_keyboard_key.py | m-martinez/veripy | 993bb498e4cdac44d76284a624d306aaf2e2215a | ["MIT"] | null | null | null | veripy/steps/navigation/actions/press_keyboard_key.py | m-martinez/veripy | 993bb498e4cdac44d76284a624d306aaf2e2215a | ["MIT"] | null | null | null |
import logging
from behave import when
from veripy import custom_types # noqa
logger = logging.getLogger('navigation')
@when('the user presses the "{keyboard_key:pressable_key_type}" key')
def when_press_key(context, keyboard_key):
""" Press the given key.
::
the user presses the "Return" key
"""
logger.info(f'Pressing the "{keyboard_key}" key.')
active_web_element = context.browser.driver.switch_to.active_element
active_web_element.send_keys(keyboard_key)
| 27.666667 | 72 | 0.73494 |
| 2e8caab4be5101bd2972a7e99016ae576c48bc9f | 161 | py | Python | tests/web_platform/css_flexbox_1/test_flex_wrap_wrap_reverse.py | fletchgraham/colosseum | 77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f | ["BSD-3-Clause"] | null | null | null | tests/web_platform/css_flexbox_1/test_flex_wrap_wrap_reverse.py | fletchgraham/colosseum | 77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f | ["BSD-3-Clause"] | null | null | null | tests/web_platform/css_flexbox_1/test_flex_wrap_wrap_reverse.py | fletchgraham/colosseum | 77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f | ["BSD-3-Clause"] | 1 | 2020-01-16T01:56:41.000Z | 2020-01-16T01:56:41.000Z |
from tests.utils import W3CTestCase
class TestFlexWrap_WrapReverse(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'flex-wrap_wrap-reverse'))
| 26.833333 | 77 | 0.807453 |
| 5951a2cf6d809b252bd569740997fd02371afe68 | 3,448 | py | Python | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/odata_resource_dataset_py3.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | ["MIT"] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/odata_resource_dataset_py3.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | ["MIT"] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/odata_resource_dataset_py3.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | ["MIT"] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .dataset_py3 import Dataset
class ODataResourceDataset(Dataset):
"""The Open Data Protocol (OData) resource dataset.
All required parameters must be populated in order to send to Azure.
    :param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param description: Dataset description.
:type description: str
:param structure: Columns that define the structure of the dataset. Type:
array (or Expression with resultType array), itemType: DatasetDataElement.
:type structure: object
:param schema: Columns that define the physical type schema of the
dataset. Type: array (or Expression with resultType array), itemType:
DatasetSchemaDataElement.
:type schema: object
:param linked_service_name: Required. Linked service reference.
:type linked_service_name:
~azure.mgmt.datafactory.models.LinkedServiceReference
:param parameters: Parameters for dataset.
:type parameters: dict[str,
~azure.mgmt.datafactory.models.ParameterSpecification]
:param annotations: List of tags that can be used for describing the
Dataset.
:type annotations: list[object]
:param folder: The folder that this Dataset is in. If not specified,
Dataset will appear at the root level.
:type folder: ~azure.mgmt.datafactory.models.DatasetFolder
:param type: Required. Constant filled by server.
:type type: str
:param path: The OData resource path. Type: string (or Expression with
resultType string).
:type path: object
"""
_validation = {
'linked_service_name': {'required': True},
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'description': {'key': 'description', 'type': 'str'},
'structure': {'key': 'structure', 'type': 'object'},
'schema': {'key': 'schema', 'type': 'object'},
'linked_service_name': {'key': 'linkedServiceName', 'type': 'LinkedServiceReference'},
'parameters': {'key': 'parameters', 'type': '{ParameterSpecification}'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'folder': {'key': 'folder', 'type': 'DatasetFolder'},
'type': {'key': 'type', 'type': 'str'},
'path': {'key': 'typeProperties.path', 'type': 'object'},
}
def __init__(self, *, linked_service_name, additional_properties=None, description: str=None, structure=None, schema=None, parameters=None, annotations=None, folder=None, path=None, **kwargs) -> None:
super(ODataResourceDataset, self).__init__(additional_properties=additional_properties, description=description, structure=structure, schema=schema, linked_service_name=linked_service_name, parameters=parameters, annotations=annotations, folder=folder, **kwargs)
self.path = path
self.type = 'ODataResource'
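A short construction sketch for the model defined above; the `LinkedServiceReference` import path, reference name, and resource path are assumptions for illustration, not values taken from this file:

from azure.mgmt.datafactory.models import LinkedServiceReference

# Hypothetical dataset pointing at an OData entity set via an existing linked service.
dataset = ODataResourceDataset(
    linked_service_name=LinkedServiceReference(reference_name='MyODataLinkedService'),
    path='Customers',
    description='Example OData resource dataset',
)
print(dataset.type)  # 'ODataResource', set by __init__ above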
| 47.232877 | 270 | 0.666763 |
| b08cff79c754d41293c8f65359b099619996a4b6 | 2,161 | py | Python | pygazebo/msg/sonar_stamped_pb2.py | WindhoverLabs/pygazebo | 9c977703be5c04fe931e7ec522fb7aa1e6bbe05e | ["Apache-2.0"] | null | null | null | pygazebo/msg/sonar_stamped_pb2.py | WindhoverLabs/pygazebo | 9c977703be5c04fe931e7ec522fb7aa1e6bbe05e | ["Apache-2.0"] | null | null | null | pygazebo/msg/sonar_stamped_pb2.py | WindhoverLabs/pygazebo | 9c977703be5c04fe931e7ec522fb7aa1e6bbe05e | ["Apache-2.0"] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: sonar_stamped.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
import time_pb2
import sonar_pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='sonar_stamped.proto',
package='gazebo.msgs',
serialized_pb='\n\x13sonar_stamped.proto\x12\x0bgazebo.msgs\x1a\ntime.proto\x1a\x0bsonar.proto\"R\n\x0cSonarStamped\x12\x1f\n\x04time\x18\x01 \x02(\x0b\x32\x11.gazebo.msgs.Time\x12!\n\x05sonar\x18\x02 \x02(\x0b\x32\x12.gazebo.msgs.Sonar')
_SONARSTAMPED = _descriptor.Descriptor(
name='SonarStamped',
full_name='gazebo.msgs.SonarStamped',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='time', full_name='gazebo.msgs.SonarStamped.time', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sonar', full_name='gazebo.msgs.SonarStamped.sonar', index=1,
number=2, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=61,
serialized_end=143,
)
_SONARSTAMPED.fields_by_name['time'].message_type = time_pb2._TIME
_SONARSTAMPED.fields_by_name['sonar'].message_type = sonar_pb2._SONAR
DESCRIPTOR.message_types_by_name['SonarStamped'] = _SONARSTAMPED
class SonarStamped(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _SONARSTAMPED
# @@protoc_insertion_point(class_scope:gazebo.msgs.SonarStamped)
# @@protoc_insertion_point(module_scope)
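The generated class is used like any other protobuf message. A small round-trip sketch, assuming the module and its `time_pb2`/`sonar_pb2` dependencies are importable (note the file targets the old Python 2 protobuf API):

# Round-trip the message through its wire format.
msg = SonarStamped()
payload = msg.SerializePartialToString()   # "partial" because the required time/sonar fields may be unset
decoded = SonarStamped()
decoded.MergeFromString(payload)
print(decoded.HasField('time'), decoded.HasField('sonar'))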
| 31.318841 | 240 | 0.763998 |
| 1078f89303a730ec37e746c907e1a30f1cb3b056 | 5,871 | py | Python | test/test_GetUrls.py | AngusLean/wechat_articles_spider | ff268beb8dbe774fb1ed87e425668f02f93a6c08 | ["Apache-2.0"] | null | null | null | test/test_GetUrls.py | AngusLean/wechat_articles_spider | ff268beb8dbe774fb1ed87e425668f02f93a6c08 | ["Apache-2.0"] | null | null | null | test/test_GetUrls.py | AngusLean/wechat_articles_spider | ff268beb8dbe774fb1ed87e425668f02f93a6c08 | ["Apache-2.0"] | null | null | null |
# coding: utf-8
import html
import os
import random
import re
import time
from pprint import pprint
import json
# import pandas as pd  # only needed if the results are saved to an Excel sheet
import requests
from wechatarticles import ArticlesInfo, ArticlesUrls
from wechatarticles.GetUrls import MobileUrls, PCUrls
from wechatarticles.ReadOutfile import Reader
def flatten(x):
return [y for l in x for y in flatten(l)] if type(x) is list else [x]
def transfer_url(url):
url = html.unescape(html.unescape(url))
return eval(repr(url).replace('\\', ''))
def verify_url(article_url):
verify_lst = ["mp.weixin.qq.com", "__biz", "mid", "sn", "idx"]
for string in verify_lst:
if string not in article_url:
return False
return True
def get_all_urls(urls):
    # collect every article url
url_lst = []
for item in urls:
url_lst.append(transfer_url(item['app_msg_ext_info']['content_url']))
if 'multi_app_msg_item_list' in item['app_msg_ext_info'].keys():
for ss in item['app_msg_ext_info']['multi_app_msg_item_list']:
url_lst.append(transfer_url(ss['content_url']))
return url_lst
def get_all_urls_title_date(urls):
    # collect every [url, title, date] triple
url_lst = []
for item in urls:
timestamp = item['comm_msg_info']['datetime']
time_local = time.localtime(timestamp)
        # convert the timestamp to a calendar date
time_temp = time.strftime("%Y-%m-%d", time_local)
        # article url
url_temp = transfer_url(item['app_msg_ext_info']['content_url'])
        # article title
title_temp = item['app_msg_ext_info']['title']
url_lst.append([url_temp, title_temp, time_temp])
if 'multi_app_msg_item_list' in item['app_msg_ext_info'].keys():
for info in item['app_msg_ext_info']['multi_app_msg_item_list']:
url_temp = transfer_url(info['content_url'])
title_temp = info['title']
url_lst.append([url_temp, title_temp, time_temp])
return url_lst
def method_one(biz, uin, cookie):
    t = PCUrls(biz=biz, uin=uin, cookie=cookie)
    count = 0
    lst = []
    while True:
        res = t.get_urls(key, offset=count)
        if not res:
            # stop once no more article urls are returned
            break
        count += 10
        lst.append(res)
    return lst
def method_two(biz, cookie):
    t = MobileUrls(biz=biz, cookie=cookie)
    count = 0
    lst = []
    while True:
        res = t.get_urls(appmsg_token, offset=count)
        if not res:
            # stop once no more article urls are returned
            break
        count += 10
        lst.append(res)
    return lst
def get_info_from_url(url):
html = requests.get(url).text
try:
res = re.findall(r'publish_time =.+\|\|?', html)
date = res[0].split('=')[1].split('||')[0].strip()
except:
date = None
try:
res = re.findall(r'nickname .+;?', html)
offical_name = res[0].split('=')[1][:-1].strip()
except:
offical_name = None
try:
res = re.findall(r'msg_title = .+;?', html)
aritlce_name = res[0].split('=')[1][:-1].strip()
except:
aritlce_name = None
return date, offical_name, aritlce_name
def save_xlsx(fj, lst):
df = pd.DataFrame(
lst,
columns=['url', 'title', 'date', 'read_num', 'like_num', 'comments'])
df.to_excel(fj + '.xlsx', encoding='utf-8')
def get_data(url):
pass
if __name__ == '__main__':
    '''
    # Method 1: use PCUrls. Tested on Windows 10.
    # __biz parameter of the official account to crawl
    biz = ''
    # uin obtained after logging in with a personal WeChat account
    uin = ''
    # cookie obtained after logging in with a personal WeChat account
    cookie = ''
    # key obtained after logging in with a personal WeChat account; it is refreshed periodically
    key = ''
    lst = method_one(biz, uin, cookie)
    # token obtained after logging in with a personal WeChat account
    appmsg_token = ''
    '''
    # Method 2: use MobileUrls. Tested on Ubuntu.
    #------------method_one
    # fetch the parameters automatically
    from ReadOutfile import Reader
    biz = ''  # fill in the target official account's __biz value
    # fetch appmsg_token and cookie automatically
outfile = 'outfile'
reader = Reader()
reader.contral(outfile)
appmsg_token, cookie = reader.request(outfile)
    # or capture appmsg_token and cookie manually with a packet-capture tool and enter them here
# appmsg_token = appmsg_token
# cookie = cookie
#----------method_two
lst = method_two(biz, cookie)
    # flatten the nested list
# lst = flatten(lst)
    # extract the urls
# url_lst = get_all_urls(lst)
    # fetch read counts, like counts and comment data
test = ArticlesInfo(appmsg_token, cookie)
"""
data_lst = []
for i, url in enumerate(url_lst):
item = test.comments(url)
temp_lst = [url, item]
try:
read_num, like_num = test.read_like_nums(url)
temp_lst.append(read_num)
temp_lst.append(like_num)
except:
            print("Fetch #{} failed, please update the parameters".format(i + 1))
break
data_lst.append(temp_lst)
"""
    # json records of the historical articles
data = []
    fj = 'official-account-name'  # used as the output file name prefix
    item_lst = []
    flag = 0  # set to 1 when fetching an article fails, so the outer loop stops early
    for i, line in enumerate(data, 0):
print("index:", i)
item = json.loads('{' + line + '}', strict=False)
timestamp = item["comm_msg_info"]["datetime"]
ymd = time.localtime(timestamp)
date = '{}-{}-{}'.format(ymd.tm_year, ymd.tm_mon, ymd.tm_mday)
infos = item['app_msg_ext_info']
url_title_lst = [[infos['content_url'], infos['title']]]
if 'multi_app_msg_item_list' in infos.keys():
url_title_lst += [[info['content_url'], info['title']]
for info in infos['multi_app_msg_item_list']]
for url, title in url_title_lst:
try:
if not verify_url(url):
continue
read_num, like_num, comments = get_data(url)
print(read_num, like_num, len(comments))
item_lst.append(
[url, title, date, read_num, like_num, comments])
time.sleep(random.randint(5, 10))
except Exception as e:
print(e)
flag = 1
break
# finally:
# save_xlsx(fj, item_lst)
if flag == 1:
break
# save_xlsx(fj, item_lst)
| 25.415584 | 77 | 0.580991 |
| 2f6f904b8e2ee080fff64045638f65ca568a166c | 219 | py | Python | setup.py | ashili/StatCalculator | a221d6c8cd7cd0b324158b15f6cb23e58bf594a3 | ["MIT"] | 1 | 2020-07-09T04:05:45.000Z | 2020-07-09T04:05:45.000Z | setup.py | ashili/StatCalculator | a221d6c8cd7cd0b324158b15f6cb23e58bf594a3 | ["MIT"] | null | null | null | setup.py | ashili/StatCalculator | a221d6c8cd7cd0b324158b15f6cb23e58bf594a3 | ["MIT"] | null | null | null |
from setuptools import setup, find_packages
setup(name='Calculator',
version='0.0.1',
description='Calculator',
author='',
author_email='',
url='',
packages=find_packages(),
)
| 19.909091 | 43 | 0.593607 |
| 361687572c2d14058ae481e92acba205f0096066 | 5,265 | py | Python | rnn/external_rnn.py | fberanizo/neural_network | aa48707ea3de80bcf83176b0c3379f935ab01843 | ["BSD-2-Clause"] | null | null | null | rnn/external_rnn.py | fberanizo/neural_network | aa48707ea3de80bcf83176b0c3379f935ab01843 | ["BSD-2-Clause"] | null | null | null | rnn/external_rnn.py | fberanizo/neural_network | aa48707ea3de80bcf83176b0c3379f935ab01843 | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
import numpy, matplotlib.pyplot as plt, time
from sklearn.metrics import mean_squared_error, accuracy_score, roc_auc_score
class ExternalRNN(object):
    """Class that implements an External Recurrent Neural Network"""
def __init__(self, hidden_layer_size=3, learning_rate=0.2, max_epochs=1000, delays=2):
self.hidden_layer_size = hidden_layer_size
self.learning_rate = learning_rate
self.max_epochs = max_epochs
self.delays = delays
self.auc = 0.5
def fit(self, X, y):
"""Trains the network and returns the trained network"""
self.input_layer_size = X.shape[1]
self.output_layer_size = y.shape[1]
remaining_epochs = self.max_epochs
# Initialize weights
self.W1 = numpy.random.rand(1 + self.input_layer_size, self.hidden_layer_size)
self.W2 = numpy.random.rand(1 + self.hidden_layer_size, self.output_layer_size)
self.W3 = numpy.random.rand(self.output_layer_size * self.delays, self.hidden_layer_size)
self.Ydelayed = numpy.zeros((1, self.output_layer_size * self.delays))
epsilon = 0.001
error = 1
self.J = [] # error
# Repeats until error is small enough or max epochs is reached
while error > epsilon and remaining_epochs > 0:
total_error = numpy.array([])
# For each input instance
for self.X, self.y in zip(X, y):
self.X = numpy.array([self.X])
self.y = numpy.array([self.y])
error, gradients = self.single_step(self.X, self.y)
total_error = numpy.append(total_error, error)
dJdW1 = gradients[0]
dJdW2 = gradients[1]
dJdW3 = gradients[2]
# Calculates new weights
self.W1 = self.W1 - self.learning_rate * dJdW1
self.W2 = self.W2 - self.learning_rate * dJdW2
self.W3 = self.W3 - self.learning_rate * dJdW3
# Shift Ydelayed values through time
self.Ydelayed = numpy.roll(self.Ydelayed, 1, 1)
self.Ydelayed[:,::self.delays] = self.Y
# Saves error for plot
error = total_error.mean()
self.J.append(error)
#print 'Epoch: ' + str(remaining_epochs)
#print 'Error: ' + str(error)
remaining_epochs -= 1
# After training, we plot error in order to see how it behaves
#plt.plot(self.J[1:])
#plt.grid(1)
#plt.ylabel('Cost')
#plt.xlabel('Iterations')
#plt.show()
return self
def predict(self, X):
"""Predicts test values"""
Y = map(lambda x: self.forward(numpy.array([x]))[0], X)
Y = map(lambda y: 1 if y > self.auc else 0, Y)
return numpy.array(Y)
def score(self, X, y_true):
"""Calculates accuracy"""
y_pred = map(lambda x: self.forward(numpy.array([x]))[0], X)
auc = roc_auc_score(y_true, y_pred)
y_pred = map(lambda y: 1 if y > self.auc else 0, y_pred)
y_pred = numpy.array(y_pred)
return accuracy_score(y_true.flatten(), y_pred.flatten())
def single_step(self, X, y):
"""Runs single step training method"""
self.Y = self.forward(X)
cost = self.cost(self.Y, y)
gradients = self.backpropagate(X, y)
return cost, gradients
def forward(self, X):
"""Passes input values through network and return output values"""
self.Zin = numpy.dot(X, self.W1[:-1,:])
self.Zin += numpy.dot(numpy.ones((1, 1)), self.W1[-1:,:])
self.Zin += numpy.dot(self.Ydelayed, self.W3)
self.Z = self.sigmoid(self.Zin)
self.Z = numpy.nan_to_num(self.Z)
self.Yin = numpy.dot(self.Z, self.W2[:-1,])
self.Yin += numpy.dot(numpy.ones((1, 1)), self.W2[-1:,:])
Y = self.linear(self.Yin)
Y = numpy.nan_to_num(Y)
return Y
def cost(self, Y, y):
"""Calculates network output error"""
return mean_squared_error(Y, y)
def backpropagate(self, X, y):
"""Backpropagates costs through the network"""
delta3 = numpy.multiply(-(y-self.Y), self.linear_derivative(self.Yin))
dJdW2 = numpy.dot(self.Z.T, delta3)
dJdW2 = numpy.append(dJdW2, numpy.dot(numpy.ones((1, 1)), delta3), axis=0)
delta2 = numpy.dot(delta3, self.W2[:-1,:].T)*self.sigmoid_derivative(self.Zin)
dJdW1 = numpy.dot(X.T, delta2)
dJdW1 = numpy.append(dJdW1, numpy.dot(numpy.ones((1, 1)), delta2), axis=0)
dJdW3 = numpy.dot(numpy.repeat(self.Ydelayed, self.output_layer_size * self.delays, 0), \
numpy.repeat(delta2, self.output_layer_size * self.delays, 0))
return dJdW1, dJdW2, dJdW3
def sigmoid(self, z):
"""Apply sigmoid activation function"""
return 1/(1+numpy.exp(-z))
def sigmoid_derivative(self, z):
"""Derivative of sigmoid function"""
return numpy.exp(-z)/((1+numpy.exp(-z))**2)
def linear(self, z):
"""Apply linear activation function"""
return z
    def linear_derivative(self, z):
        """Derivative of the linear function"""
return 1
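A toy training sketch for the class above, assuming 2-D numpy arrays as `fit` expects; the data is random and purely illustrative. Note the class targets Python 2, where `map` returns a list, so under Python 3 the `map(...)` calls in `predict` and `score` would need to be wrapped in `list(...)`:

import numpy

X_train = numpy.random.rand(20, 4)             # 20 samples, 4 input features
y_train = numpy.random.randint(0, 2, (20, 1))  # binary targets, one output unit

rnn = ExternalRNN(hidden_layer_size=3, learning_rate=0.2, max_epochs=20, delays=2)
rnn.fit(X_train, y_train)
print(rnn.predict(X_train))                    # thresholded outputs, one per sample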
| 37.077465 | 97 | 0.588224 |
| c0d4cc01a91baf18dd02165da251d155093e1bb3 | 4,254 | py | Python | var/spack/repos/builtin/packages/slurm/package.py | asmaahassan90/spack | b6779d2e31170eb77761f59bed640afbc469e4ec | ["ECL-2.0", "Apache-2.0", "MIT"] | 1 | 2020-05-24T15:23:12.000Z | 2020-05-24T15:23:12.000Z | var/spack/repos/builtin/packages/slurm/package.py | danlipsa/spack | 699ae50ebf13ee425a482988ccbd4c3c994ab5e6 | ["ECL-2.0", "Apache-2.0", "MIT"] | 6 | 2022-02-26T11:44:34.000Z | 2022-03-12T12:14:50.000Z | var/spack/repos/builtin/packages/slurm/package.py | danlipsa/spack | 699ae50ebf13ee425a482988ccbd4c3c994ab5e6 | ["ECL-2.0", "Apache-2.0", "MIT"] | 1 | 2021-01-06T18:58:26.000Z | 2021-01-06T18:58:26.000Z |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Slurm(AutotoolsPackage):
"""Slurm is an open source, fault-tolerant, and highly scalable cluster
management and job scheduling system for large and small Linux clusters.
Slurm requires no kernel modifications for its operation and is relatively
self-contained. As a cluster workload manager, Slurm has three key
functions. First, it allocates exclusive and/or non-exclusive access to
resources (compute nodes) to users for some duration of time so they can
perform work. Second, it provides a framework for starting, executing,
and monitoring work (normally a parallel job) on the set of allocated
nodes. Finally, it arbitrates contention for resources by managing a
queue of pending work.
"""
homepage = 'https://slurm.schedmd.com'
url = 'https://github.com/SchedMD/slurm/archive/slurm-19-05-6-1.tar.gz'
version('19-05-6-1', sha256='1b83bce4260af06d644253b1f2ec2979b80b4418c631e9c9f48c2729ae2c95ba')
version('19-05-5-1', sha256='e53e67bd0bb4c37a9c481998764a746467a96bc41d6527569080514f36452c07')
version('18-08-9-1', sha256='32eb0b612ca18ade1e35c3c9d3b4d71aba2b857446841606a9e54d0a417c3b03')
version('18-08-0-1', sha256='62129d0f2949bc8a68ef86fe6f12e0715cbbf42f05b8da6ef7c3e7e7240b50d9')
version('17-11-9-2', sha256='6e34328ed68262e776f524f59cca79ac75bcd18030951d45ea545a7ba4c45906')
version('17-02-6-1', sha256='97b3a3639106bd6d44988ed018e2657f3d640a3d5c105413d05b4721bc8ee25e')
variant('gtk', default=False, description='Enable GTK+ support')
variant('mariadb', default=False, description='Use MariaDB instead of MySQL')
variant('hwloc', default=False, description='Enable hwloc support')
variant('hdf5', default=False, description='Enable hdf5 support')
variant('readline', default=True, description='Enable readline support')
variant('pmix', default=False, description='Enable PMIx support')
variant('sysconfdir', default='PREFIX/etc', values=any,
description='Set system configuration path (possibly /etc/slurm)')
# TODO: add variant for BG/Q and Cray support
# TODO: add support for checkpoint/restart (BLCR)
# TODO: add support for lua
depends_on('curl')
depends_on('glib')
depends_on('json-c')
depends_on('lz4')
depends_on('munge')
depends_on('openssl')
depends_on('pkgconfig', type='build')
depends_on('readline', when='+readline')
depends_on('zlib')
depends_on('gtkplus', when='+gtk')
depends_on('hdf5', when='+hdf5')
depends_on('hwloc', when='+hwloc')
depends_on('mariadb', when='+mariadb')
depends_on('pmix', when='+pmix')
def configure_args(self):
spec = self.spec
args = [
'--with-libcurl={0}'.format(spec['curl'].prefix),
'--with-json={0}'.format(spec['json-c'].prefix),
'--with-lz4={0}'.format(spec['lz4'].prefix),
'--with-munge={0}'.format(spec['munge'].prefix),
'--with-ssl={0}'.format(spec['openssl'].prefix),
'--with-zlib={0}'.format(spec['zlib'].prefix),
]
if '~gtk' in spec:
args.append('--disable-gtktest')
if '~readline' in spec:
args.append('--without-readline')
if '+hdf5' in spec:
args.append(
'--with-hdf5={0}'.format(spec['hdf5'].prefix.bin.h5cc)
)
else:
args.append('--without-hdf5')
if '+hwloc' in spec:
args.append('--with-hwloc={0}'.format(spec['hwloc'].prefix))
else:
args.append('--without-hwloc')
if '+pmix' in spec:
args.append('--with-pmix={0}'.format(spec['pmix'].prefix))
else:
args.append('--without-pmix')
sysconfdir = spec.variants['sysconfdir'].value
if sysconfdir != 'PREFIX/etc':
args.append('--sysconfdir={0}'.format(sysconfdir))
return args
def install(self, spec, prefix):
make('install')
make('-C', 'contribs/pmi2', 'install')
| 38.324324 | 99 | 0.657969 |
| 6ca2b8b4791bad1b3561c34f8ab1cf0c38e25794 | 1,433 | py | Python | web/feeds.py | nonomal/oh-my-rss | 68b9284e0acaf44ea389d675b71949177f9f3256 | ["MIT"] | 270 | 2019-09-05T05:51:19.000Z | 2022-03-12T18:26:13.000Z | web/feeds.py | nonomal/oh-my-rss | 68b9284e0acaf44ea389d675b71949177f9f3256 | ["MIT"] | 6 | 2019-09-06T03:52:47.000Z | 2021-04-10T06:21:14.000Z | web/feeds.py | nonomal/oh-my-rss | 68b9284e0acaf44ea389d675b71949177f9f3256 | ["MIT"] | 37 | 2019-09-06T05:13:24.000Z | 2022-01-21T08:05:33.000Z |
from django.contrib.syndication.views import Feed
from django.urls import reverse
from .models import Site, Article
from web.utils import get_content
class SiteFeed(Feed):
ttl = 12 * 3600
def get_object(self, request, site_id):
try:
return Site.objects.get(pk=site_id, status='active', creator__in=('system', 'wemp'))
except ValueError:
return Site.objects.get(name=site_id, status='active', creator__in=('system', 'wemp'))
def title(self, site):
return site.cname
def link(self, site):
return site.link
def description(self, site):
return site.brief
def feed_url(self, site):
return reverse('get_feed_entries', kwargs={"site_id": site.pk})
def author_name(self, site):
return site.author
def categories(self, site):
return ''
def feed_copyright(self, site):
if site.creator == 'wemp':
return site.favicon
return ''
def items(self, site):
return Article.objects.filter(site=site, status='active').order_by('-id')[:30]
def item_title(self, item):
return item.title
def item_description(self, item):
return get_content(item.uindex, item.site_id)
def item_link(self, item):
return item.src_url
def item_author_name(self, item):
return item.author
def item_pubdate(self, item):
return item.ctime
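`feed_url` reverses a route named `get_feed_entries`, so the feed is presumably wired into a URLconf along these lines; this is a hedged sketch, not code from the repository:

# Hypothetical urls.py wiring for the SiteFeed class above.
from django.urls import path
from web.feeds import SiteFeed

urlpatterns = [
    # Feed classes are callable views; site_id may be a numeric pk or a site name.
    path('feed/<site_id>', SiteFeed(), name='get_feed_entries'),
]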
| 25.140351 | 98 | 0.638521 |
| 22f80d0abbb0e0dc917ecc3ac2984c412730869b | 1,259 | py | Python | p2.py | iagooteroc/spark | 1d6f34a076ef949697b385d5c9f68368c41f8562 | ["MIT"] | null | null | null | p2.py | iagooteroc/spark | 1d6f34a076ef949697b385d5c9f68368c41f8562 | ["MIT"] | null | null | null | p2.py | iagooteroc/spark | 1d6f34a076ef949697b385d5c9f68368c41f8562 | ["MIT"] | null | null | null |
#!/usr/bin/python3
from pyspark import SparkContext, SparkConf
import fitting_alignment
n_proc = "local[8]"
conf = SparkConf().setAppName("p2").setMaster(n_proc)
sc = SparkContext(conf=conf)
sc.setLogLevel("INFO")
working_dir = ''
dataset_dir = working_dir + 'dataset.txt'
cadena_dir = working_dir + 'cadena.txt'
# Read the text file and create one RDD element per line
rddCadenas = sc.textFile(dataset_dir)
# Read the reference string
cadena_f = open(cadena_dir, "r")
cadena = cadena_f.read()
# Strip the trailing newline (\n) from the string
cadena = cadena[:-1]
cadena_f.close()
# Apply the fitting-alignment function (dropping the last character of c because it is a '\n')
rddAlineamientos = rddCadenas.map(lambda c: fitting_alignment.alinea(c[:-1],cadena))#.cache()
best_al = rddAlineamientos.max(lambda x: x[0])
worst_al = rddAlineamientos.min(lambda x: x[0])
print('###################################')
print('n_proc:',n_proc)
print('===================================')
print('Highest score:')
print(best_al)
print('===================================')
print('Lowest score:')
print(worst_al)
print('===================================')
input("Press Enter to finish...")
print('###################################')
| 33.131579 | 93 | 0.633042 |
| dafa9890288f8cda740b6257924d5bd50e0377b5 | 447 | py | Python | openshift/e2e-add-service-account.py | bbrowning/tektoncd-catalog | f5d5e85b0c24b76356b35f21e8f6d4fdc70b05c8 | ["Apache-2.0"] | null | null | null | openshift/e2e-add-service-account.py | bbrowning/tektoncd-catalog | f5d5e85b0c24b76356b35f21e8f6d4fdc70b05c8 | ["Apache-2.0"] | null | null | null | openshift/e2e-add-service-account.py | bbrowning/tektoncd-catalog | f5d5e85b0c24b76356b35f21e8f6d4fdc70b05c8 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# This will add a serviceAccount to a TaskRun/PipelineRun with pyyaml via
# STDIN/STDOUT eg:
#
# python openshift/e2e-add-service-account-tr.py \
# SERVICE_ACCOUNT < run.yaml > newfile.yaml
#
import yaml
import sys
data = list(yaml.load_all(sys.stdin))
for x in data:
if x['kind'] in ('PipelineRun', 'TaskRun'):
x['spec']['serviceAccountName'] = sys.argv[1]
print(yaml.dump_all(data, default_flow_style=False))
| 29.8 | 73 | 0.713647 |
| 55428178b4d9d8f2e18c1c477d8829eb3498aeee | 10,199 | py | Python | sdk/python/pulumi_google_native/networkmanagement/v1beta1/get_connectivity_test.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | ["Apache-2.0"] | 44 | 2021-04-18T23:00:48.000Z | 2022-02-14T17:43:15.000Z | sdk/python/pulumi_google_native/networkmanagement/v1beta1/get_connectivity_test.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | ["Apache-2.0"] | 354 | 2021-04-16T16:48:39.000Z | 2022-03-31T17:16:39.000Z | sdk/python/pulumi_google_native/networkmanagement/v1beta1/get_connectivity_test.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | ["Apache-2.0"] | 8 | 2021-04-24T17:46:51.000Z | 2022-01-05T10:40:21.000Z |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetConnectivityTestResult',
'AwaitableGetConnectivityTestResult',
'get_connectivity_test',
'get_connectivity_test_output',
]
@pulumi.output_type
class GetConnectivityTestResult:
def __init__(__self__, create_time=None, description=None, destination=None, display_name=None, labels=None, name=None, probing_details=None, protocol=None, reachability_details=None, related_projects=None, source=None, update_time=None):
if create_time and not isinstance(create_time, str):
raise TypeError("Expected argument 'create_time' to be a str")
pulumi.set(__self__, "create_time", create_time)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if destination and not isinstance(destination, dict):
raise TypeError("Expected argument 'destination' to be a dict")
pulumi.set(__self__, "destination", destination)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if labels and not isinstance(labels, dict):
raise TypeError("Expected argument 'labels' to be a dict")
pulumi.set(__self__, "labels", labels)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if probing_details and not isinstance(probing_details, dict):
raise TypeError("Expected argument 'probing_details' to be a dict")
pulumi.set(__self__, "probing_details", probing_details)
if protocol and not isinstance(protocol, str):
raise TypeError("Expected argument 'protocol' to be a str")
pulumi.set(__self__, "protocol", protocol)
if reachability_details and not isinstance(reachability_details, dict):
raise TypeError("Expected argument 'reachability_details' to be a dict")
pulumi.set(__self__, "reachability_details", reachability_details)
if related_projects and not isinstance(related_projects, list):
raise TypeError("Expected argument 'related_projects' to be a list")
pulumi.set(__self__, "related_projects", related_projects)
if source and not isinstance(source, dict):
raise TypeError("Expected argument 'source' to be a dict")
pulumi.set(__self__, "source", source)
if update_time and not isinstance(update_time, str):
raise TypeError("Expected argument 'update_time' to be a str")
pulumi.set(__self__, "update_time", update_time)
@property
@pulumi.getter(name="createTime")
def create_time(self) -> str:
"""
The time the test was created.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def description(self) -> str:
"""
The user-supplied description of the Connectivity Test. Maximum of 512 characters.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def destination(self) -> 'outputs.EndpointResponse':
"""
Destination specification of the Connectivity Test. You can use a combination of destination IP address, Compute Engine VM instance, or VPC network to uniquely identify the destination location. Even if the destination IP address is not unique, the source IP location is unique. Usually, the analysis can infer the destination endpoint from route information. If the destination you specify is a VM instance and the instance has multiple network interfaces, then you must also specify either a destination IP address or VPC network to identify the destination interface. A reachability analysis proceeds even if the destination location is ambiguous. However, the result can include endpoints that you don't intend to test.
"""
return pulumi.get(self, "destination")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
The display name of a Connectivity Test.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def labels(self) -> Mapping[str, str]:
"""
Resource labels to represent user-provided metadata.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> str:
"""
Unique name of the resource using the form: `projects/{project_id}/locations/global/connectivityTests/{test}`
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="probingDetails")
def probing_details(self) -> 'outputs.ProbingDetailsResponse':
"""
The probing details of this test from the latest run, present for applicable tests only. The details are updated when creating a new test, updating an existing test, or triggering a one-time rerun of an existing test.
"""
return pulumi.get(self, "probing_details")
@property
@pulumi.getter
def protocol(self) -> str:
"""
IP Protocol of the test. When not provided, "TCP" is assumed.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="reachabilityDetails")
def reachability_details(self) -> 'outputs.ReachabilityDetailsResponse':
"""
The reachability details of this test from the latest run. The details are updated when creating a new test, updating an existing test, or triggering a one-time rerun of an existing test.
"""
return pulumi.get(self, "reachability_details")
@property
@pulumi.getter(name="relatedProjects")
def related_projects(self) -> Sequence[str]:
"""
Other projects that may be relevant for reachability analysis. This is applicable to scenarios where a test can cross project boundaries.
"""
return pulumi.get(self, "related_projects")
@property
@pulumi.getter
def source(self) -> 'outputs.EndpointResponse':
"""
Source specification of the Connectivity Test. You can use a combination of source IP address, virtual machine (VM) instance, or Compute Engine network to uniquely identify the source location. Examples: If the source IP address is an internal IP address within a Google Cloud Virtual Private Cloud (VPC) network, then you must also specify the VPC network. Otherwise, specify the VM instance, which already contains its internal IP address and VPC network information. If the source of the test is within an on-premises network, then you must provide the destination VPC network. If the source endpoint is a Compute Engine VM instance with multiple network interfaces, the instance itself is not sufficient to identify the endpoint. So, you must also specify the source IP address or VPC network. A reachability analysis proceeds even if the source location is ambiguous. However, the test result may include endpoints that you don't intend to test.
"""
return pulumi.get(self, "source")
@property
@pulumi.getter(name="updateTime")
def update_time(self) -> str:
"""
The time the test's configuration was updated.
"""
return pulumi.get(self, "update_time")
class AwaitableGetConnectivityTestResult(GetConnectivityTestResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetConnectivityTestResult(
create_time=self.create_time,
description=self.description,
destination=self.destination,
display_name=self.display_name,
labels=self.labels,
name=self.name,
probing_details=self.probing_details,
protocol=self.protocol,
reachability_details=self.reachability_details,
related_projects=self.related_projects,
source=self.source,
update_time=self.update_time)
def get_connectivity_test(connectivity_test_id: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConnectivityTestResult:
"""
Gets the details of a specific Connectivity Test.
"""
__args__ = dict()
__args__['connectivityTestId'] = connectivity_test_id
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:networkmanagement/v1beta1:getConnectivityTest', __args__, opts=opts, typ=GetConnectivityTestResult).value
return AwaitableGetConnectivityTestResult(
create_time=__ret__.create_time,
description=__ret__.description,
destination=__ret__.destination,
display_name=__ret__.display_name,
labels=__ret__.labels,
name=__ret__.name,
probing_details=__ret__.probing_details,
protocol=__ret__.protocol,
reachability_details=__ret__.reachability_details,
related_projects=__ret__.related_projects,
source=__ret__.source,
update_time=__ret__.update_time)
@_utilities.lift_output_func(get_connectivity_test)
def get_connectivity_test_output(connectivity_test_id: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetConnectivityTestResult]:
"""
Gets the details of a specific Connectivity Test.
"""
...
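A short consumption sketch for the functions defined above, as they might appear inside a Pulumi program; the test id and project are placeholders:

import pulumi
from pulumi_google_native.networkmanagement.v1beta1.get_connectivity_test import get_connectivity_test

# Look up an existing Connectivity Test and export its reachability details.
result = get_connectivity_test(connectivity_test_id='my-test', project='my-project')
pulumi.export('reachability', result.reachability_details)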
| 47.658879 | 958 | 0.689774 |
| c4066854fa767de41c78c9973267e208c5d738f4 | 11,419 | py | Python | tests/test_authorizer.py | dotlambda/prawcore | ec23b29186fd4cbfffa7a156620518dbee845472 | ["BSD-2-Clause"] | null | null | null | tests/test_authorizer.py | dotlambda/prawcore | ec23b29186fd4cbfffa7a156620518dbee845472 | ["BSD-2-Clause"] | null | null | null | tests/test_authorizer.py | dotlambda/prawcore | ec23b29186fd4cbfffa7a156620518dbee845472 | ["BSD-2-Clause"] | null | null | null |
"""Test for prawcore.auth.Authorizer classes."""
import prawcore
import unittest
from .config import (CLIENT_ID, CLIENT_SECRET, PASSWORD, PERMANENT_GRANT_CODE,
REDIRECT_URI, REFRESH_TOKEN, REQUESTOR,
TEMPORARY_GRANT_CODE, USERNAME)
from betamax import Betamax
class AuthorizerTestBase(unittest.TestCase):
def setUp(self):
self.authentication = prawcore.TrustedAuthenticator(
REQUESTOR, CLIENT_ID, CLIENT_SECRET)
class AuthorizerTest(AuthorizerTestBase):
def test_authorize__with_permanent_grant(self):
self.authentication.redirect_uri = REDIRECT_URI
authorizer = prawcore.Authorizer(self.authentication)
with Betamax(REQUESTOR).use_cassette(
'Authorizer_authorize__with_permanent_grant'):
authorizer.authorize(PERMANENT_GRANT_CODE)
self.assertIsNotNone(authorizer.access_token)
self.assertIsNotNone(authorizer.refresh_token)
self.assertIsInstance(authorizer.scopes, set)
self.assertTrue(len(authorizer.scopes) > 0)
self.assertTrue(authorizer.is_valid())
def test_authorize__with_temporary_grant(self):
self.authentication.redirect_uri = REDIRECT_URI
authorizer = prawcore.Authorizer(self.authentication)
with Betamax(REQUESTOR).use_cassette(
'Authorizer_authorize__with_temporary_grant'):
authorizer.authorize(TEMPORARY_GRANT_CODE)
self.assertIsNotNone(authorizer.access_token)
self.assertIsNone(authorizer.refresh_token)
self.assertIsInstance(authorizer.scopes, set)
self.assertTrue(len(authorizer.scopes) > 0)
self.assertTrue(authorizer.is_valid())
def test_authorize__with_invalid_code(self):
self.authentication.redirect_uri = REDIRECT_URI
authorizer = prawcore.Authorizer(self.authentication)
with Betamax(REQUESTOR).use_cassette(
'Authorizer_authorize__with_invalid_code'):
self.assertRaises(prawcore.OAuthException, authorizer.authorize,
'invalid code')
self.assertFalse(authorizer.is_valid())
def test_authorize__fail_without_redirect_uri(self):
authorizer = prawcore.Authorizer(self.authentication)
self.assertRaises(prawcore.InvalidInvocation, authorizer.authorize,
'dummy code')
self.assertFalse(authorizer.is_valid())
def test_initialize(self):
authorizer = prawcore.Authorizer(self.authentication)
self.assertIsNone(authorizer.access_token)
self.assertIsNone(authorizer.scopes)
self.assertIsNone(authorizer.refresh_token)
self.assertFalse(authorizer.is_valid())
def test_initialize__with_refresh_token(self):
authorizer = prawcore.Authorizer(self.authentication, REFRESH_TOKEN)
self.assertIsNone(authorizer.access_token)
self.assertIsNone(authorizer.scopes)
self.assertEqual(REFRESH_TOKEN, authorizer.refresh_token)
self.assertFalse(authorizer.is_valid())
def test_initialize__with_untrusted_authenticator(self):
authenticator = prawcore.UntrustedAuthenticator(None, None)
authorizer = prawcore.Authorizer(authenticator)
self.assertIsNone(authorizer.access_token)
self.assertIsNone(authorizer.scopes)
self.assertIsNone(authorizer.refresh_token)
self.assertFalse(authorizer.is_valid())
def test_refresh(self):
authorizer = prawcore.Authorizer(self.authentication, REFRESH_TOKEN)
with Betamax(REQUESTOR).use_cassette('Authorizer_refresh'):
authorizer.refresh()
self.assertIsNotNone(authorizer.access_token)
self.assertIsInstance(authorizer.scopes, set)
self.assertTrue(len(authorizer.scopes) > 0)
self.assertTrue(authorizer.is_valid())
def test_refresh__with_invalid_token(self):
authorizer = prawcore.Authorizer(self.authentication, 'INVALID_TOKEN')
with Betamax(REQUESTOR).use_cassette(
'Authorizer_refresh__with_invalid_token'):
self.assertRaises(prawcore.ResponseException, authorizer.refresh)
self.assertFalse(authorizer.is_valid())
def test_refresh__without_refresh_token(self):
authorizer = prawcore.Authorizer(self.authentication)
self.assertRaises(prawcore.InvalidInvocation, authorizer.refresh)
self.assertFalse(authorizer.is_valid())
def test_revoke__access_token_with_refresh_set(self):
authorizer = prawcore.Authorizer(self.authentication, REFRESH_TOKEN)
with Betamax(REQUESTOR).use_cassette(
'Authorizer_revoke__access_token_with_refresh_set'):
authorizer.refresh()
authorizer.revoke(only_access=True)
self.assertIsNone(authorizer.access_token)
self.assertIsNotNone(authorizer.refresh_token)
self.assertIsNone(authorizer.scopes)
self.assertFalse(authorizer.is_valid())
authorizer.refresh()
self.assertTrue(authorizer.is_valid())
def test_revoke__access_token_without_refresh_set(self):
self.authentication.redirect_uri = REDIRECT_URI
authorizer = prawcore.Authorizer(self.authentication)
with Betamax(REQUESTOR).use_cassette(
'Authorizer_revoke__access_token_without_refresh_set'):
authorizer.authorize(TEMPORARY_GRANT_CODE)
authorizer.revoke()
self.assertIsNone(authorizer.access_token)
self.assertIsNone(authorizer.refresh_token)
self.assertIsNone(authorizer.scopes)
self.assertFalse(authorizer.is_valid())
def test_revoke__refresh_token_with_access_set(self):
authorizer = prawcore.Authorizer(self.authentication, REFRESH_TOKEN)
with Betamax(REQUESTOR).use_cassette(
'Authorizer_revoke__refresh_token_with_access_set'):
authorizer.refresh()
authorizer.revoke()
self.assertIsNone(authorizer.access_token)
self.assertIsNone(authorizer.refresh_token)
self.assertIsNone(authorizer.scopes)
self.assertFalse(authorizer.is_valid())
def test_revoke__refresh_token_without_access_set(self):
authorizer = prawcore.Authorizer(self.authentication, REFRESH_TOKEN)
with Betamax(REQUESTOR).use_cassette(
'Authorizer_revoke__refresh_token_without_access_set'):
authorizer.revoke()
self.assertIsNone(authorizer.access_token)
self.assertIsNone(authorizer.refresh_token)
self.assertIsNone(authorizer.scopes)
self.assertFalse(authorizer.is_valid())
def test_revoke__without_access_token(self):
authorizer = prawcore.Authorizer(self.authentication, REFRESH_TOKEN)
self.assertRaises(prawcore.InvalidInvocation, authorizer.revoke,
only_access=True)
def test_revoke__without_any_token(self):
authorizer = prawcore.Authorizer(self.authentication)
self.assertRaises(prawcore.InvalidInvocation, authorizer.revoke)
class DeviceIDAuthorizerTest(AuthorizerTestBase):
def setUp(self):
self.authentication = prawcore.UntrustedAuthenticator(REQUESTOR,
CLIENT_ID)
def test_initialize(self):
authorizer = prawcore.DeviceIDAuthorizer(self.authentication)
self.assertIsNone(authorizer.access_token)
self.assertIsNone(authorizer.scopes)
self.assertFalse(authorizer.is_valid())
def test_initialize__with_trusted_authenticator(self):
authenticator = prawcore.TrustedAuthenticator(None, None, None)
self.assertRaises(prawcore.InvalidInvocation,
prawcore.DeviceIDAuthorizer, authenticator)
def test_refresh(self):
authorizer = prawcore.DeviceIDAuthorizer(self.authentication)
with Betamax(REQUESTOR).use_cassette('DeviceIDAuthorizer_refresh'):
authorizer.refresh()
self.assertIsNotNone(authorizer.access_token)
self.assertEqual(set(['*']), authorizer.scopes)
self.assertTrue(authorizer.is_valid())
def test_refresh__with_short_device_id(self):
authorizer = prawcore.DeviceIDAuthorizer(self.authentication, 'a' * 19)
with Betamax(REQUESTOR).use_cassette(
'DeviceIDAuthorizer_refresh__with_short_device_id'):
self.assertRaises(prawcore.OAuthException, authorizer.refresh)
class ImplicitAuthorizerTest(AuthorizerTestBase):
def test_initialize(self):
authenticator = prawcore.UntrustedAuthenticator(REQUESTOR, CLIENT_ID)
authorizer = prawcore.ImplicitAuthorizer(authenticator, 'fake token',
1, 'modposts read')
self.assertEqual('fake token', authorizer.access_token)
self.assertEqual({'modposts', 'read'}, authorizer.scopes)
self.assertTrue(authorizer.is_valid())
def test_initialize__with_trusted_authenticator(self):
self.assertRaises(prawcore.InvalidInvocation,
prawcore.ImplicitAuthorizer, self.authentication,
None, None, None)
class ReadOnlyAuthorizerTest(AuthorizerTestBase):
def test_initialize__with_untrusted_authenticator(self):
authenticator = prawcore.UntrustedAuthenticator(REQUESTOR, CLIENT_ID)
self.assertRaises(prawcore.InvalidInvocation,
prawcore.ReadOnlyAuthorizer, authenticator)
def test_refresh(self):
authorizer = prawcore.ReadOnlyAuthorizer(self.authentication)
self.assertIsNone(authorizer.access_token)
self.assertIsNone(authorizer.scopes)
self.assertFalse(authorizer.is_valid())
with Betamax(REQUESTOR).use_cassette('ReadOnlyAuthorizer_refresh'):
authorizer.refresh()
self.assertIsNotNone(authorizer.access_token)
self.assertEqual(set(['*']), authorizer.scopes)
self.assertTrue(authorizer.is_valid())
class ScriptAuthorizerTest(AuthorizerTestBase):
def test_initialize__with_untrusted_authenticator(self):
authenticator = prawcore.UntrustedAuthenticator(REQUESTOR, CLIENT_ID)
self.assertRaises(prawcore.InvalidInvocation,
prawcore.ScriptAuthorizer, authenticator, None, None)
def test_refresh(self):
authorizer = prawcore.ScriptAuthorizer(self.authentication, USERNAME,
PASSWORD)
self.assertIsNone(authorizer.access_token)
self.assertIsNone(authorizer.scopes)
self.assertFalse(authorizer.is_valid())
with Betamax(REQUESTOR).use_cassette('ScriptAuthorizer_refresh'):
authorizer.refresh()
self.assertIsNotNone(authorizer.access_token)
self.assertEqual(set(['*']), authorizer.scopes)
self.assertTrue(authorizer.is_valid())
def test_refresh__with_invalid_username_or_password(self):
authorizer = prawcore.ScriptAuthorizer(self.authentication, USERNAME,
'invalidpassword')
with Betamax(REQUESTOR).use_cassette(
'ScriptAuthorizer_refresh__with_invalid_username_or_password'):
self.assertRaises(prawcore.OAuthException, authorizer.refresh)
self.assertFalse(authorizer.is_valid())
| 44.25969
| 79
| 0.704965
|
e060d555282354addbf990eda86a5f6b91f1566f
| 65
|
py
|
Python
|
snmpsim_data/__init__.py
|
timlegge/snmpsim-data
|
b6f14d2922e8ff72ed54564ec7a6db3178ed6932
|
[
"BSD-2-Clause"
] | 30
|
2020-09-03T06:02:38.000Z
|
2022-03-11T16:34:18.000Z
|
nesi/__init__.py
|
Tubbz-alt/NESi
|
0db169dd6378fbd097380280cc41440e652de19e
|
[
"BSD-2-Clause"
] | 27
|
2019-03-14T21:50:56.000Z
|
2019-07-09T13:38:29.000Z
|
nesi/__init__.py
|
Tubbz-alt/NESi
|
0db169dd6378fbd097380280cc41440e652de19e
|
[
"BSD-2-Clause"
] | 3
|
2020-10-08T23:41:29.000Z
|
2021-02-09T17:28:28.000Z
|
# http://www.python.org/dev/peps/pep-0396/
__version__ = '0.0.1'
| 21.666667
| 42
| 0.676923
|
577fb12e186ec0ad325e24f891df8106b79f9b98
| 3,363
|
py
|
Python
|
azurelinuxagent/common/future.py
|
ezeeyahoo/WALinuxAgent
|
7bb93ee0d75b91c6e9bc6d69003b4fdce9697ec2
|
[
"Apache-2.0"
] | null | null | null |
azurelinuxagent/common/future.py
|
ezeeyahoo/WALinuxAgent
|
7bb93ee0d75b91c6e9bc6d69003b4fdce9697ec2
|
[
"Apache-2.0"
] | null | null | null |
azurelinuxagent/common/future.py
|
ezeeyahoo/WALinuxAgent
|
7bb93ee0d75b91c6e9bc6d69003b4fdce9697ec2
|
[
"Apache-2.0"
] | 1
|
2020-08-18T20:15:17.000Z
|
2020-08-18T20:15:17.000Z
|
import platform
import sys
import os
import re
# Note: the failure to import 'distro' is deliberately swallowed below to
# avoid backward-compatibility problems on distributions that do not ship it
try:
import distro
except Exception:
pass
"""
Add aliases for Python 2 and Python 3 libs and functions.
"""
if sys.version_info[0] == 3:
import http.client as httpclient
from urllib.parse import urlparse
"""Rename Python3 str to ustr"""
ustr = str
bytebuffer = memoryview
from collections import OrderedDict
elif sys.version_info[0] == 2:
import httplib as httpclient
from urlparse import urlparse
"""Rename Python2 unicode to ustr"""
ustr = unicode
bytebuffer = buffer
if sys.version_info[1] >= 7:
from collections import OrderedDict # For Py 2.7+
else:
from ordereddict import OrderedDict # Works only on 2.6
else:
raise ImportError("Unknown python version: {0}".format(sys.version_info))
def get_linux_distribution(get_full_name, supported_dists):
"""Abstract platform.linux_distribution() call which is deprecated as of
Python 3.5 and removed in Python 3.7"""
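    # Prefer platform.linux_distribution() where it still exists; on Python
    # versions where it has been removed, the AttributeError handler below
    # falls back to the 'distro' package.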
try:
supported = platform._supported_dists + (supported_dists,)
osinfo = list(
platform.linux_distribution(
full_distribution_name=get_full_name,
supported_dists=supported
)
)
# The platform.linux_distribution() lib has issue with detecting OpenWRT linux distribution.
# Merge the following patch provided by OpenWRT as a temporary fix.
if os.path.exists("/etc/openwrt_release"):
osinfo = get_openwrt_platform()
if not osinfo or osinfo == ['', '', '']:
return get_linux_distribution_from_distro(get_full_name)
full_name = platform.linux_distribution()[0].strip()
osinfo.append(full_name)
except AttributeError:
return get_linux_distribution_from_distro(get_full_name)
return osinfo
def get_linux_distribution_from_distro(get_full_name):
"""Get the distribution information from the distro Python module."""
# If we get here we have to have the distro module, thus we do
# not wrap the call in a try-except block as it would mask the problem
# and result in a broken agent installation
osinfo = list(
distro.linux_distribution(
full_distribution_name=get_full_name
)
)
full_name = distro.linux_distribution()[0].strip()
osinfo.append(full_name)
return osinfo
def get_openwrt_platform():
"""
Add this workaround for detecting OpenWRT products because
the version and product information is contained in the /etc/openwrt_release file.
"""
result = [None, None, None]
openwrt_version = re.compile(r"^DISTRIB_RELEASE=['\"](\d+\.\d+.\d+)['\"]")
openwrt_product = re.compile(r"^DISTRIB_ID=['\"]([\w-]+)['\"]")
with open('/etc/openwrt_release', 'r') as fh:
content = fh.readlines()
for line in content:
version_matches = openwrt_version.match(line)
product_matches = openwrt_product.match(line)
if version_matches:
result[1] = version_matches.group(1)
elif product_matches:
if product_matches.group(1) == "OpenWrt":
result[0] = "openwrt"
return result
| 31.726415
| 100
| 0.666072
|
c5a804d3e4faebb0ba1600b2bee79aa7f60d263b
| 5,034
|
py
|
Python
|
Voting/views.py
|
MihaiBorsu/EVS2
|
33c8ebec6e9795c4da31e646622afdee4768fb23
|
[
"MIT"
] | null | null | null |
Voting/views.py
|
MihaiBorsu/EVS2
|
33c8ebec6e9795c4da31e646622afdee4768fb23
|
[
"MIT"
] | 3
|
2020-02-12T03:23:55.000Z
|
2021-06-10T22:24:31.000Z
|
Voting/views.py
|
MihaiBorsu/EVS2
|
33c8ebec6e9795c4da31e646622afdee4768fb23
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404,render, redirect, reverse
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from django.forms.models import inlineformset_factory
from .models import VotingEvent,Question,Choice
from .forms import *
ChildFormSet = inlineformset_factory(VotingEvent,Question,fields=('question_text',))
class IndexView(generic.ListView):
template_name = 'voting_index.html'
queryset = []
context_object_name = 'Enrolled_Voting_Events'
def get_queryset(self):
qs1 = VotingEvent.objects.order_by('-pub_date').filter(owner=self.request.user)
qs2 = VotingEvent.objects.order_by('-pub_date').filter(enrolled_users=self.request.user)
return (qs2 | qs1).distinct()
class IndexPublicView(generic.ListView):
template_name = 'voting_public_index.html'
queryset = []
context_object_name = 'Enrolled_Voting_Events'
def get_queryset(self):
return VotingEvent.objects.order_by('-pub_date').filter(is_public=True)
class EventView(generic.DetailView):
model = VotingEvent
template_name = 'voting_event_index.html'
context_object_name = 'event'
def questions(self):
return Question.objects.all().filter(voting_event = self.object)
class EventFormView(generic.CreateView):
template_name = 'createEvent.html'
form_class = VotingEventForm
def form_valid(self, form):
post = form.save(commit = False)
post.pub_date = timezone.now()
post.owner = self.request.user
post.save()
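        # Save first so the event has a primary key, then enroll the owner in
        # their own event via the many-to-many field and save again.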
post.enrolled_users.add(post.owner)
post.save()
return super(EventFormView, self).form_valid(form)
def get_success_url(self):
#return reverse('add_questions',args=(self.object.id,))
nxt_url = 'addquestions'+'/'+str(self.object.id)
return nxt_url
class QuestionView(generic.DeleteView):
model = Question
template_name = 'question_index.html'
context_object_name = 'question'
def get_object(self, queryset=None):
return get_object_or_404(Question,
id=self.kwargs['question_id'],
voting_event = self.kwargs['pk'])
def choices(self):
return Choice.objects.all().filter(question = self.object)
class QuestinFormView(generic.CreateView):
template_name = 'addQuestions.html'
form_class = QuestionForm
#success_url = '/voting'
def form_valid(self, form):
post = form.save(commit=False)
event_id = self.kwargs['event_id']
event = get_object_or_404(VotingEvent,id=event_id)
post.voting_event = event
post.pub_date = timezone.now()
post.save()
return super(QuestinFormView, self).form_valid(form)
def get_success_url(self):
return '/voting/addchoices'+'/'+str(self.kwargs['event_id'])+'/'+str(self.object.id)
class ChoiceFormView(generic.CreateView):
form_class = ChoiceForm
template_name = 'addChoices.html'
def form_valid(self, form):
post = form.save(commit=False)
question_id = self.kwargs['question_id']
question = get_object_or_404(Question,id=question_id)
post.question = question
post.choice_count = 0
post.save()
return super(ChoiceFormView,self).form_valid(form)
def get_success_url(self):
if 'add_another' in self.request.POST:
return '/voting/addchoices'+'/'+self.kwargs['event_id']+'/'+self.kwargs['question_id']
return '/voting/addquestions'+'/'+str(self.kwargs['event_id'])
class ThankyouView(generic.ListView):
template_name = 'thankyou.html'
queryset = []
class AlreadyVotedView(generic.ListView):
template_name = 'already_voted.html'
queryset = []
class LearnmoreView(generic.ListView):
template_name = 'learnmore.html'
queryset = []
def vote (request, question_id,choice_id): # voting event id to be added
choice = get_object_or_404(Choice, id=choice_id)
question = get_object_or_404(Question,id=question_id)
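    # Guard against double voting: the choice is only counted if this user has
    # not already voted on the question.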
if not request.user in question.voted_users.all():
        choice.choice_count += 1
question.voted_users.add(request.user)
choice.save()
question.save()
return HttpResponseRedirect('/voting/thankyou')
return HttpResponseRedirect('/voting/alreadyvoted')
"""
def manage_questions(request, pk):
event = get_object_or_404(VotingEvent, id=pk)
if request.method == 'POST':
formset = forms.QuestionFormset(request.POST, instance=event)
if formset.is_valid():
formset.save()
return redirect(reverse('voting:manage_questions', kwargs={"pk": event.id}))
else:
formset = forms.QuestionFormset(instance = event)
return render(request, 'createEvent.html',
{'event':event, 'question_formset':formset})
"""
| 31.074074
| 98
| 0.684545
|
ca5eb4e17ce93c3359b6f1513760a6f51e938f77
| 34,409
|
py
|
Python
|
snntoolbox/parsing/utils.py
|
qian-liu/snn_toolbox
|
9693647f9b2421a4f1ab789a97cc19fd17781e87
|
[
"MIT"
] | null | null | null |
snntoolbox/parsing/utils.py
|
qian-liu/snn_toolbox
|
9693647f9b2421a4f1ab789a97cc19fd17781e87
|
[
"MIT"
] | null | null | null |
snntoolbox/parsing/utils.py
|
qian-liu/snn_toolbox
|
9693647f9b2421a4f1ab789a97cc19fd17781e87
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Functions common to input model parsers.
The core of this module is an abstract base class that extracts an input model
written in some neural network library and prepares it for further processing
in the SNN toolbox.
.. autosummary::
:nosignatures:
AbstractModelParser
The idea is to make all further steps in the conversion/simulation pipeline
independent of the original model format.
Other functions help navigate through the network in order to explore network
connectivity and layer attributes:
.. autosummary::
:nosignatures:
get_type
has_weights
get_fanin
get_fanout
get_inbound_layers
get_inbound_layers_with_params
get_inbound_layers_without_params
get_outbound_layers
get_outbound_activation
@author: rbodo
"""
from abc import abstractmethod
import keras
import numpy as np
class AbstractModelParser:
"""Abstract base class for neural network model parsers.
Parameters
----------
input_model
The input network object.
config: configparser.Configparser
Contains the toolbox configuration for a particular experiment.
Attributes
----------
input_model: dict
The input network object.
config: configparser.Configparser
Contains the toolbox configuration for a particular experiment.
_layer_list: list[dict]
A list where each entry is a dictionary containing layer
specifications. Obtained by calling `parse`. Used to build new, parsed
Keras model.
_layer_dict: dict
Maps the layer names of the specific input model library to our standard
names (currently Keras).
parsed_model: keras.models.Model
The parsed model.
"""
def __init__(self, input_model, config):
self.input_model = input_model
self.config = config
self._layer_list = []
self._layer_dict = {}
self.parsed_model = None
def parse(self):
"""Extract the essential information about a neural network.
This method serves to abstract the conversion process of a network from
the language the input model was built in (e.g. Keras or Lasagne).
The methods iterates over all layers of the input model and writes the
layer specifications and parameters into `_layer_list`. The keys are
chosen in accordance with Keras layer attributes to facilitate
instantiation of a new, parsed Keras model (done in a later step by
`build_parsed_model`).
This function applies several simplifications and adaptations to prepare
the model for conversion to spiking. These modifications include:
- Removing layers only used during training (Dropout,
BatchNormalization, ...)
- Absorbing the parameters of BatchNormalization layers into the
          parameters of the preceding layer. This does not affect performance
          because batch-norm parameters are constant at inference time.
- Removing ReLU activation layers, because their function is inherent to
the spike generation mechanism. The information which nonlinearity was
used in the original model is preserved in the ``activation`` key in
`_layer_list`. If the output layer employs the softmax function, a
spiking version is used when testing the SNN in INIsim or MegaSim
simulators.
- Inserting a Flatten layer between Conv and FC layers, if the input
model did not explicitly include one.
"""
layers = self.get_layer_iterable()
snn_layers = eval(self.config.get('restrictions', 'snn_layers'))
name_map = {}
idx = 0
inserted_flatten = False
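        # Walk through the input model layer by layer, translating each one
        # into a dict of Keras-style attributes and recording the mapping from
        # the original layer object to its index in self._layer_list.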
for layer in layers:
layer_type = self.get_type(layer)
# Absorb BatchNormalization layer into parameters of previous layer
if layer_type == 'BatchNormalization':
parameters_bn = list(self.get_batchnorm_parameters(layer))
inbound = self.get_inbound_layers_with_parameters(layer)
assert len(inbound) == 1, \
"Could not find unique layer with parameters " \
"preceeding BatchNorm layer."
prev_layer = inbound[0]
prev_layer_idx = name_map[str(id(prev_layer))]
parameters = list(
self._layer_list[prev_layer_idx]['parameters'])
print("Absorbing batch-normalization parameters into " +
"parameters of previous {}.".format(self.get_type(
prev_layer)))
self._layer_list[prev_layer_idx]['parameters'] = \
absorb_bn_parameters(*(parameters + parameters_bn))
if layer_type == 'GlobalAveragePooling2D':
print("Replacing GlobalAveragePooling by AveragePooling "
"plus Flatten.")
pool_size = [layer.input_shape[-2], layer.input_shape[-1]]
self._layer_list.append(
{'layer_type': 'AveragePooling2D',
'name': self.get_name(layer, idx, 'AveragePooling2D'),
'input_shape': layer.input_shape, 'pool_size': pool_size,
'inbound': self.get_inbound_names(layer, name_map)})
name_map['AveragePooling2D' + str(idx)] = idx
idx += 1
num_str = str(idx) if idx > 9 else '0' + str(idx)
shape_string = str(np.prod(layer.output_shape[1:]))
self._layer_list.append(
{'name': num_str + 'Flatten_' + shape_string,
'layer_type': 'Flatten',
'inbound': [self._layer_list[-1]['name']]})
name_map['Flatten' + str(idx)] = idx
idx += 1
inserted_flatten = True
if layer_type not in snn_layers:
print("Skipping layer {}.".format(layer_type))
continue
if not inserted_flatten:
inserted_flatten = self.try_insert_flatten(layer, idx, name_map)
idx += inserted_flatten
print("Parsing layer {}.".format(layer_type))
if layer_type == 'MaxPooling2D' and \
self.config.getboolean('conversion', 'max2avg_pool'):
print("Replacing max by average pooling.")
layer_type = 'AveragePooling2D'
if inserted_flatten:
inbound = [self._layer_list[-1]['name']]
inserted_flatten = False
else:
inbound = self.get_inbound_names(layer, name_map)
attributes = self.initialize_attributes(layer)
attributes.update({'layer_type': layer_type,
'name': self.get_name(layer, idx),
'inbound': inbound})
if layer_type == 'Dense':
self.parse_dense(layer, attributes)
if layer_type == 'Conv2D':
self.parse_convolution(layer, attributes)
if layer_type in {'Dense', 'Conv2D'}:
weights, bias = attributes['parameters']
if self.config.getboolean('cell', 'binarize_weights'):
from snntoolbox.utils.utils import binarize
print("Binarizing weights.")
weights = binarize(weights)
elif self.config.getboolean('cell', 'quantize_weights'):
assert 'Qm.f' in attributes, \
"In the [cell] section of the configuration file, "\
"'quantize_weights' was set to True. For this to " \
"work, the layer needs to specify the fixed point " \
"number format 'Qm.f'."
from snntoolbox.utils.utils import reduce_precision
m, f = attributes.get('Qm.f')
print("Quantizing weights to Q{}.{}.".format(m, f))
weights = reduce_precision(weights, m, f)
if attributes.get('quantize_bias', False):
bias = reduce_precision(bias, m, f)
attributes['parameters'] = (weights, bias)
# These attributes are not needed any longer and would not be
# understood by Keras when building the parsed model.
attributes.pop('quantize_bias', None)
attributes.pop('Qm.f', None)
self.absorb_activation(layer, attributes)
if 'Pooling' in layer_type:
self.parse_pooling(layer, attributes)
if layer_type == 'Concatenate':
self.parse_concatenate(layer, attributes)
self._layer_list.append(attributes)
# Map layer index to layer id. Needed for inception modules.
name_map[str(id(layer))] = idx
idx += 1
print('')
@abstractmethod
def get_layer_iterable(self):
"""Get an iterable over the layers of the network.
Returns
-------
layers: list
"""
pass
@abstractmethod
def get_type(self, layer):
"""Get layer class name.
Returns
-------
layer_type: str
Layer class name.
"""
pass
@abstractmethod
def get_batchnorm_parameters(self, layer):
"""Get the parameters of a batch-normalization layer.
Returns
-------
mean, var_eps_sqrt_inv, gamma, beta, axis: tuple
"""
pass
def get_inbound_layers_with_parameters(self, layer):
"""Iterate until inbound layers are found that have parameters.
Parameters
----------
layer:
Layer
Returns
-------
: list
List of inbound layers.
"""
inbound = layer
while True:
inbound = self.get_inbound_layers(inbound)
if len(inbound) == 1:
inbound = inbound[0]
if self.has_weights(inbound):
return [inbound]
else:
result = []
for inb in inbound:
if self.has_weights(inb):
result.append(inb)
else:
result += self.get_inbound_layers_with_parameters(inb)
return result
def get_inbound_names(self, layer, name_map):
"""Get names of inbound layers.
Parameters
----------
layer:
Layer
name_map: dict
Maps the name of a layer to the `id` of the layer object.
Returns
-------
: list
The names of inbound layers.
"""
inbound = self.get_inbound_layers(layer)
for ib in range(len(inbound)):
for _ in range(len(self.layers_to_skip)):
if self.get_type(inbound[ib]) in self.layers_to_skip:
inbound[ib] = self.get_inbound_layers(inbound[ib])[0]
else:
break
if len(self._layer_list) == 0 or \
any([self.get_type(inb) == 'InputLayer' for inb in inbound]):
return ['input']
else:
inb_idxs = [name_map[str(id(inb))] for inb in inbound]
return [self._layer_list[i]['name'] for i in inb_idxs]
@abstractmethod
def get_inbound_layers(self, layer):
"""Get inbound layers of ``layer``.
Returns
-------
inbound: Sequence
"""
pass
@property
def layers_to_skip(self):
"""
Return a list of layer names that should be skipped during conversion
to a spiking network.
Returns
-------
self._layers_to_skip: List[str]
"""
return ['BatchNormalization', 'Activation', 'Dropout']
@abstractmethod
def has_weights(self, layer):
"""Return ``True`` if ``layer`` has weights."""
pass
def initialize_attributes(self, layer=None):
"""
Return a dictionary that will be used to collect all attributes of a
layer. This dictionary can then be used to instantiate a new parsed
layer.
"""
return {}
@abstractmethod
def get_input_shape(self):
"""Get the input shape of a network, not including batch size.
Returns
-------
input_shape: tuple
Input shape.
"""
pass
def get_batch_input_shape(self):
"""Get the input shape of a network, including batch size.
Returns
-------
batch_input_shape: tuple
Batch input shape.
"""
input_shape = tuple(self.get_input_shape())
batch_size = self.config.getint('simulation', 'batch_size')
return (batch_size,) + input_shape
def get_name(self, layer, idx, layer_type=None):
"""Create a name for a ``layer``.
The format is <layer_num><layer_type>_<layer_shape>.
>>> # Name of first convolution layer with 32 feature maps and dimension
>>> # 64x64:
"00Conv2D_32x64x64"
>>> # Name of final dense layer with 100 units:
"06Dense_100"
Parameters
----------
layer:
Layer.
idx: int
Layer index.
layer_type: Optional[str]
Type of layer.
Returns
-------
name: str
Layer name.
"""
if layer_type is None:
layer_type = self.get_type(layer)
output_shape = self.get_output_shape(layer)
if len(output_shape) == 2:
shape_string = '_{}'.format(output_shape[1])
else:
shape_string = '_{}x{}x{}'.format(output_shape[1],
output_shape[2],
output_shape[3])
num_str = str(idx) if idx > 9 else '0' + str(idx)
return num_str + layer_type + shape_string
@abstractmethod
def get_output_shape(self, layer):
"""Get output shape of a ``layer``.
Parameters
----------
layer
Layer.
Returns
-------
output_shape: Sized
Output shape of ``layer``.
"""
pass
def try_insert_flatten(self, layer, idx, name_map):
output_shape = self.get_output_shape(layer)
previous_layers = self.get_inbound_layers(layer)
prev_layer_output_shape = self.get_output_shape(previous_layers[0])
if len(output_shape) < len(prev_layer_output_shape) and \
self.get_type(layer) != 'Flatten':
assert len(previous_layers) == 1, "Layer to flatten must be unique."
print("Inserting layer Flatten.")
num_str = str(idx) if idx > 9 else '0' + str(idx)
shape_string = str(np.prod(prev_layer_output_shape[1:]))
self._layer_list.append({
'name': num_str + 'Flatten_' + shape_string,
'layer_type': 'Flatten',
'inbound': self.get_inbound_names(layer, name_map)})
name_map['Flatten' + str(idx)] = idx
return True
else:
return False
@abstractmethod
def parse_dense(self, layer, attributes):
"""Parse a fully-connected layer.
Parameters
----------
layer:
Layer.
attributes: dict
The layer attributes as key-value pairs in a dict.
"""
pass
@abstractmethod
def parse_convolution(self, layer, attributes):
"""Parse a convolutional layer.
Parameters
----------
layer:
Layer.
attributes: dict
The layer attributes as key-value pairs in a dict.
"""
pass
@abstractmethod
def parse_pooling(self, layer, attributes):
"""Parse a pooling layer.
Parameters
----------
layer:
Layer.
attributes: dict
The layer attributes as key-value pairs in a dict.
"""
pass
def absorb_activation(self, layer, attributes):
"""Detect what activation is used by the layer.
Sometimes the Dense or Conv layer specifies its activation directly,
sometimes it is followed by a dedicated Activation layer (possibly
with BatchNormalization in between). Here we try to find such an
activation layer, and add this information to the Dense/Conv layer
itself. The separate Activation layer can then be removed.
Parameters
----------
layer:
Layer.
attributes: dict
The layer attributes as key-value pairs in a dict.
"""
activation_str = self.get_activation(layer)
outbound = layer
for _ in range(3):
outbound = list(self.get_outbound_layers(outbound))
if len(outbound) != 1:
break
else:
outbound = outbound[0]
if self.get_type(outbound) == 'Activation':
activation_str = self.get_activation(outbound)
break
activation, activation_str = get_custom_activation(activation_str)
if activation_str == 'softmax' and \
self.config.getboolean('conversion', 'softmax_to_relu'):
activation = 'relu'
activation_str = 'relu'
print("Replaced softmax by relu activation function.")
print("Using activation {}.".format(activation_str))
attributes['activation'] = activation
@abstractmethod
def get_activation(self, layer):
"""Get the activation string of an activation ``layer``.
Parameters
----------
layer
Layer
Returns
-------
activation: str
String indicating the activation of the ``layer``.
"""
pass
@abstractmethod
def get_outbound_layers(self, layer):
"""Get outbound layers of ``layer``.
Parameters
----------
layer:
Layer.
Returns
-------
outbound: list
Outbound layers of ``layer``.
"""
pass
@abstractmethod
def parse_concatenate(self, layer, attributes):
"""Parse a concatenation layer.
Parameters
----------
layer:
Layer.
attributes: dict
The layer attributes as key-value pairs in a dict.
"""
pass
def build_parsed_model(self):
"""Create a Keras model suitable for conversion to SNN.
This method uses the specifications in `_layer_list` to build a
Keras model. The resulting model contains all essential information
about the original network, independently of the model library in which
the original network was built (e.g. Caffe).
Returns
-------
parsed_model: keras.models.Model
A Keras model, functionally equivalent to `input_model`.
"""
img_input = keras.layers.Input(batch_shape=self.get_batch_input_shape(),
name='input')
parsed_layers = {'input': img_input}
print("Building parsed model...\n")
for layer in self._layer_list:
# Replace 'parameters' key with Keras key 'weights'
if 'parameters' in layer:
layer['weights'] = layer.pop('parameters')
# Add layer
parsed_layer = getattr(keras.layers, layer.pop('layer_type'))
inbound = [parsed_layers[inb] for inb in layer.pop('inbound')]
if len(inbound) == 1:
inbound = inbound[0]
parsed_layers[layer['name']] = parsed_layer(**layer)(inbound)
print("Compiling parsed model...\n")
self.parsed_model = keras.models.Model(img_input, parsed_layers[
self._layer_list[-1]['name']])
# Optimizer and loss do not matter because we only do inference.
self.parsed_model.compile(
'sgd', 'categorical_crossentropy',
['accuracy', keras.metrics.top_k_categorical_accuracy])
return self.parsed_model
def evaluate_parsed(self, batch_size, num_to_test, x_test=None,
y_test=None, dataflow=None):
"""Evaluate parsed Keras model.
Can use either numpy arrays ``x_test, y_test`` containing the test
samples, or generate them with a dataflow
(``keras.ImageDataGenerator.flow_from_directory`` object).
Parameters
----------
batch_size: int
Batch size
num_to_test: int
Number of samples to test
x_test: Optional[np.ndarray]
y_test: Optional[np.ndarray]
dataflow: keras.ImageDataGenerator.flow_from_directory
"""
        assert (x_test is not None and y_test is not None or dataflow is not
                None), "No test samples provided."
if x_test is not None:
score = self.parsed_model.evaluate(x_test, y_test, batch_size,
verbose=0)
else:
steps = int(num_to_test / batch_size)
score = self.parsed_model.evaluate_generator(dataflow, steps)
print("Top-1 accuracy: {:.2%}".format(score[1]))
print("Top-5 accuracy: {:.2%}\n".format(score[2]))
return score
def absorb_bn_parameters(weight, bias, mean, var_eps_sqrt_inv, gamma, beta,
axis):
"""
Absorb the parameters of a batch-normalization layer into the previous
layer.
"""
# TODO: Due to some issue when porting a Keras1 GoogLeNet model to Keras2,
# the axis is 1 when it should be -1. Need to find a way to avoid this hack.
if not (axis == -1 or axis == weight.ndim - 1):
print("Warning: Specifying a batch-normalization axis other than the "
"default (-1) has not been thoroughly tested yet. There might be "
"issues depending on the keras backend version (theano / "
"tensorflow) and the image_dim_ordering (channels_first / "
"channels_last). Make sure that the accuracy of the parsed model "
"matches the input model.")
axis = -1
ndim = weight.ndim
reduction_axes = list(range(ndim))
del reduction_axes[axis]
if sorted(reduction_axes) != list(range(ndim))[:-1]:
broadcast_shape = [1] * ndim
broadcast_shape[axis] = weight.shape[axis]
var_eps_sqrt_inv = np.reshape(var_eps_sqrt_inv, broadcast_shape)
gamma = np.reshape(gamma, broadcast_shape)
bias_bn = beta + (bias - mean) * gamma * var_eps_sqrt_inv
weight_bn = weight * gamma * var_eps_sqrt_inv
return weight_bn, bias_bn
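def _absorb_bn_identity_example():
    """Illustrative sketch only, not part of the original toolbox API: with an
    identity batch normalization (zero mean and beta, unit inverse-std and
    gamma), `absorb_bn_parameters` must leave the weights and bias unchanged.
    """
    weight = np.random.rand(4, 3)
    bias = np.zeros(3)
    weight_bn, bias_bn = absorb_bn_parameters(
        weight, bias, np.zeros(3), np.ones(3), np.ones(3), np.zeros(3), -1)
    assert np.allclose(weight_bn, weight)
    assert np.allclose(bias_bn, bias)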
def padding_string(pad, pool_size):
"""Get string defining the border mode.
Parameters
----------
pad: tuple[int]
Zero-padding in x- and y-direction.
pool_size: list[int]
Size of kernel.
Returns
-------
padding: str
Border mode identifier.
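    Examples
    --------
    For instance, with a 3x3 kernel:
    >>> padding_string((0, 0), [3, 3])
    'valid'
    >>> padding_string((1, 1), [3, 3])
    'same'
    >>> padding_string((2, 2), [3, 3])
    'full'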
"""
if pad == (0, 0):
padding = 'valid'
elif pad == (pool_size[0] // 2, pool_size[1] // 2):
padding = 'same'
elif pad == (pool_size[0] - 1, pool_size[1] - 1):
padding = 'full'
else:
raise NotImplementedError(
"Padding {} could not be interpreted as any of the ".format(pad) +
"supported border modes 'valid', 'same' or 'full'.")
return padding
def load_parameters(filepath):
"""Load all layer parameters from an HDF5 file."""
import h5py
f = h5py.File(filepath, 'r')
params = []
for k in sorted(f.keys()):
params.append(np.array(f.get(k)))
f.close()
return params
def save_parameters(params, filepath, fileformat='h5'):
"""Save all layer parameters to an HDF5 file."""
if fileformat == 'pkl':
import pickle
pickle.dump(params, open(filepath + '.pkl', str('wb')))
else:
import h5py
with h5py.File(filepath, mode='w') as f:
for i, p in enumerate(params):
if i < 10:
j = '00' + str(i)
elif i < 100:
j = '0' + str(i)
else:
j = str(i)
f.create_dataset('param_'+j, data=p)
def has_weights(layer):
"""Return ``True`` if layer has weights.
Parameters
----------
layer : keras.layers.Layer
Keras layer
Returns
-------
: bool
``True`` if layer has weights.
"""
    return len(layer.weights) > 0
def get_inbound_layers_with_params(layer):
"""Iterate until inbound layers are found that have parameters.
Parameters
----------
layer: keras.layers.Layer
Layer
Returns
-------
: list
List of inbound layers.
"""
inbound = layer
while True:
inbound = get_inbound_layers(inbound)
if len(inbound) == 1:
inbound = inbound[0]
            if has_weights(inbound):
return [inbound]
else:
result = []
for inb in inbound:
if has_weights(inb):
result.append(inb)
else:
result += get_inbound_layers_with_params(inb)
return result
def get_inbound_layers_without_params(layer):
"""Return inbound layers.
Parameters
----------
layer: Keras.layers
A Keras layer.
Returns
-------
: list[Keras.layers]
List of inbound layers.
"""
# noinspection PyProtectedMember
return [layer for layer in layer._inbound_nodes[0].inbound_layers
if len(layer.weights) == 0]
def get_inbound_layers(layer):
"""Return inbound layers.
Parameters
----------
layer: Keras.layers
A Keras layer.
Returns
-------
: list[Keras.layers]
List of inbound layers.
"""
# noinspection PyProtectedMember
return layer._inbound_nodes[0].inbound_layers
def get_outbound_layers(layer):
"""Return outbound layers.
Parameters
----------
layer: Keras.layers
A Keras layer.
Returns
-------
: list[Keras.layers]
List of outbound layers.
"""
# noinspection PyProtectedMember
return [on.outbound_layer for on in layer._outbound_nodes]
def get_outbound_activation(layer):
"""
Iterate over 2 outbound layers to find an activation layer. If there is no
activation layer, take the activation of the current layer.
Parameters
----------
layer: Union[keras.layers.Conv2D, keras.layers.Dense]
Layer
Returns
-------
activation: str
Name of outbound activation type.
"""
activation = layer.activation.__name__
outbound = layer
for _ in range(2):
outbound = get_outbound_layers(outbound)
if len(outbound) == 1 and get_type(outbound[0]) == 'Activation':
activation = outbound[0].activation.__name__
return activation
def get_fanin(layer):
"""
Return fan-in of a neuron in ``layer``.
Parameters
----------
layer: Subclass[keras.layers.Layer]
Layer.
Returns
-------
fanin: int
Fan-in.
"""
if 'Conv' in layer.name:
fanin = np.prod(layer.kernel_size) * layer.input_shape[1]
elif 'Dense' in layer.name:
fanin = layer.input_shape[1]
    else:
        # Pooling and any other layer types contribute no weighted fan-in.
        fanin = 0
return fanin
def get_fanout(layer, config):
"""
Return fan-out of a neuron in ``layer``.
Parameters
----------
layer: Subclass[keras.layers.Layer]
Layer.
config: configparser.ConfigParser
Settings.
Returns
-------
fanout: Union[int, ndarray]
Fan-out. The fan-out of a neuron projecting onto a convolution layer
varies between neurons in a feature map if the stride of the convolution
layer is greater than unity. In this case, return an array of the same
shape as the layer.
"""
from snntoolbox.simulation.utils import get_spiking_outbound_layers
# In branched architectures like GoogLeNet, we have to consider multiple
# outbound layers.
next_layers = get_spiking_outbound_layers(layer, config)
fanout = 0
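    # If any outbound convolution layer has a stride larger than one, the
    # fan-out depends on the neuron position, so accumulate into an array of
    # the layer's output shape instead of a scalar.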
for next_layer in next_layers:
if 'Conv' in next_layer.name and not has_stride_unity(next_layer):
fanout = np.zeros(layer.output_shape[1:])
break
for next_layer in next_layers:
if 'Dense' in next_layer.name:
fanout += next_layer.units
elif 'Pool' in next_layer.name:
fanout += 1
elif 'Conv' in next_layer.name:
if has_stride_unity(next_layer):
fanout += np.prod(next_layer.kernel_size) * next_layer.filters
else:
fanout += get_fanout_array(layer, next_layer)
return fanout
def has_stride_unity(layer):
"""Return `True` if the strides in all dimensions of a ``layer`` are 1."""
return all([s == 1 for s in layer.strides])
def get_fanout_array(layer_pre, layer_post):
"""
Return an array of the same shape as ``layer_pre``, where each entry gives
the number of outgoing connections of a neuron. In convolution layers where
the post-synaptic layer has stride > 1, the fan-out varies between neurons.
"""
nx = layer_post.output_shape[3] # Width of feature map
ny = layer_post.output_shape[2] # Height of feature map
kx, ky = layer_post.kernel_size # Width and height of kernel
px = int((kx - 1) / 2) if layer_post.padding == 'valid' else 0
py = int((ky - 1) / 2) if layer_post.padding == 'valid' else 0
sx = layer_post.strides[1]
sy = layer_post.strides[0]
fanout = np.zeros(layer_pre.output_shape[1:])
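    # For every pre-synaptic position, count how many output positions of the
    # strided convolution have a receptive field that covers it; that count is
    # the fan-out of all neurons at this (x, y) location.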
for x_pre in range(fanout.shape[1]):
for y_pre in range(fanout.shape[2]):
x_post = [int((x_pre + px) / sx)]
y_post = [int((y_pre + py) / sy)]
wx = [(x_pre + px) % sx]
wy = [(y_pre + py) % sy]
i = 1
while wx[0] + i * sx < kx:
x = x_post[0] - i
if 0 <= x < nx:
x_post.append(x)
i += 1
i = 1
while wy[0] + i * sy < ky:
y = y_post[0] - i
if 0 <= y < ny:
y_post.append(y)
i += 1
fanout[:, x_pre, y_pre] = len(x_post) * len(y_post)
return fanout
def get_type(layer):
"""Get type of Keras layer.
Parameters
----------
layer: Keras.layers.Layer
Keras layer.
Returns
-------
: str
Layer type.
"""
return layer.__class__.__name__
def get_quantized_activation_function_from_string(activation_str):
"""
Parse a string describing the activation of a layer, and return the
corresponding activation function.
Parameters
----------
activation_str : str
Describes activation.
Returns
-------
activation : functools.partial
Activation function.
Examples
--------
>>> f = get_quantized_activation_function_from_string('relu_Q1.15')
>>> f
functools.partial(<function reduce_precision at 0x7f919af92b70>,
f='15', m='1')
>>> print(f.__name__)
relu_Q1.15
"""
# TODO: We implicitly assume relu activation function here. Change this to
# allow for general activation functions with reduced precision.
from functools import partial
from snntoolbox.utils.utils import quantized_relu
m, f = map(int, activation_str[activation_str.index('_Q') + 2:].split('.'))
activation = partial(quantized_relu, m=m, f=f)
activation.__name__ = activation_str
return activation
def get_clamped_relu_from_string(activation_str):
from snntoolbox.utils.utils import ClampedReLU
threshold, max_value = map(eval, activation_str.split('_')[-2:])
activation = ClampedReLU(threshold, max_value)
return activation
def get_custom_activation(activation_str):
"""
If ``activation_str`` describes a custom activation function, import this
    function from `snntoolbox.utils.utils` and return it. If no custom
    activation function is found or implemented, return the ``activation_str``
    in place of the activation function.
Parameters
----------
activation_str : str
Describes activation.
Returns
-------
activation :
Activation function.
activation_str : str
Describes activation.
"""
if activation_str == 'binary_sigmoid':
from snntoolbox.utils.utils import binary_sigmoid
activation = binary_sigmoid
elif activation_str == 'binary_tanh':
from snntoolbox.utils.utils import binary_tanh
activation = binary_tanh
elif '_Q' in activation_str:
activation = get_quantized_activation_function_from_string(
activation_str)
elif 'clamped_relu' in activation_str:
activation = get_clamped_relu_from_string(activation_str)
else:
activation = activation_str
return activation, activation_str
def get_custom_activations_dict():
"""
Import all implemented custom activation functions so they can be used when
loading a Keras model.
"""
from snntoolbox.utils.utils import binary_sigmoid, binary_tanh, ClampedReLU
# Todo: We should be able to load a different activation for each layer.
# Need to remove this hack:
activation_str = 'relu_Q1.4'
activation = get_quantized_activation_function_from_string(activation_str)
return {'binary_sigmoid': binary_sigmoid,
'binary_tanh': binary_tanh,
'clamped_relu': ClampedReLU(), # Todo: This should work regardless of the specific attributes of the ClampedReLU class used during training.
activation_str: activation}
| 28.866611
| 153
| 0.580342
|
f1ce280c74657622e221a4ffaa05f48a90510c0c
| 2,354
|
py
|
Python
|
koans/about_asserts.py
|
slcushing/python_koans
|
70e4301ac4cec1293bc1eb36032d018ec1eed0f9
|
[
"MIT"
] | null | null | null |
koans/about_asserts.py
|
slcushing/python_koans
|
70e4301ac4cec1293bc1eb36032d018ec1eed0f9
|
[
"MIT"
] | null | null | null |
koans/about_asserts.py
|
slcushing/python_koans
|
70e4301ac4cec1293bc1eb36032d018ec1eed0f9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutAsserts(Koan):
def test_assert_truth(self):
"""
We shall contemplate truth by testing reality, via asserts.
"""
# Confused? This video should help:
#
# http://bit.ly/about_asserts
self.assertTrue(True) # This should be True
def test_assert_with_message(self):
"""
Enlightenment may be more easily achieved with appropriate messages.
"""
self.assertTrue(True, "This should be True -- Please fix this")
def test_fill_in_values(self):
"""
Sometimes we will ask you to fill in the values
"""
        self.assertEqual(2, 1 + 1)  # test that the first and second arguments are equal
def test_assert_equality(self):
"""
To understand reality, we must compare our expectations against reality.
"""
expected_value = 2
actual_value = 1 + 1
self.assertTrue(expected_value == actual_value)
def test_a_better_way_of_asserting_equality(self):
"""
Some ways of asserting equality are better than others.
"""
expected_value = 2
actual_value = 1 + 1
self.assertEqual(expected_value, actual_value)
def test_that_unittest_asserts_work_the_same_way_as_python_asserts(self):
"""
Understand what lies within.
"""
        # A bare assert raises an AssertionError when its expression is falsy;
        # asserting True simply passes.
assert True
def test_that_sometimes_we_need_to_know_the_class_type(self):
"""
What is in a class name?
"""
# Sometimes we will ask you what the class type of an object is.
#
# For example, contemplate the text string "navel". What is its class type?
# The koans runner will include this feedback for this koan:
#
# AssertionError: '-=> "navel" <=-' != <type 'str'>
#
# So "navel".__class__ is equal to <type 'str'>? No not quite. This
# is just what it displays. The answer is simply str.
#
# See for yourself:
self.assertEqual(str, "navel".__class__) # It's str, not <type 'str'>
# Need an illustration? More reading can be found here:
#
# https://github.com/gregmalcolm/python_koans/wiki/Class-Attribute
| 29.797468
| 83
| 0.608751
|
0e3b6d70547ff6e172275def3d0e9c5cac4b24fd
| 668
|
py
|
Python
|
schemas.py
|
strcho/sayhello
|
b9ad082cc5e3fccbe95190079944d094493207a8
|
[
"MIT"
] | 13
|
2021-02-18T08:07:12.000Z
|
2022-03-28T06:48:36.000Z
|
schemas.py
|
strcho/sayhello
|
b9ad082cc5e3fccbe95190079944d094493207a8
|
[
"MIT"
] | 1
|
2021-01-29T06:36:44.000Z
|
2021-01-29T08:29:02.000Z
|
schemas.py
|
strcho/sayhello
|
b9ad082cc5e3fccbe95190079944d094493207a8
|
[
"MIT"
] | 6
|
2021-04-08T09:37:39.000Z
|
2022-03-02T02:13:30.000Z
|
from datetime import datetime
from fastapi import Body
from pydantic import BaseModel
from typing import List, Optional
class MessageBase(BaseModel):
name: str = Body(..., min_length=2, max_length=8)
body: str = Body(..., min_length=1, max_length=200)
class MessageCreate(MessageBase):
pass
class Message(MessageBase):
    id: Optional[int] = None
create_at: datetime
class Config:
orm_mode = True
class Response200(BaseModel):
code: int = 200
msg: str = "操作成功"
data: Message = None
class ResponseList200(Response200):
total: int
data: List[Message]
class Response400(Response200):
code: int = 400
msg: str = "无数据返回"
| 16.7
| 55
| 0.679641
|
c2a32195b41b68af78cce42513a2bfe97e2a91d5
| 3,695
|
py
|
Python
|
tests/validation/tests/v3_api/test_catalog_library.py
|
httpsOmkar/rancher
|
810740fcdb3b1b73a890cb120f58165195ee02c9
|
[
"Apache-2.0"
] | 1
|
2020-02-19T08:36:18.000Z
|
2020-02-19T08:36:18.000Z
|
tests/validation/tests/v3_api/test_catalog_library.py
|
httpsOmkar/rancher
|
810740fcdb3b1b73a890cb120f58165195ee02c9
|
[
"Apache-2.0"
] | null | null | null |
tests/validation/tests/v3_api/test_catalog_library.py
|
httpsOmkar/rancher
|
810740fcdb3b1b73a890cb120f58165195ee02c9
|
[
"Apache-2.0"
] | null | null | null |
"""
This file has tests to deploy apps in a project created in a cluster.
Test requirements:
Env variables - Cattle_url, Admin Token, User Token, Cluster Name
Test on at least 3 worker nodes
App versions are given in 'cataloglib_appversion.json' file
"""
import json
from .common import os
from .common import pytest
from .common import create_ns
from .common import create_catalog_external_id
from .common import validate_app_deletion
from .common import get_user_client_and_cluster
from .common import create_kubeconfig
from .common import get_cluster_client_for_token
from .common import create_project
from .common import random_test_name
from .common import get_defaut_question_answers
from .common import validate_catalog_app
from .common import get_project_client_for_token
from .common import USER_TOKEN
from .common import get_user_client
cluster_info = {"cluster": None, "cluster_client": None,
"project": None, "project_client": None,
"user_client": None}
catalog_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"./resource/cataloglib_appversion.json")
with open(catalog_filename, "r") as app_v:
app_data = json.load(app_v)
@pytest.mark.parametrize('app_name, app_version', app_data.items())
def test_catalog_app_deploy(app_name, app_version):
"""
    Runs for each app in 'cataloglib_appversion.json',
    creates the relevant namespace and deploys the app.
    Validates the status, version and answers of the app.
    The try block makes sure apps are deleted even
    after they fail to validate.
"""
user_client = cluster_info["user_client"]
project_client = cluster_info["project_client"]
cluster_client = cluster_info["cluster_client"]
cluster = cluster_info["cluster"]
project = cluster_info["project"]
ns = create_ns(cluster_client, cluster, project, app_name)
app_ext_id = create_catalog_external_id('library',
app_name, app_version)
answer = get_defaut_question_answers(user_client, app_ext_id)
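    # Deploy the catalog app with its default question answers; if validation
    # fails, the except branch below still cleans up the app and namespace.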
try:
app = project_client.create_app(
name=random_test_name(),
externalId=app_ext_id,
targetNamespace=ns.name,
projectId=ns.projectId,
answers=answer)
validate_catalog_app(project_client, app, app_ext_id, answer)
except (AssertionError, RuntimeError):
project_client.delete(app)
validate_app_deletion(project_client, app.id)
user_client.delete(ns)
assert False, "App deployment/Validation failed."
project_client.delete(app)
validate_app_deletion(project_client, app.id)
user_client.delete(ns)
@pytest.fixture(scope='module', autouse="True")
def create_project_client(request):
"""
    Creates a project in a cluster and collects details of the
    user, project and cluster.
"""
user_client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
cluster_client = get_cluster_client_for_token(cluster, USER_TOKEN)
project = create_project(user_client, cluster,
random_test_name("App-deployment"))
project_client = get_project_client_for_token(project, USER_TOKEN)
cluster_info["cluster"] = cluster
cluster_info["cluster_client"] = cluster_client
cluster_info["project"] = project
cluster_info["project_client"] = project_client
cluster_info["user_client"] = user_client
def fin():
client = get_user_client()
client.delete(cluster_info["project"])
request.addfinalizer(fin)
| 37.704082
| 76
| 0.70203
|
e6412f47d4c4a8aad1204ad98b2231f9db244346
| 6,192
|
py
|
Python
|
src/dials/algorithms/indexing/basis_vector_search/real_space_grid_search.py
|
dials-src/dials
|
25055c1f6164dc33e672e7c5c6a9c5a35e870660
|
[
"BSD-3-Clause"
] | 1
|
2021-12-10T17:28:16.000Z
|
2021-12-10T17:28:16.000Z
|
src/dials/algorithms/indexing/basis_vector_search/real_space_grid_search.py
|
dials-src/dials
|
25055c1f6164dc33e672e7c5c6a9c5a35e870660
|
[
"BSD-3-Clause"
] | null | null | null |
src/dials/algorithms/indexing/basis_vector_search/real_space_grid_search.py
|
dials-src/dials
|
25055c1f6164dc33e672e7c5c6a9c5a35e870660
|
[
"BSD-3-Clause"
] | 1
|
2021-12-07T12:39:04.000Z
|
2021-12-07T12:39:04.000Z
|
from __future__ import annotations
import logging
import math
from libtbx import phil
from rstbx.array_family import (
flex, # required to load scitbx::af::shared<rstbx::Direction> to_python converter
)
from rstbx.dps_core import SimpleSamplerTool
from scitbx import matrix
from dials.algorithms.indexing import DialsIndexError
from .strategy import Strategy
from .utils import group_vectors
logger = logging.getLogger(__name__)
real_space_grid_search_phil_str = """\
characteristic_grid = 0.02
.type = float(value_min=0)
max_vectors = 30
.help = "The maximum number of unique vectors to find in the grid search."
.type = int(value_min=3)
"""
class RealSpaceGridSearch(Strategy):
"""
Basis vector search using a real space grid search.
Search strategy to index found spots based on known unit cell parameters. It is
often useful for difficult cases of narrow-wedge rotation data or stills data,
especially where there is diffraction from multiple crystals.
A set of dimensionless radial unit vectors, typically ~7000 in total, is chosen
so that they are roughly evenly spaced in solid angle over a hemisphere. For each
direction, each of the three known unit cell vectors is aligned with the unit
vector and is scored according to how well it accords with the periodicity in
that direction of the reconstructed reciprocal space positions of the observed
spot centroids. Examining the highest-scoring combinations, any basis vectors in
orientations that are nearly collinear with a shorter basis vector are
eliminated. The highest-scoring remaining combinations are selected as the basis
of the direct lattice.
See:
Gildea, R. J., Waterman, D. G., Parkhurst, J. M., Axford, D., Sutton, G., Stuart, D. I., Sauter, N. K., Evans, G. & Winter, G. (2014). Acta Cryst. D70, 2652-2666.
"""
phil_help = (
"Index the found spots by testing a known unit cell in various orientations "
"until the best match is found. This strategy is often useful for difficult "
"cases of narrow-wedge rotation data or stills data, especially where there "
"is diffraction from multiple crystals."
)
phil_scope = phil.parse(real_space_grid_search_phil_str)
def __init__(self, max_cell, target_unit_cell, params=None, *args, **kwargs):
"""Construct a real_space_grid_search object.
Args:
max_cell (float): An estimate of the maximum cell dimension of the primitive
cell.
target_unit_cell (cctbx.uctbx.unit_cell): The target unit cell.
"""
super().__init__(max_cell, params=params, *args, **kwargs)
if target_unit_cell is None:
raise DialsIndexError(
"Target unit cell must be provided for real_space_grid_search"
)
self._target_unit_cell = target_unit_cell
@property
def search_directions(self):
"""Generator of the search directions (i.e. vectors with length 1)."""
SST = SimpleSamplerTool(self._params.characteristic_grid)
SST.construct_hemisphere_grid(SST.incr)
for direction in SST.angles:
yield matrix.col(direction.dvec)
@property
def search_vectors(self):
"""Generator of the search vectors.
The lengths of the vectors correspond to the target unit cell dimensions.
"""
unique_cell_dimensions = set(self._target_unit_cell.parameters()[:3])
        for direction in self.search_directions:
            for length in unique_cell_dimensions:
                yield direction * length
@staticmethod
def compute_functional(vector, reciprocal_lattice_vectors):
"""Compute the functional for a single direction vector.
Args:
vector (tuple): The vector at which to compute the functional.
reciprocal_lattice_vectors (scitbx.array_family.flex.vec3_double):
The list of reciprocal lattice vectors.
Returns:
The functional for the given vector.
"""
two_pi_S_dot_v = 2 * math.pi * reciprocal_lattice_vectors.dot(vector)
return flex.sum(flex.cos(two_pi_S_dot_v))
def score_vectors(self, reciprocal_lattice_vectors):
"""Compute the functional for the given directions.
Args:
directions: An iterable of the search directions.
reciprocal_lattice_vectors (scitbx.array_family.flex.vec3_double):
The list of reciprocal lattice vectors.
Returns:
A tuple containing the list of search vectors and their scores.
"""
vectors = flex.vec3_double()
scores = flex.double()
        for v in self.search_vectors:
f = self.compute_functional(v.elems, reciprocal_lattice_vectors)
vectors.append(v.elems)
scores.append(f)
return vectors, scores
def find_basis_vectors(self, reciprocal_lattice_vectors):
"""Find a list of likely basis vectors.
Args:
reciprocal_lattice_vectors (scitbx.array_family.flex.vec3_double):
The list of reciprocal lattice vectors to search for periodicity.
"""
used_in_indexing = flex.bool(reciprocal_lattice_vectors.size(), True)
logger.info("Indexing from %i reflections", used_in_indexing.count(True))
vectors, weights = self.score_vectors(reciprocal_lattice_vectors)
perm = flex.sort_permutation(weights, reverse=True)
vectors = vectors.select(perm)
weights = weights.select(perm)
groups = group_vectors(vectors, weights, max_groups=self._params.max_vectors)
unique_vectors = []
unique_weights = []
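        # Keep only the highest-weighted vector from each group of similar
        # candidates, so that near-duplicate directions are discarded.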
for g in groups:
idx = flex.max_index(flex.double(g.weights))
unique_vectors.append(g.vectors[idx])
unique_weights.append(g.weights[idx])
logger.info("Number of unique vectors: %i", len(unique_vectors))
for v, w in zip(unique_vectors, unique_weights):
logger.debug("%s %s %s", w, v.length(), str(v.elems))
return unique_vectors, used_in_indexing
| 39.189873
| 170
| 0.682332
|
cce431b00f79e2416e833ba7d9b91ff7ac01b7c2
| 5,035
|
py
|
Python
|
docs/conf.py
|
vznncv/vznncv-miniterm
|
a5999744435350304e26c4b4c97f7a999b2e5abd
|
[
"MIT"
] | 1
|
2022-02-17T20:23:12.000Z
|
2022-02-17T20:23:12.000Z
|
docs/conf.py
|
vznncv/vznncv-miniterm
|
a5999744435350304e26c4b4c97f7a999b2e5abd
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
vznncv/vznncv-miniterm
|
a5999744435350304e26c4b4c97f7a999b2e5abd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
from os.path import join, abspath
src_path = abspath(join('..', 'src'))
lib_name = "vznncv-miniterm"
root_package_path = join(src_path, *(lib_name.split('-')))
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'm2r']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = lib_name
copyright = u"2021, Konstantin Kochin"
author = u"Konstantin Kochin"
# Version info -- read without importing
_locals = {}
with open(join(root_package_path, '_version.py')) as fp:
exec(fp.read(), None, _locals)
__version__ = _locals['__version__']
__version_info__ = _locals['__version_info__']
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = '{}.{}'.format(__version_info__[0], __version_info__[1])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'vznncv-miniterm_doc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, project + '.tex',
project + u' Documentation',
author, 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, project,
project + u' Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project,
project + u' Documentation',
author,
project,
'One line description of project.',
'Miscellaneous'),
]
| 31.46875
| 77
| 0.684608
|
b37252e8cfd1fc3f8bdc8b6104a1cda421cd3fb8
| 449
|
py
|
Python
|
learn/migrations/0038_auto_20210622_2013.py
|
Shivamjha12/Mybio
|
e4bbcffa58341612ee684d74ba00cdb2125ef07b
|
[
"Unlicense",
"MIT"
] | 2
|
2021-08-29T08:07:03.000Z
|
2021-12-11T07:26:24.000Z
|
learn/migrations/0038_auto_20210622_2013.py
|
Shivamjha12/Mybio
|
e4bbcffa58341612ee684d74ba00cdb2125ef07b
|
[
"Unlicense",
"MIT"
] | null | null | null |
learn/migrations/0038_auto_20210622_2013.py
|
Shivamjha12/Mybio
|
e4bbcffa58341612ee684d74ba00cdb2125ef07b
|
[
"Unlicense",
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-06-22 14:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('learn', '0037_auto_20210622_2003'),
]
operations = [
migrations.AlterField(
model_name='images',
name='ProfileImage',
field=models.ImageField(default='static/img/images/Screenshot_368.png', null=True, upload_to=''),
),
]
| 23.631579
| 109
| 0.623608
|
7c1597cb1a5c5579e20cf49f15b80f901775cf2c
| 1,047
|
py
|
Python
|
operator/hacks/csv_prep.py
|
deeghuge/ibm-spectrum-scale-csi
|
572a94a263aa9a850e8377eacfe3d25be8df12c8
|
[
"Apache-2.0"
] | null | null | null |
operator/hacks/csv_prep.py
|
deeghuge/ibm-spectrum-scale-csi
|
572a94a263aa9a850e8377eacfe3d25be8df12c8
|
[
"Apache-2.0"
] | null | null | null |
operator/hacks/csv_prep.py
|
deeghuge/ibm-spectrum-scale-csi
|
572a94a263aa9a850e8377eacfe3d25be8df12c8
|
[
"Apache-2.0"
] | 1
|
2020-07-30T10:12:37.000Z
|
2020-07-30T10:12:37.000Z
|
#!/bin/python
import argparse
import sys
import os
import yaml
BASE_DIR="{0}/../".format(os.path.dirname(os.path.realpath(__file__)))
DEFAULT_VERSION="1.0.1"
CSV_PATH="{0}deploy/olm-catalog/ibm-spectrum-scale-csi-operator/{1}/ibm-spectrum-scale-csi-operator.v{1}.clusterserviceversion.yaml"
def main(args):
parser = argparse.ArgumentParser(
description='''A hack to prep the CSV for regeneration.''')
parser.add_argument( '--version', metavar='CSV Version', dest='version', default=DEFAULT_VERSION,
help='''The version of the CSV to update''')
args = parser.parse_args()
csvf = CSV_PATH.format(BASE_DIR, args.version)
csv = None
try:
with open(csvf, 'r') as stream:
csv = yaml.safe_load(stream)
except yaml.YAMLError as e:
print(e)
return 1
# Edit the contents of the CSV
if csv is not None:
csv.get("spec",{}).pop("install", None)
with open(csvf, 'w') as outfile:
yaml.dump(csv, outfile, default_flow_style=False)
if __name__ == "__main__":
sys.exit(main(sys.argv))
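# Usage sketch (the CSV document shown below is illustrative):
#   python operator/hacks/csv_prep.py --version 1.0.1
# The script's only edit is dropping spec.install from the loaded YAML, i.e.:
example_csv = {"spec": {"install": {"strategy": "deployment"}, "displayName": "example"}}
example_csv.get("spec", {}).pop("install", None)
assert example_csv == {"spec": {"displayName": "example"}}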
| 25.536585
| 132
| 0.684814
|
47c3f02b531c730d269fd3c266800d770d7bf003
| 3,347
|
py
|
Python
|
tests/test_pdf_polynomials.py
|
simonthor/zfit
|
97a18cd6cf14240be2cf52185681d0132f866179
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_pdf_polynomials.py
|
simonthor/zfit
|
97a18cd6cf14240be2cf52185681d0132f866179
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_pdf_polynomials.py
|
simonthor/zfit
|
97a18cd6cf14240be2cf52185681d0132f866179
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2021 zfit
import copy
import numpy as np
import pytest
import tensorflow as tf
import zfit
obs1_random = zfit.Space(obs="obs1", limits=(-1.5, 1.2))
obs1 = zfit.Space(obs="obs1", limits=(-1, 1))
coeffs_parametrization = [
1.4,
[0.6],
[1.42, 1.2],
[0.2, 0.8, 0.5],
[8.1, 1.4, 3.6, 4.1],
[1.1, 1.42, 1.2, 0.4, 0.7],
[11.1, 1.4, 5.6, 3.1, 18.1, 3.1],
]
rel_integral = 7e-2
default_sampling = 100000
poly_pdfs = [(zfit.pdf.Legendre, default_sampling),
(zfit.pdf.Chebyshev, default_sampling),
(zfit.pdf.Chebyshev2, default_sampling),
(zfit.pdf.Hermite, default_sampling * 20),
(zfit.pdf.Laguerre, default_sampling * 20)]
@pytest.mark.parametrize("poly_cfg", poly_pdfs)
@pytest.mark.parametrize("coeffs", coeffs_parametrization)
@pytest.mark.flaky(3)
def test_polynomials(poly_cfg, coeffs):
coeffs = copy.copy(coeffs)
poly_pdf, n_sampling = poly_cfg
polynomial = poly_pdf(obs=obs1, coeffs=coeffs)
polynomial2 = poly_pdf(obs=obs1, coeffs=coeffs)
polynomial_coeff0 = poly_pdf(obs=obs1, coeffs=coeffs, coeff0=1.)
lower, upper = obs1.rect_limits
x = np.random.uniform(size=(1000,), low=lower[0], high=upper[0])
y_poly = polynomial.pdf(x)
y_poly_u = polynomial.pdf(x, norm_range=False)
y_poly2 = polynomial2.pdf(x)
y_poly2_u = polynomial2.pdf(x, norm_range=False)
y_poly_coeff0 = polynomial_coeff0.pdf(x)
y_poly_coeff0_u = polynomial_coeff0.pdf(x, norm_range=False)
y_poly_np, y_poly2_np, y_poly_coeff0_np = [y_poly.numpy(), y_poly2.numpy(), y_poly_coeff0.numpy()]
y_polyu_np, y_poly2u_np, y_polyu_coeff0_np = [y_poly_u.numpy(), y_poly2_u.numpy(), y_poly_coeff0_u.numpy()]
np.testing.assert_allclose(y_polyu_np, y_poly2u_np)
np.testing.assert_allclose(y_polyu_np, y_polyu_coeff0_np)
np.testing.assert_allclose(y_poly_np, y_poly2_np)
np.testing.assert_allclose(y_poly_np, y_poly_coeff0_np)
# test 1 to 1 range
integral = polynomial.analytic_integrate(limits=obs1, norm_range=False)
numerical_integral = polynomial.numeric_integrate(limits=obs1, norm_range=False)
analytic_integral = integral.numpy()
assert pytest.approx(analytic_integral, rel=rel_integral) == numerical_integral.numpy()
# test with different range scaling
polynomial = poly_pdf(obs=obs1_random, coeffs=coeffs)
# test with limits != space
integral = polynomial.analytic_integrate(limits=obs1, norm_range=False)
numerical_integral = polynomial.numeric_integrate(limits=obs1, norm_range=False)
analytic_integral = integral.numpy()
assert pytest.approx(analytic_integral, rel=rel_integral) == numerical_integral.numpy()
# test with limits == space
integral = polynomial.analytic_integrate(limits=obs1_random, norm_range=False)
numerical_integral = polynomial.numeric_integrate(limits=obs1_random, norm_range=False)
analytic_integral = integral.numpy()
assert pytest.approx(analytic_integral, rel=rel_integral) == numerical_integral.numpy()
lower, upper = obs1_random.limit1d
sample = tf.random.uniform((n_sampling, 1), lower, upper, dtype=tf.float64)
test_integral = np.average(polynomial.pdf(sample, norm_range=False)) * obs1_random.rect_area()
assert pytest.approx(analytic_integral, rel=rel_integral * 3) == test_integral
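# Minimal sketch of the pattern exercised above, with arbitrary coefficients and
# only calls that already appear in this test module:
def _integral_sketch():
    space = zfit.Space(obs="obs1", limits=(-1, 1))
    poly = zfit.pdf.Chebyshev(obs=space, coeffs=[0.2, 0.8, 0.5])
    analytic = poly.analytic_integrate(limits=space, norm_range=False).numpy()
    numeric = poly.numeric_integrate(limits=space, norm_range=False).numpy()
    return analytic, numeric  # expected to agree to within a few percent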
| 40.325301
| 111
| 0.722737
|
9a0a2e39e0b3815955ab9b4fb41507654fa498ea
| 3,206
|
py
|
Python
|
fasthangul/python/fasthangul/chars_test.py
|
jeongukjae/fasthangul
|
e9c8c88247ce6710f339317a687835a52750fb33
|
[
"MIT"
] | 6
|
2019-12-16T01:15:38.000Z
|
2021-02-19T06:13:52.000Z
|
fasthangul/python/fasthangul/chars_test.py
|
jeongukjae/fasthangul
|
e9c8c88247ce6710f339317a687835a52750fb33
|
[
"MIT"
] | 14
|
2019-12-15T20:40:15.000Z
|
2021-09-09T04:16:43.000Z
|
fasthangul/python/fasthangul/chars_test.py
|
jeongukjae/fasthangul
|
e9c8c88247ce6710f339317a687835a52750fb33
|
[
"MIT"
] | 1
|
2020-06-25T01:26:07.000Z
|
2020-06-25T01:26:07.000Z
|
import random
import string
import unittest
from fasthangul import chars
class TestChars(unittest.TestCase):
def test_compose_jamos(self):
assert chars.compose_jamos("ㅇㅏㄴㄴㅕㅇ") == "안녕"
assert chars.compose_jamos("ㅇㅏㄴㄴㅕㅇ ") == "안녕 "
assert chars.compose_jamos("abcdㅇㅏㄴㄴㅕㅇ ") == "abcd안녕 "
assert chars.compose_jamos("ㄴㅓ ㅁㅝㅎㅐ?") == "너 뭐해?"
assert chars.compose_jamos("ㄴㅓ ㅎㅁㅝㅎㅐ?") == "너 ㅎ뭐해?"
assert chars.compose_jamos("ㅉㅡㅎㅂㅛㅎ") == "쯯뵿"
def test_decompose_jamos(self):
assert chars.decompose_jamos("안녕") == "ㅇㅏㄴㄴㅕㅇ"
assert chars.decompose_jamos("안녕 ") == "ㅇㅏㄴㄴㅕㅇ "
assert chars.decompose_jamos("abcd안녕 ") == "abcdㅇㅏㄴㄴㅕㅇ "
assert chars.decompose_jamos("너 뭐해?") == "ㄴㅓ ㅁㅝㅎㅐ?"
def test_jamo_splitter(self):
splitter = chars.JamoSplitter(True)
assert splitter.decompose("아니 이게 아닌데") == "ㅇㅏᴥㄴㅣᴥ ㅇㅣᴥㄱㅔᴥ ㅇㅏᴥㄴㅣㄴㄷㅔᴥ"
assert splitter.decompose("너 뭐해?") == "ㄴㅓᴥ ㅁㅝᴥㅎㅐᴥ?"
assert splitter.compose("ㅇㅏᴥㄴㅣᴥ ㅇㅣᴥㄱㅔᴥ ㅇㅏᴥㄴㅣㄴㄷㅔᴥ") == "아니 이게 아닌데"
assert splitter.compose("ㄴㅓe ㅁㅝeㅎㅐe?") == "너e 뭐e해e?"
def test_jamo_splitter_custom(self):
splitter = chars.JamoSplitter(True, "e")
assert splitter.decompose("아니 이게 아닌데") == "ㅇㅏeㄴㅣe ㅇㅣeㄱㅔe ㅇㅏeㄴㅣㄴㄷㅔe"
assert splitter.decompose("너 뭐해?") == "ㄴㅓe ㅁㅝeㅎㅐe?"
assert splitter.compose("ㅇㅏeㄴㅣe ㅇㅣeㄱㅔe ㅇㅏeㄴㅣㄴㄷㅔe") == "아니 이게 아닌데"
assert splitter.compose("ㄴㅓe ㅁㅝeㅎㅐe?") == "너 뭐해?"
assert str(splitter) == "<JamoSplitter fillEmptyJongsung=1, defaultJongsung=101>"
def test_constructor_should_raise(self):
with self.assertRaises(ValueError):
chars.JamoSplitter(True, "ee")
def test_large_text(self):
letters = string.ascii_letters + "".join(map(chr, range(ord("가"), ord("힣") + 1))) + " "
original_sentences = "".join(random.sample(letters, random.randint(7000, 10000)))
decomposed = chars.decompose_jamos(original_sentences)
composed = chars.compose_jamos(decomposed)
assert composed == original_sentences
def test_large_text_using_splitter(self):
splitter = chars.JamoSplitter(True)
letters = string.ascii_letters + "".join(map(chr, range(ord("가"), ord("힣") + 1))) + " "
original_sentences = "".join(random.sample(letters, random.randint(7000, 10000)))
decomposed = splitter.decompose(original_sentences)
composed = splitter.compose(decomposed)
assert composed == original_sentences
def test_levenshtein_distance(self):
assert chars.levenshtein_distance("안녕", "안녕하세요~") == 4
assert chars.levenshtein_distance("안녕", "안하세요~") == 4
def test_decomposed_levenshtein_distance(self):
assert chars.decomposed_levenshtein_distance("에어팟", "에앞ㅏㅅ") == 1
def test_get_longest_common_substring(self):
assert chars.get_longest_common_substring("안녕하세요~", "네 안녕하세요") == (0, 5)
self.assertEqual(
chars.get_longest_common_substring("fasthangul이라는 라이브러리를 계속 업데이트하려고 하는데, 이게 잘 될까요? 일단 열심히 해보려고요. 형태소 분석기도 곧 넣어보고요.", "형태소분석기"),
(61, 3),
)
if __name__ == "__main__":
unittest.main()
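# Minimal sketch of the round-trip property the tests above rely on.
def _roundtrip_sketch():
    text = "안녕하세요"  # arbitrary Hangul sample
    jamos = chars.decompose_jamos(text)
    return chars.compose_jamos(jamos) == text  # True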
| 39.580247
| 139
| 0.650031
|
b9287c19264f8eb0d52f2af07d433453731a65b8
| 720
|
py
|
Python
|
idact/detail/log/get_logger.py
|
intdata-bsc/idact
|
54cb65a711c145351e205970c27c83e6393cccf5
|
[
"MIT"
] | 5
|
2018-12-06T15:40:34.000Z
|
2019-06-19T11:22:58.000Z
|
idact/detail/log/get_logger.py
|
garstka/idact
|
b9c8405c94db362c4a51d6bfdf418b14f06f0da1
|
[
"MIT"
] | 9
|
2018-12-06T16:35:26.000Z
|
2019-04-28T19:01:40.000Z
|
idact/detail/log/get_logger.py
|
intdata-bsc/idact
|
54cb65a711c145351e205970c27c83e6393cccf5
|
[
"MIT"
] | 2
|
2019-04-28T19:18:58.000Z
|
2019-06-17T06:56:28.000Z
|
"""This module contains functions for getting a logger from a global provider.
"""
import logging
from idact.detail.log.logger_provider import LoggerProvider
def get_logger(name: str) -> logging.Logger:
"""Returns a logger with the proper logging level set.
See :class:`.LoggerProvider`.
:param name: Logger name, e.g. `__name__` of the caller.
"""
return LoggerProvider().get_logger(name=name)
def get_debug_logger(name: str) -> logging.Logger:
"""Returns a logger that will log everything with DEBUG level.
See :class:`.LoggerProvider`.
:param name: Logger name, e.g. `__name__` of the caller.
"""
return LoggerProvider().get_debug_logger(name=name)
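# Minimal sketch of a typical call site, assuming LoggerProvider has already been
# configured elsewhere as the docstrings describe.
def _example_call_site() -> None:
    log = get_logger(__name__)
    log.debug("illustrative message")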
| 24.827586
| 78
| 0.691667
|
a796992917038fd11cda55a1da0f6f8afc38b0c6
| 1,031
|
py
|
Python
|
schmecko/echo_server.py
|
rmfitzpatrick/schmecko
|
b2ed842aaf89be80bcd4387d8a3928e4ea4a040d
|
[
"MIT"
] | null | null | null |
schmecko/echo_server.py
|
rmfitzpatrick/schmecko
|
b2ed842aaf89be80bcd4387d8a3928e4ea4a040d
|
[
"MIT"
] | null | null | null |
schmecko/echo_server.py
|
rmfitzpatrick/schmecko
|
b2ed842aaf89be80bcd4387d8a3928e4ea4a040d
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
import gzip
from argparse import ArgumentParser
from pprint import pprint
import io
from werkzeug.routing import Rule
from flask import Flask, request, jsonify
app = Flask(__name__)
app.url_map.add(Rule('/<path:path>', endpoint='path'))
@app.endpoint('path')
def echo(path):
data = request.data
if request.content_encoding == 'gzip':
stream = io.BytesIO(data)
data = gzip.GzipFile(fileobj=stream, mode='rb').read()
pprint(data)
response = dict(method=request.method,
host=request.host,
path=request.path,
args=request.args,
headers=dict(request.headers.items()),
data='')
return jsonify(response)
ap = ArgumentParser()
ap.add_argument('-p', '--port', type=int, default=9080)  # cast so app.run() gets an int port
ap.add_argument('--host', default='0.0.0.0')
def run():
args = ap.parse_args()
app.run(host=args.host, port=args.port, debug=True, use_reloader=True)
if __name__ == '__main__':
run()
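# Minimal client sketch using only the standard library; host/port match the
# defaults above, while the path, query string and JSON body are illustrative.
def _client_sketch():
    import json
    import urllib.request
    req = urllib.request.Request(
        'http://127.0.0.1:9080/some/path?x=1',
        data=b'{"hello": "world"}',
        headers={'Content-Type': 'application/json'},
    )
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read())  # echoes method, path, args and headers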
| 23.431818
| 74
| 0.625606
|
e63b46dcb71dbc4e79618bd9786031fb87dcc671
| 2,167
|
py
|
Python
|
gooey/tests/test_header.py
|
Jacke/Gooey
|
329b6954befcb74f0243e1282e77ab7bff8e7abf
|
[
"MIT"
] | 13,430
|
2015-01-01T04:52:02.000Z
|
2022-03-31T23:34:03.000Z
|
gooey/tests/test_header.py
|
Jacke/Gooey
|
329b6954befcb74f0243e1282e77ab7bff8e7abf
|
[
"MIT"
] | 669
|
2015-01-02T04:51:28.000Z
|
2022-03-29T08:32:30.000Z
|
gooey/tests/test_header.py
|
Jacke/Gooey
|
329b6954befcb74f0243e1282e77ab7bff8e7abf
|
[
"MIT"
] | 890
|
2015-01-09T19:15:46.000Z
|
2022-03-31T12:34:24.000Z
|
import unittest
from argparse import ArgumentParser
from itertools import *
from tests.harness import instrumentGooey
from gooey.tests import *
class TestGooeyHeader(unittest.TestCase):
def make_parser(self):
parser = ArgumentParser(description='description')
return parser
def test_header_visibility(self):
"""
Test that the title and subtitle components correctly show/hide
based on config settings.
Verifying Issue #497
"""
for testdata in self.testcases():
with self.subTest(testdata):
with instrumentGooey(self.make_parser(), **testdata) as (app, gooeyApp):
header = gooeyApp.header
self.assertEqual(
header._header.IsShown(),
testdata.get('header_show_title', True)
)
self.assertEqual(
header._subheader.IsShown(),
testdata.get('header_show_subtitle', True)
)
def test_header_string(self):
"""
        Verify that strings in the buildspec get correctly
placed into the UI.
"""
parser = ArgumentParser(description='Foobar')
with instrumentGooey(parser, program_name='BaZzEr') as (app, gooeyApp):
self.assertEqual(gooeyApp.header._header.GetLabelText(), 'BaZzEr')
self.assertEqual(gooeyApp.header._subheader.GetLabelText(), 'Foobar')
def testcases(self):
"""
Generate a powerset of all possible combinations of
the header parameters (empty, some present, all present, all combos)
"""
iterable = product(['header_show_title', 'header_show_subtitle'], [True, False])
allCombinations = list(powerset(iterable))
return [{k: v for k,v in args}
for args in allCombinations]
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
if __name__ == '__main__':
unittest.main()
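# Quick illustration of the powerset helper above: the four flag pairs expand into
# 2**4 = 16 subsets, each collapsed into a kwargs dict as in testcases().
def _powerset_sketch():
    pairs = list(product(['header_show_title', 'header_show_subtitle'], [True, False]))
    return [dict(args) for args in powerset(pairs)]  # 16 dicts; the first is empty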
| 32.833333
| 88
| 0.596677
|
a8e9ad9ff7cc9e10de5b0db6c30f99f73ac60910
| 91,239
|
py
|
Python
|
pandas/tests/arithmetic/test_datetime64.py
|
s-scherrer/pandas
|
837daf18d480cce18c25844c591c39da19437252
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-04-18T14:29:33.000Z
|
2020-04-18T14:29:33.000Z
|
pandas/tests/arithmetic/test_datetime64.py
|
s-scherrer/pandas
|
837daf18d480cce18c25844c591c39da19437252
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/tests/arithmetic/test_datetime64.py
|
s-scherrer/pandas
|
837daf18d480cce18c25844c591c39da19437252
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-06-19T11:52:05.000Z
|
2020-06-19T11:52:05.000Z
|
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import datetime, time, timedelta
from itertools import product, starmap
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.compat.numpy import np_datetime64_compat
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray, TimedeltaArray
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(self, other, tz_naive_fixture):
# We don't parametrize this over box_with_array because listlike
# other plays poorly with assert_invalid_comparison reversed checks
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
assert_invalid_comparison(dta, other, tm.to_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], pd.Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
xbox = box if box is not pd.Index else np.ndarray
ts = pd.Timestamp.now(tz)
ser = pd.Series([ts, pd.NaT])
# FIXME: Can't transpose because that loses the tz dtype on
# the NaT column
obj = tm.box_expected(ser, box, transpose=False)
expected = pd.Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox, transpose=False)
result = obj == ts
tm.assert_equal(result, expected)
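# Roughly, the invariant locked in above is that scalar comparisons are elementwise
# and that NaT never compares equal, e.g.:
#   >>> ser = pd.Series([pd.Timestamp("2020-01-01"), pd.NaT])
#   >>> (ser == ser[0]).tolist()
#   [True, False]
#   >>> (ser == pd.NaT).tolist()
#   [False, False]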
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[pd.Timestamp("2011-01-01"), NaT, pd.Timestamp("2011-01-03")],
[NaT, NaT, pd.Timestamp("2011-01-03")],
),
(
[pd.Timedelta("1 days"), NaT, pd.Timedelta("3 days")],
[NaT, NaT, pd.Timedelta("3 days")],
),
(
[pd.Period("2011-01", freq="M"), NaT, pd.Period("2011-03", freq="M")],
[NaT, NaT, pd.Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons(self, dtype, index_or_series, reverse, pair):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
# Series, Index
expected = Series([False, False, True])
tm.assert_series_equal(left == right, expected)
expected = Series([True, True, False])
tm.assert_series_equal(left != right, expected)
expected = Series([False, False, False])
tm.assert_series_equal(left < right, expected)
expected = Series([False, False, False])
tm.assert_series_equal(left > right, expected)
expected = Series([False, False, True])
tm.assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
tm.assert_series_equal(left <= right, expected)
def test_comparison_invalid(self, tz_naive_fixture, box_with_array):
# GH#4968
# invalid date/int comparisons
tz = tz_naive_fixture
ser = Series(range(5))
ser2 = Series(pd.date_range("20010101", periods=5, tz=tz))
ser = tm.box_expected(ser, box_with_array)
ser2 = tm.box_expected(ser2, box_with_array)
assert_invalid_comparison(ser, ser2, box_with_array)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
if box_with_array is tm.to_array and dtype is object:
            # don't bother testing ndarray comparison methods as this fails
            # on older NumPy versions (since they check object identity)
return
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box_with_array)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = pd.Series(pd.date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = pd.Timestamp("nat")
ser[3] = pd.Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
ser = pd.Series([pd.Timestamp("2000-01-29 01:59:00"), "NaT"])
ser = tm.box_expected(ser, box_with_array)
result = ser != ser
expected = tm.box_expected([False, True], xbox)
tm.assert_equal(result, expected)
result = ser != ser[0]
expected = tm.box_expected([False, True], xbox)
tm.assert_equal(result, expected)
result = ser != ser[1]
expected = tm.box_expected([True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, False], xbox)
tm.assert_equal(result, expected)
result = ser == ser[0]
expected = tm.box_expected([True, False], xbox)
tm.assert_equal(result, expected)
result = ser == ser[1]
expected = tm.box_expected([False, False], xbox)
tm.assert_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.lt, operator.ge, operator.le],
)
def test_comparators(self, op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = op(arr, element)
index_result = op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
if box_with_array is tm.to_array and dtype is object:
            # don't bother testing ndarray comparison methods as this fails
            # on older NumPy versions (since they check object identity)
return
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
left = pd.DatetimeIndex(
[pd.Timestamp("2011-01-01"), pd.NaT, pd.Timestamp("2011-01-03")]
)
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == pd.NaT, expected)
tm.assert_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != pd.NaT, expected)
tm.assert_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < pd.NaT, expected)
tm.assert_equal(pd.NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = pd.DatetimeIndex(
["2014-01-01", pd.NaT, "2014-03-01", pd.NaT, "2014-05-01", "2014-07-01"]
)
didx2 = pd.DatetimeIndex(
["2014-02-01", "2014-03-01", pd.NaT, pd.NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np_datetime64_compat("2014-02-01 00:00Z"),
np_datetime64_compat("2014-03-01 00:00Z"),
np_datetime64_compat("nat"),
np.datetime64("nat"),
np_datetime64_compat("2014-06-01 00:00Z"),
np_datetime64_compat("2014-07-01 00:00Z"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Comparisons against an ordinary scalar: np.nan / pd.NaT positions compare False
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
def test_comparison_tzawareness_compat(self, op, box_df_fail):
# GH#18162
box = box_df_fail
dr = pd.date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
msg = "Cannot compare tz-naive and tz-aware"
with pytest.raises(TypeError, match=msg):
op(dr, dz)
# FIXME: DataFrame case fails to raise for == and !=, wrong
# message for inequalities
with pytest.raises(TypeError, match=msg):
op(dr, list(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(list(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
# FIXME: DataFrame case fails to raise for == and !=, wrong
# message for inequalities
with pytest.raises(TypeError, match=msg):
op(dz, list(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(list(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == list(dr))
assert np.all(list(dr) == dr)
assert np.all(np.array(list(dr), dtype=object) == dr)
assert np.all(dr == np.array(list(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == list(dz))
assert np.all(list(dz) == dz)
assert np.all(np.array(list(dz), dtype=object) == dz)
assert np.all(dz == np.array(list(dz), dtype=object))
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
def test_comparison_tzawareness_compat_scalars(self, op, box_with_array):
# GH#18162
dr = pd.date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = pd.Timestamp("2000-03-14 01:59")
ts_tz = pd.Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = "Cannot compare tz-naive and tz-aware"
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
with pytest.raises(TypeError, match=msg):
op(dz, ts)
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, op, other, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = pd.date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
msg = "Cannot compare tz-naive and tz-aware"
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
def test_nat_comparison_tzawareness(self, op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
dti = pd.DatetimeIndex(
["2014-01-01", pd.NaT, "2014-03-01", pd.NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, pd.NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), pd.NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
msg = "Cannot compare tz-naive and tz-aware"
with pytest.raises(TypeError, match=msg):
# tzawareness failure
dti != other
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
expected = pd.date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
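    # Roughly, the scalar case above is plain elementwise shifting, e.g.:
    #   >>> rng = pd.date_range("2000-01-01", periods=2)
    #   >>> str((rng + pd.Timedelta(hours=2))[0])
    #   '2000-01-01 02:00:00'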
def test_dt64arr_iadd_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
expected = pd.date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
expected = pd.date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
def test_dt64arr_isub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
expected = pd.date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = pd.date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = pd.DatetimeIndex(["NaT"] * 9, tz=tz)
# FIXME: fails with transpose=True due to tz-aware DataFrame
# transpose bug
obj = tm.box_expected(dti, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01", periods=3, tz=tz)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = pd.date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = pd.date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
pd.Timestamp("2013-01-01"),
pd.Timestamp("2013-01-01").to_pydatetime(),
pd.Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = pd.date_range("2013-01-01", periods=3)
idx = tm.box_expected(idx, box_with_array)
expected = pd.TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = pd.date_range("20130101", periods=3)
dtarr = tm.box_expected(dti, box_with_array)
expected = pd.TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = pd.date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = pd.Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = pd.DatetimeIndex([pd.NaT, pd.Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - pd.NaT
expected = pd.Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - pd.NaT
expected = pd.Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
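    # Roughly: subtracting pd.NaT from a datetime64 series propagates NaT with
    # timedelta64[ns] dtype, e.g.:
    #   >>> str((pd.Series([pd.Timestamp("1990-03-15")]) - pd.NaT).dtype)
    #   'timedelta64[ns]'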
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
warn = PerformanceWarning if box_with_array is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = pd.date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = pd.date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "cannot add"
with pytest.raises(TypeError, match=msg):
dtarr + dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals + dtarr
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
idx = tm.box_expected(idx, box_with_array)
msg = "cannot add"
with pytest.raises(TypeError, match=msg):
idx + Timestamp("2011-01-01")
with pytest.raises(TypeError, match=msg):
Timestamp("2011-01-01") + idx
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
pd.Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = pd.date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: parametrize over the scalar being added? radd? sub?
offset = dates + pd.offsets.Hour(5)
tm.assert_equal(offset, expected)
offset = dates + np.timedelta64(5, "h")
tm.assert_equal(offset, expected)
offset = dates + timedelta(hours=5)
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, kwd in enumerate(relative_kwargs):
off = pd.DateOffset(**dict([kwd]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = pd.DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + pd.DateOffset(years=1)
result2 = pd.DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - pd.DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
# TODO: __sub__, __rsub__
def test_dt64arr_add_mixed_offset_array(self, box_with_array):
# GH#10699
# array of offsets
s = DatetimeIndex([Timestamp("2000-1-1"), Timestamp("2000-2-1")])
s = tm.box_expected(s, box_with_array)
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
other = pd.Index([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()])
other = tm.box_expected(other, box_with_array)
result = s + other
exp = DatetimeIndex([Timestamp("2001-1-1"), Timestamp("2000-2-29")])
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
# same offset
other = pd.Index(
[pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
)
other = tm.box_expected(other, box_with_array)
result = s + other
exp = DatetimeIndex([Timestamp("2001-1-1"), Timestamp("2001-2-1")])
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
# TODO: overlap with test_dt64arr_add_mixed_offset_array?
def test_dt64arr_add_sub_offset_ndarray(self, tz_naive_fixture, box_with_array):
# GH#18849
tz = tz_naive_fixture
dti = pd.date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
res = dtarr + other
expected = DatetimeIndex(
[dti[n] + other[n] for n in range(len(dti))], name=dti.name, freq="infer"
)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + dtarr
tm.assert_equal(res2, expected)
with tm.assert_produces_warning(warn):
res = dtarr - other
expected = DatetimeIndex(
[dti[n] - other[n] for n in range(len(dti))], name=dti.name, freq="infer"
)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(res, expected)
@pytest.mark.parametrize(
"op, offset, exp, exp_freq",
[
(
"__add__",
pd.DateOffset(months=3, days=10),
[
Timestamp("2014-04-11"),
Timestamp("2015-04-11"),
Timestamp("2016-04-11"),
Timestamp("2017-04-11"),
],
None,
),
(
"__add__",
pd.DateOffset(months=3),
[
Timestamp("2014-04-01"),
Timestamp("2015-04-01"),
Timestamp("2016-04-01"),
Timestamp("2017-04-01"),
],
"AS-APR",
),
(
"__sub__",
pd.DateOffset(months=3, days=10),
[
Timestamp("2013-09-21"),
Timestamp("2014-09-21"),
Timestamp("2015-09-21"),
Timestamp("2016-09-21"),
],
None,
),
(
"__sub__",
pd.DateOffset(months=3),
[
Timestamp("2013-10-01"),
Timestamp("2014-10-01"),
Timestamp("2015-10-01"),
Timestamp("2016-10-01"),
],
"AS-OCT",
),
],
)
def test_dti_add_sub_nonzero_mth_offset(
self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
):
# GH 26258
tz = tz_aware_fixture
date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
date = tm.box_expected(date, box_with_array, False)
mth = getattr(date, op)
result = mth(offset)
expected = pd.DatetimeIndex(exp, tz=tz)
expected = tm.box_expected(expected, box_with_array, False)
tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")])
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT])
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = pd.Timestamp("1700-01-31")
td = pd.Timedelta("20000 Days")
dti = pd.date_range("1949-09-30", freq="100Y", periods=4)
ser = pd.Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = pd.NaT
expected = pd.Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = pd.NaT
expected = pd.Series(
["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]"
)
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["now", pd.Timestamp.max])
dtimin = pd.to_datetime(["now", pd.Timestamp.min])
tsneg = Timestamp("1950-01-01")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = pd.Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = pd.Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["now", pd.Timestamp.max])
dtimin = pd.to_datetime(["now", pd.Timestamp.min])
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
# General tests
expected = pd.Timestamp.max.value - ts_pos[1].value
result = dtimax - ts_pos
assert result[1].value == expected
expected = pd.Timestamp.min.value - ts_neg[1].value
result = dtimin - ts_neg
assert result[1].value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([pd.Timestamp.min])
t1 = tmin + pd.Timedelta.max + pd.Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([pd.Timestamp.max])
t2 = tmax + pd.Timedelta.min - pd.Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
class TestTimestampSeriesArithmetic:
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype="M8[ns]")
b = Series(dtype="m8[ns]")
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
pd.Timestamp("20111230"),
pd.Timestamp("20120101"),
pd.Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
pd.Timestamp("20111231"),
pd.Timestamp("20120102"),
pd.Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
# datetime64 with timedelta
dt1 + td1
td1 + dt1
dt1 - td1
# timedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
# these are all TypeError ops
op_str = all_arithmetic_operators
def check(get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
op = getattr(get_ser, op_str, None)
# Previously, _validate_for_numeric_binop in core/indexes/base.py
# did this for us.
with pytest.raises(
TypeError, match="operate|[cC]annot|unsupported operand"
):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
)
if op_str not in ["__sub__", "__rsub__"]:
check(dt1, dt2)
# ## datetime64 with timedelta ###
# TODO(jreback) __rsub__ should raise?
if op_str not in ["__add__", "__radd__", "__sub__"]:
check(dt1, td1)
# 8260, 10763
# datetime64 with tz
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([pd.Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([pd.Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = pd.date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = pd.Series(dti)
expected = pd.Series(pd.TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), pd.NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), pd.NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "Unary negative expects"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
@pytest.mark.parametrize(
"dt64_series",
[
Series([Timestamp("19900315"), Timestamp("19900315")]),
Series([pd.NaT, Timestamp("19900315")]),
Series([pd.NaT, pd.NaT], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
# multiplication
msg = "cannot perform .* with this index type"
with pytest.raises(TypeError, match=msg):
dt64_series * one
with pytest.raises(TypeError, match=msg):
one * dt64_series
# division
with pytest.raises(TypeError, match=msg):
dt64_series / one
with pytest.raises(TypeError, match=msg):
one / dt64_series
# TODO: parametrize over box
@pytest.mark.parametrize("op", ["__add__", "__radd__", "__sub__", "__rsub__"])
@pytest.mark.parametrize("tz", [None, "Asia/Tokyo"])
def test_dt64_series_add_intlike(self, tz, op):
# GH#19123
dti = pd.DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
method = getattr(ser, op)
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
with pytest.raises(TypeError, match=msg):
method(1)
with pytest.raises(TypeError, match=msg):
method(other)
with pytest.raises(TypeError, match=msg):
method(np.array(other))
with pytest.raises(TypeError, match=msg):
method(pd.Index(other))
# -------------------------------------------------------------
# Timezone-Centric Tests
def test_operators_datetimelike_with_timezones(self):
tz = "US/Eastern"
dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
td2 = td1.copy()
td2.iloc[1] = np.nan
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
td1[0] - dt1
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
with pytest.raises(TypeError, match=msg):
td2[0] - dt2
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
tm.assert_series_equal(result, exp)
msg = "cannot (add|subtract)"
with pytest.raises(TypeError, match=msg):
td1 - dt1
with pytest.raises(TypeError, match=msg):
td2 - dt2
class TestDatetimeIndexArithmetic:
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_addsub_int(self, tz_naive_fixture, one):
# Variants of `one` for #19012
tz = tz_naive_fixture
rng = pd.date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
msg = "Addition/subtraction of integers"
with pytest.raises(TypeError, match=msg):
rng + one
with pytest.raises(TypeError, match=msg):
rng += one
with pytest.raises(TypeError, match=msg):
rng - one
with pytest.raises(TypeError, match=msg):
rng -= one
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("freq", ["H", "D"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_tick(self, int_holder, freq):
# GH#19959
dti = pd.date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from"
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_non_tick(self, int_holder, freq):
# GH#19959
dti = pd.date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from"
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_no_freq(self, int_holder):
# GH#19959
dti = pd.DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
other = int_holder([9, 4, -1])
msg = "|".join(
["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
)
assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz)
# add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz)
# iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range("0 days", periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .*TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract DatetimeArray from"
with pytest.raises(TypeError, match=msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .* from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = "|".join(
[
"cannot perform __neg__ with this index type:",
"ufunc subtract cannot use operands with types",
"cannot subtract DatetimeArray from",
]
)
with pytest.raises(TypeError, match=msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# a PR where there isn't already a giant diff.
@pytest.mark.parametrize(
"addend",
[
datetime(2011, 1, 1),
DatetimeIndex(["2011-01-01", "2011-01-02"]),
DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
np.datetime64("2011-01-01"),
Timestamp("2011-01-01"),
],
ids=lambda x: type(x).__name__,
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
# GH#9631
dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
dtarr = tm.box_expected(dti, box_with_array)
msg = "cannot add DatetimeArray and"
with pytest.raises(TypeError, match=msg):
dtarr + addend
with pytest.raises(TypeError, match=msg):
addend + dtarr
# -------------------------------------------------------------
def test_dta_add_sub_index(self, tz_naive_fixture):
# Check that DatetimeArray defers to Index classes
dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
dta = dti.array
result = dta - dti
expected = dti - dti
tm.assert_index_equal(result, expected)
tdi = result
result = dta + tdi
expected = dti + tdi
tm.assert_index_equal(result, expected)
result = dta - tdi
expected = dti - tdi
tm.assert_index_equal(result, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimedeltaIndex (GH ...)
dti = date_range("20130101", periods=3)
dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
dti_tz2 = date_range("20130101", periods=3).tz_localize("UTC")
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
msg = "DatetimeArray subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dti_tz - dti
with pytest.raises(TypeError, match=msg):
dti - dti_tz
with pytest.raises(TypeError, match=msg):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range("20130101", periods=3)
dti2 = date_range("20130101", periods=4)
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
expected = TimedeltaIndex(["1 days", np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------------
# TODO: Most of this block is moved from series or frame tests, needs
# cleanup, box-parametrization, and de-duplication
@pytest.mark.parametrize("op", [operator.add, operator.sub])
def test_timedelta64_equal_timedelta_supported_ops(self, op):
ser = Series(
[
Timestamp("20130301"),
Timestamp("20130228 23:00:00"),
Timestamp("20130228 22:00:00"),
Timestamp("20130228 21:00:00"),
]
)
intervals = ["D", "h", "m", "s", "us"]
def timedelta64(*args):
# see casting notes in NumPy gh-12927
return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))
for d, h, m, s, us in product(*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
tm.assert_series_equal(lhs, rhs)
def test_ops_nat_mixed_datetime64_timedelta64(self):
# GH#11349
timedelta_series = Series([NaT, Timedelta("1s")])
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
# subtraction
tm.assert_series_equal(
datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta
)
tm.assert_series_equal(
datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp
)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_datetime,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
msg = "cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
timedelta_series - single_nat_dtype_datetime
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_datetime,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_datetime + nat_series_dtype_timedelta,
nat_series_dtype_timestamp,
)
def test_ufunc_coercions(self):
idx = date_range("2011-01-01", periods=3, freq="2D", name="x")
delta = np.timedelta64(1, "D")
exp = date_range("2011-01-02", periods=3, freq="2D", name="x")
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
exp = date_range("2010-12-31", periods=3, freq="2D", name="x")
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
# When adding/subtracting an ndarray (which has no .freq), the result
# does not infer freq
idx = idx._with_freq(None)
delta = np.array(
[np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")]
)
exp = DatetimeIndex(["2011-01-02", "2011-01-05", "2011-01-08"], name="x")
for result in [idx + delta, np.add(idx, delta)]:
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
exp = DatetimeIndex(["2010-12-31", "2011-01-01", "2011-01-02"], name="x")
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == exp.freq
@pytest.mark.parametrize(
"names", [("foo", None, None), ("baz", "bar", None), ("bar", "bar", "bar")]
)
@pytest.mark.parametrize("tz", [None, "America/Chicago"])
def test_dti_add_series(self, tz, names):
# GH#13905
index = DatetimeIndex(
["2016-06-28 05:30", "2016-06-28 05:31"], tz=tz, name=names[0]
)
ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1])
expected = Series(index + Timedelta(seconds=5), index=index, name=names[2])
# passing name arg isn't enough when names[2] is None
expected.name = names[2]
assert expected.dtype == index.dtype
result = ser + index
tm.assert_series_equal(result, expected)
result2 = index + ser
tm.assert_series_equal(result2, expected)
expected = index + Timedelta(seconds=5)
result3 = ser.values + index
tm.assert_index_equal(result3, expected)
result4 = index + ser.values
tm.assert_index_equal(result4, expected)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")]
)
def test_dti_addsub_offset_arraylike(
self, tz_naive_fixture, names, op, index_or_series
):
# GH#18849, GH#19744
box = pd.Index
other_box = index_or_series
tz = tz_naive_fixture
dti = pd.date_range("2017-01-01", periods=2, tz=tz, name=names[0])
other = other_box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1])
xbox = get_upcast_box(box, other)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dti, other)
expected = DatetimeIndex(
[op(dti[n], other[n]) for n in range(len(dti))], name=names[2], freq="infer"
)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
@pytest.mark.parametrize("other_box", [pd.Index, np.array])
def test_dti_addsub_object_arraylike(
self, tz_naive_fixture, box_with_array, other_box
):
tz = tz_naive_fixture
dti = pd.date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = other_box([pd.offsets.MonthEnd(), pd.Timedelta(days=4)])
xbox = get_upcast_box(box_with_array, other)
expected = pd.DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture)
expected = tm.box_expected(expected, xbox)
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
result = dtarr + other
tm.assert_equal(result, expected)
expected = pd.DatetimeIndex(["2016-12-31", "2016-12-29"], tz=tz_naive_fixture)
expected = tm.box_expected(expected, xbox)
with tm.assert_produces_warning(warn):
result = dtarr - other
tm.assert_equal(result, expected)
@pytest.mark.parametrize("years", [-1, 0, 1])
@pytest.mark.parametrize("months", [-2, 0, 2])
def test_shift_months(years, months):
dti = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
]
)
actual = DatetimeIndex(shift_months(dti.asi8, years * 12 + months))
raw = [x + pd.offsets.DateOffset(years=years, months=months) for x in dti]
expected = DatetimeIndex(raw)
tm.assert_index_equal(actual, expected)
def test_dt64arr_addsub_object_dtype_2d():
# block-wise DataFrame operations will require operating on 2D
# DatetimeArray/TimedeltaArray, so check that specifically.
dti = pd.date_range("1994-02-13", freq="2W", periods=4)
dta = dti._data.reshape((4, 1))
other = np.array([[pd.offsets.Day(n)] for n in range(4)])
assert other.shape == dta.shape
with tm.assert_produces_warning(PerformanceWarning):
result = dta + other
with tm.assert_produces_warning(PerformanceWarning):
expected = (dta[:, 0] + other[:, 0]).reshape(-1, 1)
assert isinstance(result, DatetimeArray)
assert result.freq is None
tm.assert_numpy_array_equal(result._data, expected._data)
with tm.assert_produces_warning(PerformanceWarning):
# Case where we expect to get a TimedeltaArray back
result2 = dta - dta.astype(object)
assert isinstance(result2, TimedeltaArray)
assert result2.shape == (4, 1)
assert result2.freq is None
assert (result2.asi8 == 0).all()
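The tests above all exercise the same core semantics: subtracting two datetime-likes yields timedelta64 data, NaT propagates through the result, and mixing tz-aware with tz-naive operands raises. A minimal standalone sketch of that behaviour follows; it is not part of the test suite, and the variable names are illustrative only.
import pandas as pd

left = pd.DatetimeIndex(["2012-01-02", "2012-01-03", pd.NaT])
right = pd.DatetimeIndex(["2012-01-01", pd.NaT, "2012-01-03"])
print(left - right)              # TimedeltaIndex(['1 days', NaT, NaT], ...)

aware = pd.date_range("2012-01-01", periods=3, tz="US/Eastern")
naive = pd.date_range("2012-01-01", periods=3)
try:
    aware - naive                # tz-aware minus tz-naive is rejected
except TypeError as exc:
    print("raised:", exc)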
| 36.568737
| 88
| 0.582996
|
b63885b6644c4d3455efd5a7f6d5b086985f0db1
| 957
|
gyp
|
Python
|
base/base_untrusted.gyp
|
codenote/chromium-test
|
0637af0080f7e80bf7d20b29ce94c5edc817f390
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2018-03-10T13:08:49.000Z
|
2018-03-10T13:08:49.000Z
|
base/base_untrusted.gyp
|
sinmx/chromium-android
|
3fef3b3612d096db83a84126d6f2efacf1962efa
|
[
"Apache-2.0"
] | null | null | null |
base/base_untrusted.gyp
|
sinmx/chromium-android
|
3fef3b3612d096db83a84126d6f2efacf1962efa
|
[
"Apache-2.0"
] | 1
|
2019-10-26T13:42:14.000Z
|
2019-10-26T13:42:14.000Z
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'includes': [
'../build/common_untrusted.gypi',
'base.gypi',
],
'conditions': [
['disable_nacl==0 and disable_nacl_untrusted==0', {
'targets': [
{
'target_name': 'base_untrusted',
'type': 'none',
'variables': {
'base_target': 1,
'nacl_untrusted_build': 1,
'nlib_target': 'libbase_untrusted.a',
'build_glibc': 1,
'build_newlib': 1,
'sources': [
'string16.cc',
'sync_socket_nacl.cc',
'time_posix.cc',
],
},
'dependencies': [
'<(DEPTH)/native_client/tools.gyp:prep_toolchain',
],
},
],
}],
],
}
| 24.538462
| 72
| 0.495298
|
f701d938aba4d8300cacab21080c0e69d8ac18c8
| 2,597
|
py
|
Python
|
day05.py
|
spgill/AdventOfCode2021
|
58218062d64de12dac9761a30a1f9762d9a9ab6e
|
[
"MIT"
] | null | null | null |
day05.py
|
spgill/AdventOfCode2021
|
58218062d64de12dac9761a30a1f9762d9a9ab6e
|
[
"MIT"
] | null | null | null |
day05.py
|
spgill/AdventOfCode2021
|
58218062d64de12dac9761a30a1f9762d9a9ab6e
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
### stdlib imports
import pathlib
### local imports
import utils
@utils.part1
def part1(puzzleInput: str):
# Parse the coordinate pairs from the puzzle input
coordList = [
[
tuple(int(coord) for coord in pair.split(","))
for pair in line.split(" -> ")
]
for line in puzzleInput.strip().splitlines()
]
# Dictionaries counting how many lines pass through each coordinate (one per part)
part1Grid: dict[tuple[int, int], int] = {}
part2Grid: dict[tuple[int, int], int] = {}
# Iterate through each line pair and mark each coordinate the line passes through
for (startX, startY), (endX, endY) in coordList:
xMod = -1 if endX < startX else 1
xRange = range(startX, endX + xMod, xMod)
yMod = -1 if endY < startY else 1
yRange = range(startY, endY + yMod, yMod)
# For horizontal and vertical lines, it's sufficient to simply loop through the coordinates
if startX == endX or startY == endY:
for x in xRange:
for y in yRange:
part1Grid[(x, y)] = part1Grid.get((x, y), 0) + 1
part2Grid[(x, y)] = part2Grid.get((x, y), 0) + 1
# For diagonal lines (45 deg only) we can assume the x and y ranges are equal in length
else:
for i, x in enumerate(xRange):
y = yRange[i]
part2Grid[(x, y)] = part2Grid.get((x, y), 0) + 1
# If the draw option is enabled, create visualization images
if utils.getOption("draw"):
from PIL import Image
maxX, maxY = 0, 0
for (startX, startY), (endX, endY) in coordList:
maxX = max(startX, endX, maxX)
maxY = max(startY, endY, maxY)
for i, grid in enumerate([part1Grid, part2Grid]):
canvas = Image.new("RGB", (maxX + 1, maxY + 1))
for coord, count in grid.items():
canvas.putpixel(
coord, (255, 0, 0) if count > 1 else (255, 255, 255)
)
canvas.save(pathlib.Path.cwd() / f"day05.part{i + 1}.png")
# The answer is the number of grid coordinates with more than one line
utils.printAnswer(len([item for item in part1Grid.items() if item[1] > 1]))
# Pass the part 2 answer to its solution function
return len([item for item in part2Grid.items() if item[1] > 1])
@utils.part2
def part2(_, answer: int):
# Part 1 counted the overlapping points for diagonal lines as well,
# so we can just print the answer
utils.printAnswer(answer)
utils.start()
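The comments above note that horizontal and vertical segments can be rasterized with a plain nested loop, while the 45-degree diagonals have to walk the x and y ranges in lockstep. The sketch below restates that coordinate enumeration without the puzzle's utils scaffolding; the function name is illustrative only.
def points_on_segment(start, end):
    # Works for horizontal, vertical and 45-degree diagonal segments,
    # which is all the puzzle input contains.
    (x0, y0), (x1, y1) = start, end
    dx = (x1 > x0) - (x1 < x0)   # step direction: -1, 0 or 1
    dy = (y1 > y0) - (y1 < y0)
    steps = max(abs(x1 - x0), abs(y1 - y0))
    return [(x0 + i * dx, y0 + i * dy) for i in range(steps + 1)]

assert points_on_segment((0, 9), (5, 9)) == [(0, 9), (1, 9), (2, 9), (3, 9), (4, 9), (5, 9)]
assert points_on_segment((8, 0), (0, 8))[:3] == [(8, 0), (7, 1), (6, 2)]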
| 32.4625
| 99
| 0.58298
|
f1aba606237e303796ac4c0c2d7689a69218aa04
| 3,403
|
py
|
Python
|
tests/contacts/test_contact_ftp.py
|
Mati607/caldera
|
895c3ff84715aa7333ea02545ba8d022cbc9e053
|
[
"Apache-2.0"
] | 3,385
|
2017-11-29T02:08:31.000Z
|
2022-03-31T13:38:11.000Z
|
tests/contacts/test_contact_ftp.py
|
Mati607/caldera
|
895c3ff84715aa7333ea02545ba8d022cbc9e053
|
[
"Apache-2.0"
] | 1,283
|
2017-11-29T16:45:31.000Z
|
2022-03-31T20:10:04.000Z
|
tests/contacts/test_contact_ftp.py
|
Mati607/caldera
|
895c3ff84715aa7333ea02545ba8d022cbc9e053
|
[
"Apache-2.0"
] | 800
|
2017-11-29T17:48:43.000Z
|
2022-03-30T22:39:40.000Z
|
import pytest
import os
from app.contacts import contact_ftp
from app.utility.base_world import BaseWorld
beacon_profile = {'architecture': 'amd64',
'contact': 'ftp',
'paw': '8924',
'exe_name': 'sandcat.exe',
'executors': ['cmd', 'psh'],
'group': 'red',
'host': 'testhost',
'location': 'C:\\sandcat.exe',
'pid': 1234,
'platform': 'windows',
'ppid': 123,
'privilege': 'User',
'username': 'testuser'
}
@pytest.fixture(scope='session')
def base_world():
BaseWorld.clear_config()
BaseWorld.apply_config(name='main', config={'app.contact.ftp.host': '0.0.0.0',
'app.contact.ftp.port': '2222',
'app.contact.ftp.pword': 'caldera',
'app.contact.ftp.server.dir': 'ftp_dir',
'app.contact.ftp.user': 'caldera_user',
'plugins': ['sandcat', 'stockpile'],
'crypt_salt': 'BLAH',
'api_key': 'ADMIN123',
'encryption_key': 'ADMIN123'})
BaseWorld.apply_config(name='agents', config={'sleep_max': 5,
'sleep_min': 5,
'untrusted_timer': 90,
'watchdog': 0,
'implant_name': 'splunkd',
'bootstrap_abilities': [
'43b3754c-def4-4699-a673-1d85648fda6a'
]})
yield BaseWorld
BaseWorld.clear_config()
@pytest.fixture()
def ftp_c2(loop, app_svc, base_world, contact_svc, data_svc, file_svc, obfuscator):
services = app_svc(loop).get_services()
ftp_c2 = contact_ftp.Contact(services)
return ftp_c2
@pytest.fixture()
def ftp_c2_my_server(ftp_c2):
ftp_c2.set_up_server()
return ftp_c2.server
class TestFtpServer:
@staticmethod
def test_server_setup(ftp_c2):
assert ftp_c2.name == 'ftp'
assert ftp_c2.description == 'Accept agent beacons through ftp'
assert ftp_c2.host == '0.0.0.0'
assert ftp_c2.port == '2222'
assert ftp_c2.directory == 'ftp_dir'
assert ftp_c2.user == 'caldera_user'
assert ftp_c2.pword == 'caldera'
assert ftp_c2.server is None
@staticmethod
def test_set_up_server(ftp_c2):
ftp_c2.set_up_server()
assert ftp_c2.server is not None
@staticmethod
def test_my_server_setup(ftp_c2_my_server):
assert ftp_c2_my_server.host == '0.0.0.0'
assert ftp_c2_my_server.port == '2222'
assert ftp_c2_my_server.login == 'caldera_user'
assert ftp_c2_my_server.pword == 'caldera'
assert ftp_c2_my_server.ftp_server_dir == os.path.join(os.getcwd(), 'ftp_dir')
assert os.path.exists(ftp_c2_my_server.ftp_server_dir)
os.rmdir(ftp_c2_my_server.ftp_server_dir)
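The base_world fixture drives the FTP contact entirely through the 'main' BaseWorld config, and the attribute checks in TestFtpServer line up one-to-one with those keys. A plain restatement of that mapping, for reference only; the dict name is mine, not part of caldera.
FTP_CONFIG_TO_ATTRIBUTE = {
    "app.contact.ftp.host": "host",             # asserted == '0.0.0.0'
    "app.contact.ftp.port": "port",             # asserted == '2222'
    "app.contact.ftp.server.dir": "directory",  # asserted == 'ftp_dir'
    "app.contact.ftp.user": "user",             # asserted == 'caldera_user'
    "app.contact.ftp.pword": "pword",           # asserted == 'caldera'
}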
| 39.569767
| 92
| 0.489274
|
e72254380ee2414185a347800c127fb13d718c3a
| 12,404
|
py
|
Python
|
scripts/contract_interaction.py
|
defistar/Sovryn-smart-contracts
|
57562f62dc126c154c998b051dc75ea09e98f87a
|
[
"Apache-2.0"
] | 1
|
2020-11-18T13:40:56.000Z
|
2020-11-18T13:40:56.000Z
|
scripts/contract_interaction.py
|
defistar/Sovryn-smart-contracts
|
57562f62dc126c154c998b051dc75ea09e98f87a
|
[
"Apache-2.0"
] | null | null | null |
scripts/contract_interaction.py
|
defistar/Sovryn-smart-contracts
|
57562f62dc126c154c998b051dc75ea09e98f87a
|
[
"Apache-2.0"
] | 1
|
2020-11-18T13:41:30.000Z
|
2020-11-18T13:41:30.000Z
|
'''
This script serves the purpose of interacting with existing smart contracts on the testnet.
'''
from brownie import *
from brownie.network.contract import InterfaceContainer
def main():
acct = accounts.load("rskdeployer")
iSUSD = '0xD1A979EDE2c17FCD31800Bed859e5EC3DA178Cb9'
iRBTC = '0x08118a219a4e34E06176cD0861fcDDB865771111'
iSUSDSettings = '0x588F22EaeEe37d9BD0174de8e76df9b69D3Ee4eC'
iRBTCSettings = '0x99DcD929027a307D76d5ca912Eec1C0aE3FA6DDF'
iSUSDLogic = '0x48f96e4e8adb8db5B70538b58DaDE4a89E2F9DF0'
iRBTCLogic = '0xCA27bC90C76fc582406fBC4665832753f74A75F5'
protocol = '0x74808B7a84327c66bA6C3013d06Ed3DD7664b0D4'
testSUSD = '0xE631653c4Dc6Fb98192b950BA0b598f90FA18B3E'
testRBTC ='0xE53d858A78D884659BF6955Ea43CBA67c0Ae293F'
#setPriceFeeds(acct)
#mintTokens(acct, iSUSD, iRBTC)
#burnTokens(acct, iSUSD, iRBTC)
#readLendingFee(acct)
#setupLoanTokenRates(acct, iSUSD, iSUSDSettings, iSUSDLogic)
#setupLoanTokenRates(acct, iRBTC, iRBTCSettings, iRBTCLogic)
#lendToPools(acct, iSUSD, iRBTC)
#removeFromPool(acct, iSUSD, iRBTC)
#readLoanTokenState(acct, iSUSD)
#readLoanTokenState(acct, iRBTC)
#readLoan(acct, protocol, '0xde1821f5678c33ca4007474735d910c0b6bb14f3fa0734447a9bd7b75eaf68ae')
#getTokenPrice(acct, iRBTC)
#testTokenBurning(acct, iRBTC, testRBTC)
#liquidate(acct, protocol, '0x5f8d4599657b3d24eb4fee83974a43c62f411383a8b5750b51adca63058a0f59')
#testTradeOpeningAndClosing(acct, protocol,iSUSD,testSUSD,testRBTC)
testBorrow(acct,protocol,iSUSD,testSUSD,testRBTC)
#setupTorqueLoanParams(acct,iSUSD,iSUSDSettings,testSUSD,testRBTC)
def setPriceFeeds(acct):
priceFeedContract = '0xf2e9fD37912aB53D0FEC1eaCE86d6A14346Fb6dD'
wethAddress = '0x602C71e4DAC47a042Ee7f46E0aee17F94A3bA0B6'
rbtcAddress ='0xE53d858A78D884659BF6955Ea43CBA67c0Ae293F'
susdAddress = '0xE631653c4Dc6Fb98192b950BA0b598f90FA18B3E'
feeds = Contract.from_abi("PriceFeedsLocal", address=priceFeedContract, abi=PriceFeedsLocal.abi, owner=acct)
feeds.setRates(
wethAddress,
rbtcAddress,
0.34e18
)
feeds.setRates(
wethAddress,
susdAddress,
382e18
)
def mintTokens(acct, iSUSD, iRBTC):
susd = Contract.from_abi("TestToken", address = '0xE631653c4Dc6Fb98192b950BA0b598f90FA18B3E', abi = TestToken.abi, owner = acct)
rbtc = Contract.from_abi("TestToken", address = '0xE53d858A78D884659BF6955Ea43CBA67c0Ae293F', abi = TestToken.abi, owner = acct)
susd.mint(iSUSD,1e50)
rbtc.mint(iRBTC,1e50)
def burnTokens(acct, iSUSD, iRBTC):
susd = Contract.from_abi("TestToken", address = '0xE631653c4Dc6Fb98192b950BA0b598f90FA18B3E', abi = TestToken.abi, owner = acct)
rbtc = Contract.from_abi("TestToken", address = '0xE53d858A78D884659BF6955Ea43CBA67c0Ae293F', abi = TestToken.abi, owner = acct)
#susd.burn(iSUSD,1e50)
rbtc.burn(iRBTC,1e50)
def readLendingFee(acct):
sovryn = Contract.from_abi("sovryn", address='0xBAC609F5C8bb796Fa5A31002f12aaF24B7c35818', abi=interface.ISovryn.abi, owner=acct)
lfp = sovryn.lendingFeePercent()
print(lfp/1e18)
def setupLoanTokenRates(acct, loanTokenAddress, settingsAddress, logicAddress):
baseRate = 1e18
rateMultiplier = 20.25e18
localLoanToken = Contract.from_abi("loanToken", address=loanTokenAddress, abi=LoanToken.abi, owner=acct)
localLoanToken.setTarget(settingsAddress)
localLoanToken = Contract.from_abi("loanToken", address=loanTokenAddress, abi=LoanTokenSettingsLowerAdmin.abi, owner=acct)
localLoanToken.setDemandCurve(baseRate,rateMultiplier,baseRate,rateMultiplier)
localLoanToken = Contract.from_abi("loanToken", address=loanTokenAddress, abi=LoanToken.abi, owner=acct)
localLoanToken.setTarget(logicAddress)
localLoanToken = Contract.from_abi("loanToken", address=loanTokenAddress, abi=LoanTokenLogicStandard.abi, owner=acct)
borrowInterestRate = localLoanToken.borrowInterestRate()
print("borrowInterestRate: ",borrowInterestRate)
def lendToPools(acct, iSUSDaddress, iRBTCaddress):
susd = Contract.from_abi("TestToken", address = '0xE631653c4Dc6Fb98192b950BA0b598f90FA18B3E', abi = TestToken.abi, owner = acct)
rbtc = Contract.from_abi("TestToken", address = '0xE53d858A78D884659BF6955Ea43CBA67c0Ae293F', abi = TestToken.abi, owner = acct)
iSUSD = Contract.from_abi("loanToken", address=iSUSDaddress, abi=LoanTokenLogicStandard.abi, owner=acct)
iRBTC = Contract.from_abi("loanToken", address=iRBTCaddress, abi=LoanTokenLogicStandard.abi, owner=acct)
susd.approve(iSUSD,1e40)
rbtc.approve(iRBTC,1e40)
iSUSD.mint(acct, 1e30)
iRBTC.mint(acct, 1e30)
def removeFromPool(acct, iSUSDaddress, iRBTCaddress):
susd = Contract.from_abi("TestToken", address = '0xE631653c4Dc6Fb98192b950BA0b598f90FA18B3E', abi = TestToken.abi, owner = acct)
rbtc = Contract.from_abi("TestToken", address = '0xE53d858A78D884659BF6955Ea43CBA67c0Ae293F', abi = TestToken.abi, owner = acct)
iSUSD = Contract.from_abi("loanToken", address=iSUSDaddress, abi=LoanTokenLogicStandard.abi, owner=acct)
iRBTC = Contract.from_abi("loanToken", address=iRBTCaddress, abi=LoanTokenLogicStandard.abi, owner=acct)
iSUSD.burn(acct, 99e28)
iRBTC.burn(acct, 99e28)
def readLoanTokenState(acct, loanTokenAddress):
'''
susd = Contract.from_abi("TestToken", address = '0xE631653c4Dc6Fb98192b950BA0b598f90FA18B3E', abi = TestToken.abi, owner = acct)
balance = susd.balanceOf(loanTokenAddress)
print("contract susd balance", balance/1e18)
'''
loanToken = Contract.from_abi("loanToken", address=loanTokenAddress, abi=LoanTokenLogicStandard.abi, owner=acct)
tas = loanToken.totalAssetSupply()
print("total supply", tas/1e18);
#print((balance - tas)/1e18)
tab = loanToken.totalAssetBorrow()
print("total asset borrowed", tab/1e18)
abir = loanToken.avgBorrowInterestRate()
print("average borrow interest rate", abir/1e18)
ir = loanToken.nextSupplyInterestRate(0)
print("interest rate", ir)
def readLoan(acct, protocolAddress, loanId):
sovryn = Contract.from_abi("sovryn", address=protocolAddress, abi=interface.ISovryn.abi, owner=acct)
print(sovryn.getLoan(loanId).dict())
def getTokenPrice(acct, loanTokenAddress):
loanToken = Contract.from_abi("loanToken", address=loanTokenAddress, abi=LoanTokenLogicStandard.abi, owner=acct)
print("token price",loanToken.tokenPrice())
def testTokenBurning(acct, loanTokenAddress, testTokenAddress):
loanToken = Contract.from_abi("loanToken", address=loanTokenAddress, abi=LoanTokenLogicStandard.abi, owner=acct)
testToken = Contract.from_abi("TestToken", address = testTokenAddress, abi = TestToken.abi, owner = acct)
testToken.approve(loanToken,1e17)
loanToken.mint(acct, 1e17)
balance = loanToken.balanceOf(acct)
print("balance", balance)
tokenPrice = loanToken.tokenPrice()
print("token price",tokenPrice/1e18)
burnAmount = int(balance / 2)
print("burn amount", burnAmount)
tx = loanToken.burn(acct, burnAmount)
print(tx.info())
balance = loanToken.balanceOf(acct)
print("remaining balance", balance/1e18)
assert(tx.events["Burn"]["tokenAmount"] == burnAmount)
def liquidate(acct, protocolAddress, loanId):
sovryn = Contract.from_abi("sovryn", address=protocolAddress, abi=interface.ISovryn.abi, owner=acct)
loan = sovryn.getLoan(loanId).dict()
print(loan)
if(loan['maintenanceMargin'] > loan['currentMargin']):
testToken = Contract.from_abi("TestToken", address = loan['loanToken'], abi = TestToken.abi, owner = acct)
testToken.mint(acct, loan['maxLiquidatable'])
testToken.approve(sovryn, loan['maxLiquidatable'])
sovryn.liquidate(loanId, acct, loan['maxLiquidatable'])
else:
print("can't liquidate because the loan is healthy")
def testTradeOpeningAndClosing(acct, protocolAddress, loanTokenAddress, underlyingTokenAddress, collateralTokenAddress):
loanToken = Contract.from_abi("loanToken", address=loanTokenAddress, abi=LoanTokenLogicStandard.abi, owner=acct)
testToken = Contract.from_abi("TestToken", address = underlyingTokenAddress, abi = TestToken.abi, owner = acct)
sovryn = Contract.from_abi("sovryn", address=protocolAddress, abi=interface.ISovryn.abi, owner=acct)
loan_token_sent = 100e18
testToken.mint(acct, loan_token_sent)
testToken.approve(loanToken, loan_token_sent)
tx = loanToken.marginTrade(
"0", # loanId (0 for new loans)
2e18, # leverageAmount
loan_token_sent, # loanTokenSent
0, # no collateral token sent
collateralTokenAddress, # collateralTokenAddress
acct, # trader,
b'' # loanDataBytes (only required with ether)
)
loanId = tx.events['Trade']['loanId']
collateral = tx.events['Trade']['positionSize']
print("closing loan with id", loanId)
print("position size is ", collateral)
loan = sovryn.getLoan(loanId)
print("found the loan in storage with position size", loan['collateral'])
tx = sovryn.closeWithSwap(loanId, acct, collateral, True, b'')
def testBorrow(acct, protocolAddress, loanTokenAddress, underlyingTokenAddress, collateralTokenAddress):
#read contract abis
sovryn = Contract.from_abi("sovryn", address=protocolAddress, abi=interface.ISovryn.abi, owner=acct)
loanToken = Contract.from_abi("loanToken", address=loanTokenAddress, abi=LoanTokenLogicStandard.abi, owner=acct)
testToken = Contract.from_abi("TestToken", address = collateralTokenAddress, abi = TestToken.abi, owner = acct)
# determine borrowing parameter
withdrawAmount = 10e18 #i want to borrow 10 USD
# compute the required collateral. params: address loanToken, address collateralToken, uint256 newPrincipal,uint256 marginAmount, bool isTorqueLoan
collateralTokenSent = sovryn.getRequiredCollateral(underlyingTokenAddress,collateralTokenAddress,withdrawAmount,50e18, True)
print("collateral needed", collateralTokenSent)
durationInSeconds = 60*60*24*10 #10 days
#check requirements
totalSupply = loanToken.totalSupply()
totalBorrowed = loanToken.totalAssetBorrow()
print('available supply:', totalSupply - totalBorrowed)
assert(totalSupply - totalBorrowed >= withdrawAmount)
interestRate = loanToken.nextBorrowInterestRate(withdrawAmount)
print('interest rate (needs to be > 0):', interestRate)
assert(interestRate > 0)
#approve the transfer of the collateral if needed
if(testToken.allowance(acct, loanToken.address) < collateralTokenSent):
testToken.approve(loanToken.address, collateralTokenSent)
# borrow some funds
tx = loanToken.borrow(
"0", # bytes32 loanId
withdrawAmount, # uint256 withdrawAmount
durationInSeconds, # uint256 initialLoanDuration
collateralTokenSent, # uint256 collateralTokenSent
testToken.address, # address collateralTokenAddress
acct, # address borrower
acct, # address receiver
b'' # bytes memory loanDataBytes
)
#assert the trade was processed as expected
print(tx.info())
def setupTorqueLoanParams(acct, loanTokenAddress, loanTokenSettingsAddress, underlyingTokenAddress, collateralTokenAddress):
loanToken = Contract.from_abi("loanToken", address=loanTokenAddress, abi=LoanTokenLogicStandard.abi, owner=acct)
loanTokenSettings = Contract.from_abi("loanTokenSettings", address=loanTokenSettingsAddress, abi=LoanTokenSettingsLowerAdmin.abi, owner=acct)
params = [];
setup = [
b"0x0", ## id
False, ## active
str(accounts[0]), ## owner
underlyingTokenAddress, ## loanToken
collateralTokenAddress, ## collateralToken.
Wei("50 ether"), ## minInitialMargin
Wei("15 ether"), ## maintenanceMargin
0 ## fixedLoanTerm
]
params.append(setup)
calldata = loanTokenSettings.setupTorqueLoanParams.encode_input(params)
tx = loanToken.updateSettings(loanTokenSettings.address, calldata)
assert('LoanParamsSetup' in tx.events)
assert('LoanParamsIdSetup' in tx.events)
print(tx.info())
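Every helper above re-attaches to deployed contracts with the same Contract.from_abi(...) call. A small wrapper of the kind sketched below would shorten the call sites; it assumes the brownie imports at the top of this script, and the helper name is mine rather than part of the original code.
def contract_at(name, address, abi, acct):
    # Thin wrapper around brownie's Contract.from_abi, exactly as it is used
    # throughout this script.
    return Contract.from_abi(name, address=address, abi=abi, owner=acct)

# Example call site, mirroring readLoanTokenState above:
# loanToken = contract_at("loanToken", loanTokenAddress, LoanTokenLogicStandard.abi, acct)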
| 50.628571
| 152
| 0.733554
|
df23e69bdbbf58545e6f32b6895aa9addd38d03e
| 355
|
py
|
Python
|
setup.py
|
Danielhp95/gym-kuhn-poker
|
28fc047a0c605ae38cb01eb3fe7fee4c7e8114db
|
[
"MIT"
] | 9
|
2020-03-07T19:03:05.000Z
|
2021-12-21T19:38:57.000Z
|
setup.py
|
Danielhp95/gym-kuhn-poker
|
28fc047a0c605ae38cb01eb3fe7fee4c7e8114db
|
[
"MIT"
] | null | null | null |
setup.py
|
Danielhp95/gym-kuhn-poker
|
28fc047a0c605ae38cb01eb3fe7fee4c7e8114db
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(name='gym_kuhn_poker',
version='0.2',
description='OpenAI gym environment for Kuhn poker',
url='https://github.com/Danielhp95/gym-kuhn-poker',
author='Sarios',
author_email='madness@xcape.com',
packages=find_packages(),
install_requires=['gym', 'numpy']
)
| 29.583333
| 58
| 0.664789
|
5abb08f7b487ec986ece9c7e579a947dc43f385c
| 4,728
|
py
|
Python
|
src/sha256.py
|
c1m50c/sha256
|
68d79f71217e4e95ba506d1ff43af3caaf4a772a
|
[
"MIT"
] | null | null | null |
src/sha256.py
|
c1m50c/sha256
|
68d79f71217e4e95ba506d1ff43af3caaf4a772a
|
[
"MIT"
] | null | null | null |
src/sha256.py
|
c1m50c/sha256
|
68d79f71217e4e95ba506d1ff43af3caaf4a772a
|
[
"MIT"
] | null | null | null |
# FIXME: Encoding is not properly hashing the given input.
from typing import List, Union
# Constants #
ADDITION_MODULO: int = 2 ** 32 # Modulo to perform addition in, defined in the specification sheet.
WORD_BITS: int = 32 # Bits of Words, defined in the specification sheet.
K: List[int] = [
# Word Constants ~ Spec 4.2.2 #
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
]
def encode(message: Union[str, bytes, bytearray]) -> bytes:
message_arr: bytearray = message
# Type Checking #
if isinstance(message, str):
message_arr = bytearray(message, "ascii")
elif isinstance(message, bytes):
message_arr = bytearray(message)
elif not isinstance(message, bytearray):
raise TypeError("Passed Message was not a valid type, type needs to be of 'str', 'bytes', or 'bytearray'.")
# Padding ~ Spec 5.1.1 #
message_length: int = len(message_arr) * 8
message_arr.append(0x80)  # padding starts with a single '1' bit, i.e. the byte 0x80
while (len(message_arr) * 8 + 64) % 512 != 0:
message_arr.append(0x00)
message_arr += message_length.to_bytes(8, "big")
assert (len(message_arr) * 8) % 512 == 0, "Message could not be properly padded."
# Parsing ~ Spec 5.2.1 #
chunks: List[bytearray] = [ message_arr[i : i + 64] for i in range(0, len(message_arr), 64) ]
# Set Intial Hash Values ~ Spec 5.3.2 #
hash_table: List[int] = [
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
]
# Computation ~ Spec 6.2.2 #
for c in chunks:
w: List[bytes] = [ ]
for t in range(0, 64):
if t <= 15:
w.append(bytes(c[t * 4 : (t * 4) + 4]))
else:
x: int = lc_sigma_1(int.from_bytes(w[t - 2], "big")) + int.from_bytes(w[t - 7], "big") + \
lc_sigma_0(int.from_bytes(w[t - 15], "big")) + int.from_bytes(w[t - 16], "big")
w.append((x % ADDITION_MODULO).to_bytes(4, "big"))
assert len(w) == 64, "Could not properly create a message schedule."
a, b, c, d, e, f, g, h = hash_table
for t in range(0, 64):
t1: int = (h + sigma_1(e) + ch(e, f, g) + K[t] + int.from_bytes(w[t], "big")) % ADDITION_MODULO
t2: int = (sigma_0(a) + maj(a, b, c)) % ADDITION_MODULO
h, g, f = g, f, e
e = (d + t1) % ADDITION_MODULO
d, c, b = c, b, a
a = (t1 + t2) % ADDITION_MODULO
hash_table[0] = (a + hash_table[0]) % ADDITION_MODULO
hash_table[1] = (b + hash_table[1]) % ADDITION_MODULO
hash_table[2] = (c + hash_table[2]) % ADDITION_MODULO
hash_table[3] = (d + hash_table[3]) % ADDITION_MODULO
hash_table[4] = (e + hash_table[4]) % ADDITION_MODULO
hash_table[5] = (f + hash_table[5]) % ADDITION_MODULO
hash_table[6] = (g + hash_table[6]) % ADDITION_MODULO
hash_table[7] = (h + hash_table[7]) % ADDITION_MODULO
result: bytes = hash_table[0].to_bytes(4, "big") + hash_table[1].to_bytes(4, "big") + \
hash_table[2].to_bytes(4, "big") + hash_table[3].to_bytes(4, "big") + \
hash_table[4].to_bytes(4, "big") + hash_table[5].to_bytes(4, "big") + \
hash_table[6].to_bytes(4, "big") + hash_table[7].to_bytes(4, "big")
return result
# Helper Functions ~ Spec 4.1.2 #
rotate_right = lambda x, n : ((x >> n) | (x << (WORD_BITS - n))) & 0xFFFFFFFF  # mask to 32 bits; Python ints are unbounded
# rotate_left = lambda x, n : (x << n) | (x >> WORD_BITS - n)
ch = lambda x, y, z : (x & y) ^ (~x & z)  # Ch(x, y, z) = (x AND y) XOR (NOT x AND z)
maj = lambda x, y, z : (x & y) ^ (x & z) ^ (y & z)
sigma_0 = lambda x : rotate_right(x, 2) ^ rotate_right(x, 13) ^ rotate_right(x, 22)
sigma_1 = lambda x : rotate_right(x, 6) ^ rotate_right(x, 11) ^ rotate_right(x, 25)
lc_sigma_0 = lambda x : rotate_right(x, 7) ^ rotate_right(x, 18) ^ (x >> 3)
lc_sigma_1 = lambda x : rotate_right(x, 17) ^ rotate_right(x, 19) ^ (x >> 10)
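With the padding byte, the Ch function, and the 32-bit rotation mask corrected above, the module should reproduce the published SHA-256 test vectors. The block below is an added sanity check, not part of the original file; it simply compares against the standard library's hashlib.
if __name__ == "__main__":
    import hashlib
    for msg in (b"", b"abc"):
        ours = encode(msg).hex()
        ref = hashlib.sha256(msg).hexdigest()
        print(msg, ours, "OK" if ours == ref else "MISMATCH, expected " + ref)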
| 43.777778
| 115
| 0.606599
|
75c5c2c80c501d338049d9de133436ef90d88c97
| 12,263
|
py
|
Python
|
weighted_graph.py
|
michibo/feyncop
|
19aafd73feb39335e0e1451c81c5b2a50af01112
|
[
"MIT"
] | 12
|
2015-02-02T12:39:47.000Z
|
2021-03-24T13:25:04.000Z
|
weighted_graph.py
|
michibo/feyncop
|
19aafd73feb39335e0e1451c81c5b2a50af01112
|
[
"MIT"
] | 1
|
2016-02-05T00:13:20.000Z
|
2016-02-05T00:13:54.000Z
|
weighted_graph.py
|
michibo/feyncop
|
19aafd73feb39335e0e1451c81c5b2a50af01112
|
[
"MIT"
] | 1
|
2016-02-05T12:58:29.000Z
|
2016-02-05T12:58:29.000Z
|
"""weighted_graph.py: This file is part of the feyncop/feyngen package.
Implements the WeightedGraph class. """
# See also: http://people.physik.hu-berlin.de/~borinsky/
__author__ = "Michael Borinsky"
__email__ = "borinsky@physik.hu-berlin.de"
__copyright__ = "Copyright (C) 2014 Michael Borinsky"
__license__ = "MIT License"
__version__ = "1.0"
# Copyright (c) 2014 Michael Borinsky
# This program is distributed under the MIT License:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from math import *
import copy, itertools
from stuff import *
from graph import Graph
class WeightedGraph(Graph):
"""This class extends the basic utilities in the Graph class by the tools
to handle QED and Yang-Mills graphs."""
def __init__( self, edges, edge_weights, symmetry_factor=0 ):
"""Initializes the WeightedGraph class. Edges, edge_weights and
symmetry_factor can be provided."""
if len(edges) != len(edge_weights):
raise ValueError("edges and edge_weights must have the same length")
super(WeightedGraph, self).__init__( edges, symmetry_factor )
self.edge_weights = edge_weights
def get_edge_str( self, e ):
"""Return a readable string of the edges of the graph."""
v1,v2 = self.edges[e]
w = self.edge_weights[e]
wDict = [ '0', 'f', 'A', 'c' ]
return "[%d,%d,%c]" % (v1,v2,wDict[w])
def get_edges_tuple( self ):
"""Get a unique tuple to identify the graph. (Unique only for every labeling)."""
return tuple( sorted( ( tuple( sorted(edge) if w==2 else edge ), w) for edge,w in zip(self.edges,self.edge_weights) ) )
def graph_from_sub_edges( self, sub_edges ):
"""Create a new graph from a sub set of its edges."""
sub_graph = super(WeightedGraph, self).graph_from_sub_edges( sub_edges )
sub_graph.edge_weights = tuple( self.edge_weights[e] for e in sorted(sub_edges) )
return sub_graph
def sub_edges_by_weight( self, weight ):
"""Returns all subedges with a certain weight."""
return frozenset( e for e,w in enumerate(self.edge_weights) if w == weight )
@property
def residue_type( self ):
"""Returns the residue type of the graph."""
def dir_e(e, v):
if self.edge_weights[e] == 2: return 1
if v == self.edges[e][0]: return -1
else: return 1
ext_types = [ dir_e(e,v) * self.edge_weights[e] for v in self.external_vtcs_set for e in self.adj_edges( v, self.edges_set ) ]
return tuple(sorted(ext_types))
def get_vtx_type( self, v ):
"""Returns the type of the vertex v in the same format as
residue_type."""
def dir1(e, v):
if self.edge_weights[e] == 2: return 1
if v == self.edges[e][0]: return -1
else: return 1
def dir2(e, v):
if self.edge_weights[e] == 2: return 1
if v == self.edges[e][0]: return 1
else: return -1
adj_types = [ dir1(e,v)*self.edge_weights[e] for e in self.adj_edges( v, self.edges_set ) ]
adj_types += [ dir2(e,v)*self.edge_weights[e] for e in self.edges_set if self.edges[e] == (v,v) ]
return tuple(sorted(adj_types))
def get_vtcs_coloring( self ):
"""Helper function: Calculate the vertex coloring in a format suitable
for the canonical labeling calculation."""
        # Vertices with different numbers or types of self-loops are colored
        # differently.
dictWeights = { edge : self.edge_weights[e] for e,edge in enumerate(self.edges) }
edge_degree_counter = self.edge_degree_counter(self.edges_set)
selfloop_degree_list = [ (edge_degree_counter[(v,v)],dictWeights[(v,v)] if edge_degree_counter[(v,v)] else 2) for v in self.internal_vtcs_set ]
        # Sort by (multiplicity, vertex) so the order stays deterministic even
        # when several vertices share the same multiplicity.
selfloop_multiplicity_list = sorted( (mul,v) for v, mul in zip(self.internal_vtcs_set, selfloop_degree_list) )
( ( max_selfloop_multiplicity, _), _ ) = selfloop_multiplicity_list[-1] if selfloop_multiplicity_list else ((0,2), 0)
self_loop_list = [ frozenset( vtx for mul, vtx in filter( lambda ((mul, we), vtx) : mul == i and we == w, selfloop_multiplicity_list ) ) for i in range( max_selfloop_multiplicity+1 ) for w in (1,2,3) ]
# External vertices all have the same color still.
return self_loop_list + [ self.external_vtcs_set ]
def get_edges_coloring( self, edges_set ):
"""Helper function: Calculate the edge coloring in a format suitable
for the canonical labeling calculation."""
# Fermions, bosons and ghosts need different color classes.
fermion_edges_set = self.sub_edges_by_weight(1) & edges_set
boson_edges_set = self.sub_edges_by_weight(2) & edges_set
ghost_edges_set = self.sub_edges_by_weight(3) & edges_set
fermion_edges = frozenset( self.edges[i] for i in fermion_edges_set if not self.is_selfloop(self.edges[i]) )
ghost_edges = frozenset( self.edges[i] for i in ghost_edges_set if not self.is_selfloop(self.edges[i]) )
boson_edges = frozenset( self.edges[i] for i in boson_edges_set )
# Fermions and ghosts need orientation. Bosons not!
# For higher performance some special cases of boson-fermion-ghost
# edge combinations are included.
normalize = lambda edge : (max(edge),min(edge))
flip = lambda (x,y) : (y,x)
fermion_loops = frozenset( normalize(edge) for edge in fermion_edges if flip(edge) in fermion_edges )
ghost_loops = frozenset( normalize(edge) for edge in ghost_edges if flip(edge) in ghost_edges )
reduced_fermion_edges = fermion_edges - fermion_loops - frozenset( flip(edge) for edge in fermion_loops )
reduced_ghost_edges = ghost_edges - ghost_loops - frozenset( flip(edge) for edge in ghost_loops )
boson_fermion_loops = frozenset( edge for edge in reduced_fermion_edges if flip(edge) in boson_edges or edge in boson_edges )
boson_ghost_loops = frozenset( edge for edge in reduced_ghost_edges if flip(edge) in boson_edges or edge in boson_edges )
reduced_boson_edges = boson_edges - boson_fermion_loops - frozenset( flip(edge) for edge in boson_fermion_loops ) - boson_ghost_loops - frozenset( flip(edge) for edge in boson_ghost_loops )
dbl_boson_edges = reduced_boson_edges | frozenset( flip(edge) for edge in reduced_boson_edges )
if len(dbl_boson_edges&reduced_fermion_edges) != 0 or \
len(dbl_boson_edges&reduced_ghost_edges) != 0:
            raise ValueError("boson edges overlap fermion/ghost edges: %s %s"
                             % (dbl_boson_edges, reduced_fermion_edges))
# Calculate the boson coloring as in the Graph class.
boson_coloring = super( WeightedGraph, self).get_edges_coloring( boson_edges_set )
return [ dbl_boson_edges | reduced_fermion_edges | reduced_ghost_edges,
fermion_loops, boson_fermion_loops, ghost_loops, boson_ghost_loops,
reduced_ghost_edges - boson_ghost_loops ] + boson_coloring[1:]
def get_trivial_symmetry_factor( self ):
"""Calculates the trivial factor in the symmetry factor. Only
considers edge multiplicity and self loops."""
grpSize = 1
boson_edges = self.sub_edges_by_weight(2)
edge_degree_counter = self.edge_degree_counter(boson_edges)
for mul_edge_deg in ( m for edge, m in edge_degree_counter.iteritems() if not self.is_selfloop(edge) ):
grpSize*= factorial(mul_edge_deg)
for selfloop_deg in ( m for edge, m in edge_degree_counter.iteritems() if self.is_selfloop(edge) ):
grpSize*= double_factorial(2*selfloop_deg)
return grpSize
def permute_external_edges( self ):
"""Generate all possible graphs with fixed external legs from the
graph provided that the graph is non-leg-fixed."""
class FixedGraph( type(self) ):
def get_vtcs_coloring( self ):
vtcs_coloring = super(FixedGraph, self).get_vtcs_coloring()
vtcs_coloring = [ c - self.external_vtcs_set for c in vtcs_coloring]
vtcs_coloring.extend( frozenset([v]) for v in sorted(self.external_vtcs_set) )
return vtcs_coloring
extern_boson_vtcs = \
frozenset( v for e in self.sub_edges_by_weight(2) for v in self.edges[e] ) \
& self.external_vtcs_set
extern_in_fermion_vtcs = \
frozenset( self.edges[e][0] for e in self.sub_edges_by_weight(1) ) \
& self.external_vtcs_set
extern_out_fermion_vtcs = \
frozenset( self.edges[e][1] for e in self.sub_edges_by_weight(1) ) \
& self.external_vtcs_set
extern_in_ghost_vtcs = \
frozenset( self.edges[e][0] for e in self.sub_edges_by_weight(3) ) \
& self.external_vtcs_set
extern_out_ghost_vtcs = \
frozenset( self.edges[e][1] for e in self.sub_edges_by_weight(3) ) \
& self.external_vtcs_set
extern_vtcs_list = list(extern_boson_vtcs) + \
list(extern_in_fermion_vtcs) + \
list(extern_out_fermion_vtcs) + \
list(extern_in_ghost_vtcs) + \
list(extern_out_ghost_vtcs)
if frozenset(extern_vtcs_list) != self.external_vtcs_set:
            raise ValueError("external vertex bookkeeping is inconsistent with external_vtcs_set")
vtcs_list = list(self.internal_vtcs_set) + \
extern_vtcs_list
for perm0 in itertools.permutations( extern_boson_vtcs ):
for perm1 in itertools.permutations( extern_in_fermion_vtcs ):
for perm2 in itertools.permutations( extern_out_fermion_vtcs ):
for perm3 in itertools.permutations( extern_in_ghost_vtcs ):
for perm4 in itertools.permutations( extern_out_ghost_vtcs ):
new_vtcs_list = tuple(self.internal_vtcs_set) + \
perm0 + perm1 + perm2 + perm3 + perm4
m = dict( zip( vtcs_list, new_vtcs_list ) )
def relabel_edge( (v1,v2) ):
return (m[v1], m[v2])
yield FixedGraph(
[ relabel_edge(edge) for edge in self.edges ], self.edge_weights, 0 )
@property
def clean_graph( self ):
"""Orders the edge- and weight list of the graph in a transparent manner."""
ext_sorter = ( e in self.external_edges_set for e,edge in enumerate(self.edges) )
norm = lambda (edge) : (max(edge),min(edge))
edges = [ norm(edge) if w == 2 else edge for w,edge in zip(self.edge_weights, self.edges) ]
xwe_list = list(sorted(zip(ext_sorter, self.edge_weights, edges)))
edges = [ edge for x,w,edge in xwe_list ]
weights = [ w for x,w,edge in xwe_list ]
g = copy.copy(self)
g.edges = tuple(edges)
g.edge_weights= tuple(weights)
g.prepare_graph()
return g
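# Usage sketch (illustrative only): the edge-weight convention is taken from
# get_edge_str above (1 = fermion 'f', 2 = boson 'A', 3 = ghost 'c'); how the
# Graph base class in graph.py identifies external legs is assumed, not checked.
#
#   g = WeightedGraph(edges=[(0, 2), (1, 2), (2, 3), (3, 2)],
#                     edge_weights=[2, 2, 1, 1])
#   print g.residue_type                    # sorted external leg types
#   print g.get_trivial_symmetry_factor()   # multi-edge / self-loop factor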
| 47.34749
| 209
| 0.645519
|
4930c1eb499e9af51e7ac73532919e9ace902e97
| 1,721
|
py
|
Python
|
examples/zips/soundly_pylons/soundly_pylons.py
|
PhilNyeThePhysicsGuy/refraction_render
|
c315d7c23db990b9609386a1e16be76b55bcb235
|
[
"BSD-2-Clause"
] | 1
|
2018-07-06T08:32:57.000Z
|
2018-07-06T08:32:57.000Z
|
examples/zips/soundly_pylons/soundly_pylons.py
|
PhilNyeThePhysicsGuy/refraction_render
|
c315d7c23db990b9609386a1e16be76b55bcb235
|
[
"BSD-2-Clause"
] | null | null | null |
examples/zips/soundly_pylons/soundly_pylons.py
|
PhilNyeThePhysicsGuy/refraction_render
|
c315d7c23db990b9609386a1e16be76b55bcb235
|
[
"BSD-2-Clause"
] | 2
|
2018-07-25T19:15:58.000Z
|
2021-03-02T12:30:09.000Z
|
from refraction_render.renderers import Scene,Renderer_35mm
from refraction_render.calcs import CurveCalc,FlatCalc
from pyproj import Geod
import numpy as np
import os
def T_prof(h):
e1 = np.exp(h/1.5)
e2 = np.exp(h/0.1)
return (2/(1+e1))*0.1+(2/(1+e2))*0.05
calc_args = dict(T_prof=T_prof)
calc = CurveCalc(**calc_args)
s = Scene()
geod = Geod(ellps="sphere")
# GPS coordinates for the first two pylons
lat_1,lon_1 = 30.084791, -90.401287
lat_2,lon_2 = 30.087219, -90.400237
# getting the distance between pylons and the heading in which
# the rest of the pylons will follow across the lake
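# (pyproj's Geod.inv returns forward azimuth, back azimuth and distance in
# meters, so dist below is the pylon spacing and f_az the bearing to follow.)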
f_az,b_az,dist = geod.inv(lon_1,lat_1,lon_2,lat_2)
# calculating the distance of each pylon along the heading (the 24820 m span comes from Google Earth)
dists = np.arange(0,24820,dist)
# image path for pylon image
image_path ="pylon.png"
# loop over the distances, calculating the GPS position of each pylon and
# adding an image at that position
lat_f = 0
lon_f = 0
for d in dists:
lon,lat,b_az = geod.fwd(lon_1,lat_1,f_az,d)
lat_f += d*lat
lon_f += d*lon
s.add_image(image_path,(0,lat,lon),dimensions=(-1,23),direction=b_az)
# Soundly's position
lat_i, lon_i = 30.077320, -90.404888
# use the distance-weighted average of the pylon positions as the center of the frame.
lat_f, lon_f = lat_f/dists.sum(), lon_f/dists.sum()
# render image with wide field of view
renderer = Renderer_35mm(calc,10,lat_i,lon_i,(lat_f,lon_f),40000,
vert_obs_angle=0.0,vert_res=2000,focal_length=600)
renderer.render_scene(s,"soundly_pylons.png")
# render image with small field of view effectively zooming in
renderer = Renderer_35mm(calc,10,lat_i,lon_i,(lat_f,lon_f),40000,
vert_obs_angle=0.0,vert_res=2000,focal_length=2000)
renderer.render_scene(s,"soundly_pylons_zoom.png")
| 32.471698
| 71
| 0.756537
|
b889954e5a0ff8df51ff128ef093144cf22f521f
| 1,010
|
py
|
Python
|
AtCoder/ABC/033/d.py
|
ttyskg/ProgrammingCompetition
|
65fb9e131803e4f1a1a6369e68ed1b504f08b00f
|
[
"MIT"
] | null | null | null |
AtCoder/ABC/033/d.py
|
ttyskg/ProgrammingCompetition
|
65fb9e131803e4f1a1a6369e68ed1b504f08b00f
|
[
"MIT"
] | null | null | null |
AtCoder/ABC/033/d.py
|
ttyskg/ProgrammingCompetition
|
65fb9e131803e4f1a1a6369e68ed1b504f08b00f
|
[
"MIT"
] | null | null | null |
from bisect import bisect_right, bisect_left
from math import pi, atan2
import sys
def main():
input = sys.stdin.readline
N = int(input())
pos = [tuple(map(int, input().split())) for _ in range(N)]
ERR = 1e-9
right = 0
obtuse = 0
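    # Counting strategy: treat each point `ori` as the apex of an angle, sort the
    # directions to every other point, and append the same list shifted by 2*pi so
    # wrap-around pairs stay contiguous. For a fixed base direction, partners near
    # base + pi/2 give a right angle and partners up to base + pi give an obtuse
    # angle, so bisect counts them directly. Every triangle has at most one
    # non-acute angle, hence each right/obtuse triangle is counted exactly once
    # and acute = C(N,3) - right - obtuse.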
for ori in pos:
angles = [atan2(a[1] - ori[1], a[0] - ori[0]) for a in pos if a != ori]
angles = sorted(angles)
angles += [a + 2*pi for a in angles]
for i in range(N-1):
base = angles[i]
# s: start position of right angle
s = bisect_left(angles, base + pi/2 - ERR)
# t: end position of right angle
t = bisect_right(angles, base + pi/2 + ERR)
# u: end position of obtuse angle (180 degree)
u = bisect_right(angles, base + pi)
right += t - s
obtuse += u - t
total = N * (N-1) * (N-2) // 6
acute = total - (right + obtuse)
print(acute, right, obtuse)
if __name__ == '__main__':
main()
| 25.25
| 79
| 0.524752
|
86bf3ac029a06a86959ebea67aed6e6049873c5c
| 436
|
py
|
Python
|
05. Corner Detection/cornerDetection.py
|
codePerfectPlus/ComputerVision-Essentials
|
cfaa9e45ddc73cf6f3a6450f64a0d03268a60392
|
[
"MIT"
] | 15
|
2021-05-04T15:03:14.000Z
|
2022-03-20T11:57:55.000Z
|
05. Corner Detection/cornerDetection.py
|
codePerfectPlus/ComputerVision-Essentials
|
cfaa9e45ddc73cf6f3a6450f64a0d03268a60392
|
[
"MIT"
] | 12
|
2020-09-24T16:57:45.000Z
|
2020-10-23T15:13:06.000Z
|
05. Corner Detection/cornerDetection.py
|
codePerfectPlus/OpenCv-tutorial
|
cfaa9e45ddc73cf6f3a6450f64a0d03268a60392
|
[
"MIT"
] | 18
|
2020-09-21T13:01:37.000Z
|
2020-10-15T19:42:28.000Z
|
'''
Corner detection in Python
'''
import cv2
import numpy as np
img = cv2.imread('./Media/corner_detection.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
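# goodFeaturesToTrack implements Shi-Tomasi corner detection; the positional
# arguments below are maxCorners=100, qualityLevel=0.01 and minDistance=10 px.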
corners = cv2.goodFeaturesToTrack(gray, 100, 0.01, 10)
corners = np.int0(corners)
for corner in corners:
x, y = corner.ravel()
cv2.circle(img,(x,y), 3, 255, -1)
cv2.imshow('Corner', img)
cv2.waitKey(0) & 0xFF
cv2.destroyAllWindows()
| 18.956522
| 54
| 0.68578
|
b75373cf24a7344bf59b3c6fcb9c4c3969be6503
| 2,892
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_while_op.py
|
jichangjichang/Paddle
|
4fa3cee5499c6df0ad6043b0cfa220d09f2034e8
|
[
"Apache-2.0"
] | 9
|
2017-12-04T02:58:01.000Z
|
2020-12-03T14:46:30.000Z
|
python/paddle/fluid/tests/unittests/test_while_op.py
|
jichangjichang/Paddle
|
4fa3cee5499c6df0ad6043b0cfa220d09f2034e8
|
[
"Apache-2.0"
] | 7
|
2017-12-05T20:29:08.000Z
|
2018-10-15T08:57:40.000Z
|
python/paddle/fluid/tests/unittests/test_while_op.py
|
jichangjichang/Paddle
|
4fa3cee5499c6df0ad6043b0cfa220d09f2034e8
|
[
"Apache-2.0"
] | 6
|
2018-03-19T22:38:46.000Z
|
2019-11-01T22:28:27.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor
import paddle.fluid.core as core
from paddle.fluid.backward import append_backward
import numpy
class TestWhileOp(unittest.TestCase):
def test_simple_forward(self):
d0 = layers.data(
"d0", shape=[10], append_batch_size=False, dtype='float32')
d1 = layers.data(
"d1", shape=[10], append_batch_size=False, dtype='float32')
d2 = layers.data(
"d2", shape=[10], append_batch_size=False, dtype='float32')
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
init = layers.zeros(shape=[10], dtype='float32')
mem_array = layers.array_write(x=init, i=i)
data_array = layers.array_write(x=d0, i=i)
i = layers.increment(i)
layers.array_write(d1, i, array=data_array)
i = layers.increment(i)
layers.array_write(d2, i, array=data_array)
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
array_len = layers.fill_constant(shape=[1], dtype='int64', value=3)
array_len.stop_gradient = True
cond = layers.less_than(x=i, y=array_len)
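        # The While block below walks the three inputs: each iteration reads
        # data_array[i] and the running total mem_array[i], increments i, and
        # writes their sum to mem_array at the new index. After three passes
        # sum_result is the elementwise sum d0 + d1 + d2, whose mean is the loss
        # that append_backward differentiates.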
while_op = layers.While(cond=cond)
with while_op.block():
d = layers.array_read(array=data_array, i=i)
prev = layers.array_read(array=mem_array, i=i)
result = layers.sums(input=[d, prev])
i = layers.increment(x=i, in_place=True)
layers.array_write(result, i=i, array=mem_array)
layers.less_than(x=i, y=array_len, cond=cond)
sum_result = layers.array_read(array=mem_array, i=i)
loss = layers.mean(sum_result)
append_backward(loss)
cpu = core.CPUPlace()
exe = Executor(cpu)
d = []
for i in range(3):
d.append(numpy.random.random(size=[10]).astype('float32'))
outs = exe.run(feed={'d0': d[0],
'd1': d[1],
'd2': d[2]},
fetch_list=[sum_result])
self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01)
if __name__ == '__main__':
unittest.main()
| 34.843373
| 76
| 0.63278
|
78d4af749815ff4b9141dc92b0095bb308e95ae9
| 3,356
|
py
|
Python
|
pv_sim/run.py
|
ederlf/pv-sim
|
2c8a44d4a9552fdea1dbc9802d558f1f56a38b79
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
pv_sim/run.py
|
ederlf/pv-sim
|
2c8a44d4a9552fdea1dbc9802d558f1f56a38b79
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
pv_sim/run.py
|
ederlf/pv-sim
|
2c8a44d4a9552fdea1dbc9802d558f1f56a38b79
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
#!/usr/bin/env python
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import sys
import threading
import meter
import msgbroker
import pv_gen
import pv
parser = argparse.ArgumentParser()
# Broker arguments
parser.add_argument(
"-b", "--broker", help="Chooses the msg broker. If not set, the simulator "
"uses rabbitmq", default="rabbitmq")
parser.add_argument(
"--broker_ip", help="Sets the IP location of the broker. If not set, "
"localhost (127.0.0.1) is used")
parser.add_argument(
"--broker_port", help="Sets the TCP port used to connect to the broker. If"
" not set, uses the default of the selected broker")
# Meter arguments
parser.add_argument("-d", "--duration", type=int,
help="Sets the duration of the simulated time, in seconds."
"If not set, defaults to one day (86400 seconds)",
default=86400)
parser.add_argument("-s", "--step", type=int,
help="Sets the advance of time and the interval of meter "
"messages, in seconds. If not set, the simulator uses 5 "
"seconds If not set, defaults to one day (86400 seconds)",
default=5)
parser.add_argument("--seed", type=int,
help="Sets the seed for the random number generation. If "
"not set, the simulator uses 42", default=42)
# PVSim arguments
parser.add_argument("--pv_gen",
help="Sets the form of generation of PV values. By "
"default, it retrieves values from data files",
default="file")
parser.add_argument("--pv_gen_file",
help="Set the file used to retrieve PV values.")
parser.add_argument("--out_file",
help="Sets the output file of the PV simulator. By the "
"default, the data is saved in the execution directory, as"
" pv-data.csv",
default="pv-data.csv")
def main():
args = parser.parse_args()
# PV Thread
pv_broker = msgbroker.broker_factory(args.broker, args.broker_ip,
args.broker_port)
    # Chooses a PV generator. For now only file-based generation is supported.
pv_generator = None
if args.pv_gen == "file":
if args.pv_gen_file:
pv_generator = pv_gen.PVGenerator(pv_gen.PVFile(args.pv_gen_file))
else:
print("Error: The PV generator is of type \"file\". You must pass "
"a file name with --pv_gen_file FILENAME\n")
sys.exit(0)
if pv_generator:
pv_sim = pv.PVSim(pv_generator, pv_broker)
pv_thread = threading.Thread(target=pv_sim.run)
pv_thread.start()
else:
print("Error: You must define a value generator for the PV system. "
" --pv_gen\nTip: the simulator uses file by default, so you "
"might be missing the file name")
sys.exit(0)
# Meter Thread
meter_broker = msgbroker.broker_factory(args.broker, args.broker_ip,
args.broker_port)
meter_sim = meter.Meter(args.duration, args.step, meter_broker, args.seed)
meter_thread = threading.Thread(target=meter_sim.run)
meter_thread.start()
if __name__ == '__main__':
main()
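# Example invocation (sketch; the CSV file name is hypothetical):
#   python run.py --pv_gen file --pv_gen_file pv_values.csv \
#       --broker rabbitmq --duration 86400 --step 5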
| 36.879121
| 79
| 0.603993
|
21687b49ce611a320ee42a6a08b0d2c84ae77f2a
| 9,393
|
py
|
Python
|
python/GafferSceneUI/StandardOptionsUI.py
|
ivanimanishi/gaffer
|
7cfd79d2f20c25ed1d680730de9d6a2ee356dd4c
|
[
"BSD-3-Clause"
] | 1
|
2019-08-02T16:49:59.000Z
|
2019-08-02T16:49:59.000Z
|
python/GafferSceneUI/StandardOptionsUI.py
|
rkoschmitzky/gaffer
|
ec6262ae1292767bdeb9520d1447d65a4a511884
|
[
"BSD-3-Clause"
] | 2
|
2017-08-23T21:35:45.000Z
|
2018-01-29T08:59:33.000Z
|
python/GafferSceneUI/StandardOptionsUI.py
|
rkoschmitzky/gaffer
|
ec6262ae1292767bdeb9520d1447d65a4a511884
|
[
"BSD-3-Clause"
] | null | null | null |
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
##########################################################################
# Metadata
##########################################################################
def __cameraSummary( plug ) :
info = []
if plug["renderCamera"]["enabled"].getValue() :
info.append( plug["renderCamera"]["value"].getValue() )
if plug["renderResolution"]["enabled"].getValue() :
resolution = plug["renderResolution"]["value"].getValue()
info.append( "%dx%d" % ( resolution[0], resolution[1] ) )
if plug["pixelAspectRatio"]["enabled"].getValue() :
pixelAspectRatio = plug["pixelAspectRatio"]["value"].getValue()
info.append( "Aspect %s" % GafferUI.NumericWidget.valueToString( pixelAspectRatio ) )
if plug["resolutionMultiplier"]["enabled"].getValue() :
resolutionMultiplier = plug["resolutionMultiplier"]["value"].getValue()
info.append( "Mult %s" % GafferUI.NumericWidget.valueToString( resolutionMultiplier ) )
if plug["renderCropWindow"]["enabled"].getValue() :
crop = plug["renderCropWindow"]["value"].getValue()
info.append( "Crop %s,%s-%s,%s" % tuple( GafferUI.NumericWidget.valueToString( x ) for x in ( crop.min().x, crop.min().y, crop.max().x, crop.max().y ) ) )
if plug["overscan"]["enabled"].getValue() :
info.append( "Overscan %s" % ( "On" if plug["overscan"]["value"].getValue() else "Off" ) )
return ", ".join( info )
def __motionBlurSummary( plug ) :
info = []
if plug["cameraBlur"]["enabled"].getValue() :
info.append( "Camera " + ( "On" if plug["cameraBlur"]["value"].getValue() else "Off" ) )
if plug["transformBlur"]["enabled"].getValue() :
info.append( "Transform " + ( "On" if plug["transformBlur"]["value"].getValue() else "Off" ) )
if plug["deformationBlur"]["enabled"].getValue() :
info.append( "Deformation " + ( "On" if plug["deformationBlur"]["value"].getValue() else "Off" ) )
if plug["shutter"]["enabled"].getValue() :
info.append( "Shutter " + str( plug["shutter"]["value"].getValue() ) )
return ", ".join( info )
def __statisticsSummary( plug ) :
info = []
if plug["performanceMonitor"]["enabled"].getValue() :
info.append( "Performance Monitor " + ( "On" if plug["performanceMonitor"]["value"].getValue() else "Off" ) )
return ", ".join( info )
Gaffer.Metadata.registerNode(
GafferScene.StandardOptions,
"description",
"""
Specifies the standard options (global settings) for the
scene. These should be respected by all renderers.
""",
plugs = {
# Section summaries
"options" : [
"layout:section:Camera:summary", __cameraSummary,
"layout:section:Motion Blur:summary", __motionBlurSummary,
"layout:section:Statistics:summary", __statisticsSummary,
],
# Camera plugs
"options.renderCamera" : [
"description",
"""
The primary camera to be used for rendering. If this
is not specified, then a default orthographic camera
positioned at the origin is used.
""",
"layout:section", "Camera",
"label", "Camera",
],
"options.renderCamera.value" : [
"plugValueWidget:type", "GafferSceneUI.ScenePathPlugValueWidget",
"path:valid", True,
"scenePathPlugValueWidget:setNames", IECore.StringVectorData( [ "__cameras" ] ),
"scenePathPlugValueWidget:setsLabel", "Show only cameras",
],
"options.renderResolution" : [
"description",
"""
The resolution of the image to be rendered. Use the
resolution multiplier as a convenient way to temporarily
render at multiples of this resolution.
""",
"layout:section", "Camera",
"label", "Resolution",
],
"options.pixelAspectRatio" : [
"description",
"""
The aspect ratio (x/y) of the pixels in the rendered image.
""",
"layout:section", "Camera",
],
"options.resolutionMultiplier" : [
"description",
"""
Multiplier applied to the render resolution.
""",
"layout:section", "Camera",
],
"options.renderCropWindow" : [
"description",
"""
Limits the render to a region of the image. The rendered
image will have the same resolution as usual, but areas
outside the crop will be rendered black. Coordinates
range from 0,0 at the top left of the image to 1,1 at the
bottom right. The crop window tool in the viewer may be
used to set this interactively.
""",
"layout:section", "Camera",
"label", "Crop Window",
],
"options.overscan" : [
"description",
"""
Adds extra pixels to the sides of the rendered image.
This can be useful when camera shake or blur will be
added as a post process. This plug just enables overscan
as a whole - use the overscanTop, overscanBottom, overscanLeft
and overscanRight plugs to specify the amount of overscan
on each side of the image.
""",
"layout:section", "Camera",
],
"options.overscanTop" : [
"description",
"""
The amount of overscan at the top of the image. Specified
as a 0-1 proportion of the original image height.
""",
"layout:section", "Camera",
],
"options.overscanBottom" : [
"description",
"""
The amount of overscan at the bottom of the image. Specified
as a 0-1 proportion of the original image height.
""",
"layout:section", "Camera",
],
"options.overscanLeft" : [
"description",
"""
The amount of overscan at the left of the image. Specified
as a 0-1 proportion of the original image width.
""",
"layout:section", "Camera",
],
"options.overscanRight" : [
"description",
"""
The amount of overscan at the right of the image. Specified
as a 0-1 proportion of the original image width.
""",
"layout:section", "Camera",
],
# Motion blur plugs
"options.cameraBlur" : [
"description",
"""
Whether or not camera motion is taken into
		account in the rendered image. To specify the
number of segments to use for camera motion, use
a StandardAttributes node filtered for the camera.
""",
"layout:section", "Motion Blur",
"label", "Camera",
],
"options.transformBlur" : [
"description",
"""
Whether or not transform motion is taken into
		account in the rendered image. To specify the
number of transform segments to use for each
object in the scene, use a StandardAttributes node
with appropriate filters.
""",
"layout:section", "Motion Blur",
"label", "Transform",
],
"options.deformationBlur" : [
"description",
"""
Whether or not deformation motion is taken into
		account in the rendered image. To specify the
number of deformation segments to use for each
object in the scene, use a StandardAttributes node
with appropriate filters.
""",
"layout:section", "Motion Blur",
"label", "Deformation",
],
"options.shutter" : [
"description",
"""
The interval over which the camera shutter is open.
Measured in frames, and specified relative to the
frame being rendered.
""",
"layout:section", "Motion Blur",
],
"options.sampleMotion" : [
"description",
"""
Whether to actually render motion blur. Disabling this
setting while motion blur is set up produces a render where
there is no blur, but there is accurate motion information.
Useful for rendering motion vector passes.
""",
"layout:section", "Motion Blur",
],
# Statistics plugs
"options.performanceMonitor" : [
"description",
"""
Enables a performance monitor and uses it to output
statistics about scene generation performance.
""",
"layout:section", "Statistics",
],
}
)
| 26.91404
| 156
| 0.657617
|
2e4c54557388af0158f44984dfe160d3a67a5603
| 1,272
|
py
|
Python
|
pollbot/display/misc.py
|
tigerdar004/RweddingPoll
|
8617c63798dbebe6aee3ea7bd61d995a588fc048
|
[
"MIT"
] | null | null | null |
pollbot/display/misc.py
|
tigerdar004/RweddingPoll
|
8617c63798dbebe6aee3ea7bd61d995a588fc048
|
[
"MIT"
] | null | null | null |
pollbot/display/misc.py
|
tigerdar004/RweddingPoll
|
8617c63798dbebe6aee3ea7bd61d995a588fc048
|
[
"MIT"
] | 1
|
2020-11-06T01:54:41.000Z
|
2020-11-06T01:54:41.000Z
|
"""Display helper for misc stuff."""
from pollbot.i18n import i18n
from pollbot.models import Poll
from pollbot.telegram.keyboard import (
get_help_keyboard,
get_poll_list_keyboard,
)
def get_help_text_and_keyboard(user, current_category):
"""Create the help message depending on the currently selected help category."""
categories = [
"creation",
"settings",
"notifications",
"management",
"languages",
"bugs",
"feature",
]
text = i18n.t(f"misc.help.{current_category}", locale=user.locale)
keyboard = get_help_keyboard(user, categories, current_category)
return text, keyboard
def get_poll_list(session, user, closed=False):
"""Get the a list of polls for the user."""
polls = (
session.query(Poll)
.filter(Poll.user == user)
.filter(Poll.created.is_(True))
.filter(Poll.closed.is_(closed))
.all()
)
if len(polls) == 0 and closed:
return i18n.t("list.no_closed_polls", locale=user.locale), None
elif len(polls) == 0:
return i18n.t("list.no_polls", locale=user.locale), None
text = i18n.t("list.polls", locale=user.locale)
keyboard = get_poll_list_keyboard(polls)
return text, keyboard
| 27.06383
| 84
| 0.647013
|
eed3363f9ac69e7487e2e9cfded01882afee7aad
| 318
|
py
|
Python
|
server/application/__init__.py
|
c-jordi/pdf2data
|
daa9f8b58e6603063b411ba7fba89054c924c5bc
|
[
"MIT"
] | null | null | null |
server/application/__init__.py
|
c-jordi/pdf2data
|
daa9f8b58e6603063b411ba7fba89054c924c5bc
|
[
"MIT"
] | 1
|
2022-01-09T12:06:40.000Z
|
2022-01-09T12:06:40.000Z
|
server/application/__init__.py
|
c-jordi/pdf2data
|
daa9f8b58e6603063b411ba7fba89054c924c5bc
|
[
"MIT"
] | null | null | null |
__version__ = '0.1.0'
def init_database():
"""
Create a database
"""
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database
engine = create_engine("sqlite:///db.sqlite")
if not database_exists(engine.url):
create_database(engine.url)
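# Usage sketch (assumes the package is importable as `application`):
#   from application import init_database
#   init_database()   # creates db.sqlite in the current working directory if missing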
| 21.2
| 65
| 0.691824
|
61654d0d5ba34a716dc57dc18432e7eb9d5225f1
| 670
|
py
|
Python
|
leetcode/191.py
|
pingrunhuang/CodeChallenge
|
a8e5274e04c47d851836197907266418af4f1a22
|
[
"MIT"
] | null | null | null |
leetcode/191.py
|
pingrunhuang/CodeChallenge
|
a8e5274e04c47d851836197907266418af4f1a22
|
[
"MIT"
] | null | null | null |
leetcode/191.py
|
pingrunhuang/CodeChallenge
|
a8e5274e04c47d851836197907266418af4f1a22
|
[
"MIT"
] | null | null | null |
'''
191. Number of 1 Bits
Write a function that takes an unsigned integer and returns the number of '1' bits it has (also known as the Hamming weight).
For example, the 32-bit integer '11' has binary representation 00000000000000000000000000001011, so the function should return 3.
'''
class Solution(object):
def hammingWeight(self, n):
"""
:type n: int
:rtype: int
"""
que=[]
offset = 2
while n!=0:
if n%offset == 1:
que.append(1)
n=n//2
return len(que)
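    # For reference, equivalent idioms (not used above):
    #   bin(n).count('1')
    # or Kernighan's trick, clearing the lowest set bit per iteration:
    #   count = 0
    #   while n: n &= n - 1; count += 1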
if __name__ == '__main__':
solution = Solution()
t1=11
print(solution.hammingWeight(t1))
| 23.928571
| 129
| 0.591045
|
5ac834a1352119ce433f4d48f73b80157a5eba0e
| 10,198
|
py
|
Python
|
pywikibot/families/wikipedia_family.py
|
magul/pywikibot-core
|
4874edc0f3f314108bcd25486d9df817da8457fe
|
[
"MIT"
] | 2
|
2017-09-16T09:12:31.000Z
|
2017-09-19T19:12:32.000Z
|
pywikibot/families/wikipedia_family.py
|
magul/pywikibot-core
|
4874edc0f3f314108bcd25486d9df817da8457fe
|
[
"MIT"
] | 56
|
2016-12-13T04:57:36.000Z
|
2017-11-24T10:05:41.000Z
|
pywikibot/families/wikipedia_family.py
|
magul/pywikibot-core
|
4874edc0f3f314108bcd25486d9df817da8457fe
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Family module for Wikipedia."""
#
# (C) Pywikibot team, 2004-2018
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
from pywikibot import family
# The Wikimedia family that is known as Wikipedia, the Free Encyclopedia
class Family(family.SubdomainFamily, family.WikimediaFamily):
"""Family module for Wikipedia."""
name = 'wikipedia'
closed_wikis = [
# See https://noc.wikimedia.org/conf/highlight.php?file=closed.dblist
'aa', 'advisory', 'cho', 'ho', 'hz', 'ii', 'kj', 'kr', 'mh', 'mo',
'mus', 'ng', 'quality', 'strategy', 'ten', 'usability'
]
removed_wikis = [
# See https://noc.wikimedia.org/conf/highlight.php?file=deleted.dblist
'dk', 'ru-sib', 'tlh', 'tokipona', 'zh_cn', 'zh_tw',
]
def __init__(self):
"""Constructor."""
self.languages_by_size = [
'en', 'ceb', 'sv', 'de', 'fr', 'nl', 'ru', 'it', 'es', 'war', 'pl',
'vi', 'ja', 'pt', 'zh', 'uk', 'sr', 'fa', 'ca', 'ar', 'no', 'sh',
'fi', 'hu', 'id', 'ko', 'cs', 'ro', 'ms', 'tr', 'eu', 'eo', 'bg',
'hy', 'da', 'zh-min-nan', 'sk', 'min', 'kk', 'he', 'lt', 'hr',
'et', 'ce', 'sl', 'be', 'gl', 'el', 'nn', 'simple', 'az', 'uz',
'la', 'ur', 'hi', 'th', 'vo', 'ka', 'ta', 'cy', 'tg', 'mk', 'tl',
'mg', 'oc', 'lv', 'ky', 'bs', 'tt', 'new', 'sq', 'te', 'pms',
'zh-yue', 'br', 'be-tarask', 'azb', 'ast', 'bn', 'ml', 'ht', 'jv',
'lb', 'mr', 'sco', 'af', 'ga', 'pnb', 'is', 'ba', 'cv', 'fy', 'su',
'sw', 'my', 'lmo', 'an', 'yo', 'ne', 'pa', 'gu', 'io', 'nds',
'scn', 'bpy', 'als', 'bar', 'ku', 'kn', 'ckb', 'ia', 'qu', 'mn',
'arz', 'bat-smg', 'gd', 'wa', 'nap', 'si', 'yi', 'bug', 'am',
'cdo', 'or', 'map-bms', 'fo', 'mzn', 'hsb', 'xmf', 'mai', 'li',
'sah', 'sa', 'vec', 'ilo', 'os', 'mrj', 'hif', 'mhr', 'bh', 'eml',
'roa-tara', 'ps', 'diq', 'pam', 'sd', 'hak', 'nso', 'se',
'zh-classical', 'bcl', 'ace', 'mi', 'nah', 'nds-nl', 'szl', 'wuu',
'gan', 'rue', 'frr', 'vls', 'km', 'bo', 'vep', 'glk', 'sc', 'crh',
'fiu-vro', 'co', 'lrc', 'tk', 'kv', 'csb', 'gv', 'as', 'myv',
'lad', 'so', 'zea', 'nv', 'ay', 'udm', 'lez', 'ie', 'stq', 'kw',
'nrm', 'pcd', 'mwl', 'rm', 'koi', 'ab', 'gom', 'ug', 'lij',
'cbk-zam', 'gn', 'mt', 'fur', 'dsb', 'sn', 'dv', 'ang', 'ln',
'ext', 'kab', 'ksh', 'frp', 'lo', 'gag', 'dty', 'pag', 'pi', 'olo',
'av', 'xal', 'pfl', 'bxr', 'haw', 'krc', 'pap', 'kaa', 'rw', 'pdc',
'bjn', 'to', 'nov', 'ha', 'kl', 'arc', 'jam', 'kbd', 'tyv', 'tpi',
'tet', 'ig', 'ki', 'na', 'roa-rup', 'lbe', 'jbo', 'ty', 'mdf',
'za', 'kg', 'lg', 'wo', 'bi', 'srn', 'tcy', 'zu', 'chr', 'kbp',
'ltg', 'sm', 'om', 'xh', 'rmy', 'tn', 'cu', 'pih', 'rn', 'chy',
'tw', 'tum', 'ts', 'st', 'got', 'pnt', 'ss', 'ch', 'bm', 'fj',
'ady', 'iu', 'ny', 'atj', 'ee', 'ks', 'ak', 'ik', 've', 'sg', 'ff',
'dz', 'ti', 'cr', 'din',
]
# Sites we want to edit but not count as real languages
self.test_codes = ['test', 'test2']
super(Family, self).__init__()
# Templates that indicate a category redirect
# Redirects to these templates are automatically included
self.category_redirect_templates = {
'_default': (),
'ar': ('تحويل تصنيف',),
'arz': (u'تحويل تصنيف',),
'bs': ('Category redirect',),
'cs': (u'Zastaralá kategorie',),
'da': (u'Kategoriomdirigering',),
'en': (u'Category redirect',),
'es': (u'Categoría redirigida',),
'eu': ('Kategoria birzuzendu',),
'fa': ('رده بهتر',),
'fr': ('Catégorie redirigée',),
'gv': (u'Aastiurey ronney',),
'hi': ('श्रेणी अनुप्रेषित',),
'hu': ('Kat-redir',),
'id': ('Alih kategori',),
'ja': (u'Category redirect',),
'ko': (u'분류 넘겨주기',),
'mk': (u'Премести категорија',),
'ml': (u'Category redirect',),
'ms': ('Pengalihan kategori',),
'mt': ('Rindirizzament kategorija',),
'no': ('Kategoriomdirigering',),
'pt': ('Redirecionamento de categoria',),
'ro': (u'Redirect categorie',),
'ru': ('Переименованная категория',),
'sco': ('Category redirect',),
'sh': ('Prekat',),
'simple': ('Category redirect',),
'sl': ('Preusmeritev kategorije',),
'sr': ('Category redirect',),
'sq': ('Kategori e zhvendosur',),
'sv': ('Kategoriomdirigering',),
'tl': (u'Category redirect',),
'tr': ('Kategori yönlendirme',),
'uk': (u'Categoryredirect',),
'vi': ('Đổi hướng thể loại',),
'yi': (u'קאטעגאריע אריבערפירן',),
'zh': ('分类重定向',),
'zh-yue': ('分類彈去',),
}
# families that redirect their interlanguage links here.
self.interwiki_forwarded_from = [
'commons',
'incubator',
'meta',
'species',
'strategy',
'test',
'wikimania'
]
# Global bot allowed languages on
# https://meta.wikimedia.org/wiki/BPI#Current_implementation
# & https://meta.wikimedia.org/wiki/Special:WikiSets/2
self.cross_allowed = [
'ab', 'ace', 'ady', 'af', 'ak', 'als', 'am', 'an', 'ang', 'ar',
'arc', 'arz', 'as', 'ast', 'av', 'ay', 'az', 'ba', 'bar',
'bat-smg', 'bcl', 'be', 'be-tarask', 'bg', 'bh', 'bi', 'bjn', 'bm',
'bo', 'bpy', 'bug', 'bxr', 'ca', 'cbk-zam', 'cdo', 'ce', 'ceb',
'ch', 'chr', 'chy', 'ckb', 'co', 'cr', 'crh', 'cs', 'csb', 'cu',
'cv', 'cy', 'da', 'diq', 'dsb', 'dz', 'ee', 'el', 'eml', 'en',
'eo', 'et', 'eu', 'ext', 'fa', 'ff', 'fi', 'fj', 'fo', 'frp',
'frr', 'fur', 'ga', 'gag', 'gan', 'gd', 'glk', 'gn', 'got', 'gu',
'gv', 'ha', 'hak', 'haw', 'he', 'hi', 'hif', 'hr', 'hsb', 'ht',
'hu', 'hy', 'ia', 'ie', 'ig', 'ik', 'ilo', 'io', 'iu', 'ja', 'jam',
'jbo', 'jv', 'ka', 'kaa', 'kab', 'kdb', 'kg', 'ki', 'kk', 'kl',
'km', 'kn', 'ko', 'koi', 'krc', 'ks', 'ku', 'kv', 'kw', 'ky', 'la',
'lad', 'lb', 'lbe', 'lez', 'lg', 'li', 'lij', 'lmo', 'ln', 'lo',
'lt', 'ltg', 'lv', 'map-bms', 'mdf', 'mg', 'mhr', 'mi', 'mk', 'ml',
'mn', 'mrj', 'ms', 'mwl', 'my', 'myv', 'mzn', 'na', 'nah', 'nap',
'nds-nl', 'ne', 'new', 'nl', 'no', 'nov', 'nrm', 'nso', 'nv', 'ny',
'oc', 'olo', 'om', 'or', 'os', 'pa', 'pag', 'pam', 'pap', 'pdc',
'pfl', 'pi', 'pih', 'pms', 'pnb', 'pnt', 'ps', 'qu', 'rm', 'rmy',
'rn', 'roa-rup', 'roa-tara', 'ru', 'rue', 'rw', 'sa', 'sah', 'sc',
'scn', 'sco', 'sd', 'se', 'sg', 'sh', 'si', 'simple', 'sk', 'sm',
'sn', 'so', 'srn', 'ss', 'st', 'stq', 'su', 'sv', 'sw', 'szl',
'ta', 'tcy', 'te', 'tet', 'tg', 'th', 'ti', 'tk', 'tl', 'tn', 'to',
'tpi', 'tr', 'ts', 'tt', 'tum', 'tw', 'ty', 'tyv', 'udm', 'ug',
'uz', 've', 'vec', 'vep', 'vls', 'vo', 'wa', 'war', 'wo', 'wuu',
'xal', 'xh', 'xmf', 'yi', 'yo', 'za', 'zea', 'zh', 'zh-classical',
'zh-min-nan', 'zh-yue', 'zu',
]
# On most Wikipedias page names must start with a capital letter,
# but some languages don't use this.
self.nocapitalize = ['jbo']
# Languages that used to be coded in iso-8859-1
self.latin1old = [
'de', 'en', 'et', 'es', 'ia', 'la', 'af', 'cs', 'fr', 'pt', 'sl',
'bs', 'fy', 'vi', 'lt', 'fi', 'it', 'no', 'simple', 'gl', 'eu',
'nds', 'co', 'mi', 'mr', 'id', 'lv', 'sw', 'tt', 'uk', 'vo', 'ga',
'na', 'es', 'nl', 'da', 'dk', 'sv', 'test']
# Subpages for documentation.
# TODO: List is incomplete, to be completed for missing languages.
# TODO: Remove comments for appropriate pages
self.doc_subpages = {
'_default': ((u'/doc', ),
['ar', 'bn', 'cs', 'da', 'en', 'es', 'hr',
'hu', 'id', 'ilo', 'ja', 'ms',
'pt', 'ro', 'ru', 'simple', 'sh', 'vi', 'zh']
),
'bs': ('/dok', ),
'ca': (u'/ús', ),
'de': (u'Doku', u'/Meta'),
'dsb': (u'/Dokumentacija', ),
'eu': (u'txantiloi dokumentazioa', u'/dok'),
'fa': (u'/doc', u'/توضیحات'),
# fi: no idea how to handle this type of subpage at :Metasivu:
'fi': ((), ),
'fr': (u'/documentation', ),
'hsb': (u'/Dokumentacija', ),
'it': (u'/Man', ),
'ka': (u'/ინფო', ),
'ko': (u'/설명문서', ),
'no': (u'/dok', ),
'nn': (u'/dok', ),
'pl': (u'/opis', ),
'sk': (u'/Dokumentácia', ),
'sr': ('/док', ),
'sv': (u'/dok', ),
'uk': (u'/Документація', ),
}
def get_known_families(self, site):
"""Override the family interwiki prefixes for each site."""
# In Swedish Wikipedia 's:' is part of page title not a family
# prefix for 'wikisource'.
if site.code == 'sv':
d = self.known_families.copy()
d.pop('s')
d['src'] = 'wikisource'
return d
else:
return self.known_families
def code2encodings(self, code):
"""Return a list of historical encodings for a specific site."""
# Historic compatibility
if code == 'pl':
return 'utf-8', 'iso8859-2'
if code == 'ru':
return 'utf-8', 'iso8859-5'
if code in self.latin1old:
return 'utf-8', 'iso-8859-1'
return self.code2encoding(code)
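        # For example, code2encodings('pl') returns ('utf-8', 'iso8859-2');
        # codes not listed above fall back to code2encoding(code).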
| 45.936937
| 79
| 0.415474
|
120555bdfb31d9fc323b5ebfbfe022e108da8d6e
| 1,126
|
py
|
Python
|
python/app/blog/views.py
|
templain/php-python-opencensus-example
|
806463ac6c9ab3784c3339bedf5ac49fd96368cc
|
[
"MIT"
] | null | null | null |
python/app/blog/views.py
|
templain/php-python-opencensus-example
|
806463ac6c9ab3784c3339bedf5ac49fd96368cc
|
[
"MIT"
] | null | null | null |
python/app/blog/views.py
|
templain/php-python-opencensus-example
|
806463ac6c9ab3784c3339bedf5ac49fd96368cc
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import django_filters
from rest_framework import viewsets, filters
from .models import User, Entry
from .serializer import UserSerializer, EntrySerializer
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
import logging
class UserViewSet(viewsets.ViewSet):
def list(self, request):
queryset = User.objects.all()
serializer = UserSerializer(queryset, many=True)
return Response(serializer.data)
def retrieve(self, request, pk=None):
queryset = User.objects.all()
user = get_object_or_404(queryset, pk=pk)
serializer = UserSerializer(user)
return Response(serializer.data)
class EntryViewSet(viewsets.ModelViewSet):
def list(self, request):
queryset = Entry.objects.all()
serializer = EntrySerializer(queryset, many=True)
return Response(serializer.data)
def retrieve(self, request, pk=None):
queryset = Entry.objects.all()
entry = get_object_or_404(queryset, pk=pk)
serializer = EntrySerializer(entry)
return Response(serializer.data)
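# Wiring sketch (hypothetical urls.py, not part of this module): these viewsets
# would typically be exposed through a DRF router, e.g.
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r'users', UserViewSet, basename='user')
#   router.register(r'entries', EntryViewSet, basename='entry')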
| 32.171429
| 57
| 0.715808
|
fc2445c94759f1cde88122f72ffbd15d19912fc3
| 308
|
py
|
Python
|
project/com/vo/BankVO.py
|
sahilshah8141/ChequeClearanceSystem
|
f02efeb45b950be8bb34a35a399a358e7eeed03b
|
[
"Apache-2.0"
] | null | null | null |
project/com/vo/BankVO.py
|
sahilshah8141/ChequeClearanceSystem
|
f02efeb45b950be8bb34a35a399a358e7eeed03b
|
[
"Apache-2.0"
] | null | null | null |
project/com/vo/BankVO.py
|
sahilshah8141/ChequeClearanceSystem
|
f02efeb45b950be8bb34a35a399a358e7eeed03b
|
[
"Apache-2.0"
] | null | null | null |
from wtforms import *
class BankVO:
bankId = IntegerField
bankName = StringField
bankCode = StringField
bankContact = StringField
bank_LoginId = IntegerField
# bankEmail = StringField
bank_CityId = IntegerField
bank_AreaId = IntegerField
bankActiveStatus = StringField
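# Note: as written these attributes reference the wtforms field *classes*.
# If this VO is meant to act like a wtforms Form, the fields would normally be
# instantiated, e.g. `bankId = IntegerField('Bank Id')` (sketch, not applied here).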
| 20.533333
| 34
| 0.724026
|
5ddd5c5fe6470de5972bcc86a05cf85938328d33
| 3,177
|
py
|
Python
|
flexget/tests/test_exec.py
|
Jeremiad/Flexget
|
73e6e062eeb126eaec8737a6d6c94ccf3d250b03
|
[
"MIT"
] | 1,322
|
2015-01-01T22:00:25.000Z
|
2022-03-30T05:37:46.000Z
|
flexget/tests/test_exec.py
|
Jeremiad/Flexget
|
73e6e062eeb126eaec8737a6d6c94ccf3d250b03
|
[
"MIT"
] | 2,384
|
2015-01-01T04:23:15.000Z
|
2022-03-31T01:06:43.000Z
|
flexget/tests/test_exec.py
|
Jeremiad/Flexget
|
73e6e062eeb126eaec8737a6d6c94ccf3d250b03
|
[
"MIT"
] | 617
|
2015-01-02T15:15:07.000Z
|
2022-03-15T12:29:31.000Z
|
import os
import sys
import pytest
class TestExec:
__tmp__ = True
config = (
"""
templates:
global:
set:
temp_dir: '__tmp__'
accept_all: yes
tasks:
replace_from_entry:
mock:
- {title: 'replace'}
- {title: 'replace with spaces'}
exec: """
+ sys.executable
+ """ exec.py "{{temp_dir}}" "{{title}}"
test_adv_format:
mock:
- {title: entry1, location: '/path/with spaces', quotefield: "with'quote"}
exec:
on_output:
for_entries: """
+ sys.executable
+ """ exec.py "{{temp_dir}}" "{{title}}" "{{location}}" """
+ """"/the/final destinaton/" "a {{quotefield}}" "/a hybrid{{location}}"
test_auto_escape:
mock:
- {title: entry2, quotes: single ' double", otherchars: '% a $a! ` *'}
exec:
auto_escape: yes
on_output:
for_entries: """
+ sys.executable
+ """ exec.py "{{temp_dir}}" "{{title}}" "{{quotes}}" "/start/{{quotes}}" "{{otherchars}}"
"""
)
def test_replace_from_entry(self, execute_task, tmpdir):
task = execute_task('replace_from_entry')
assert len(task.accepted) == 2, "not all entries were accepted"
for entry in task.accepted:
assert tmpdir.join(entry['title']).exists(), (
"exec.py did not create a file for %s" % entry['title']
)
def test_adv_format(self, execute_task, tmpdir):
task = execute_task('test_adv_format')
for entry in task.accepted:
with tmpdir.join(entry['title']).open('r') as infile:
line = infile.readline().rstrip('\n')
assert line == '/path/with spaces', '%s != /path/with spaces' % line
line = infile.readline().rstrip('\n')
                assert line == '/the/final destination/', '%s != /the/final destination/' % line
line = infile.readline().rstrip('\n')
assert line == 'a with\'quote', '%s != a with\'quote' % line
line = infile.readline().rstrip('\n')
assert line == '/a hybrid/path/with spaces', (
'%s != /a hybrid/path/with spaces' % line
)
# TODO: This doesn't work on linux.
@pytest.mark.skip(reason='This doesn\'t work on linux')
def test_auto_escape(self, execute_task):
task = execute_task('test_auto_escape')
for entry in task.accepted:
with open(os.path.join(self.__tmp__, entry['title']), 'r') as infile:
line = infile.readline().rstrip('\n')
assert line == 'single \' double\"', '%s != single \' double\"' % line
line = infile.readline().rstrip('\n')
assert line == '/start/single \' double\"', (
'%s != /start/single \' double\"' % line
)
line = infile.readline().rstrip('\n')
assert line == '% a $a! ` *', '%s != % a $a! ` *' % line
| 38.743902
| 98
| 0.491029
|
010a0c0a00a176bdabb561cc2a141aa0d1bbf139
| 12,572
|
py
|
Python
|
tools/manifest/tests/test_manifest.py
|
QuantumDecaydev/wpt
|
604bdb79a265e54c398052a6e28557d26b23ce61
|
[
"BSD-3-Clause"
] | null | null | null |
tools/manifest/tests/test_manifest.py
|
QuantumDecaydev/wpt
|
604bdb79a265e54c398052a6e28557d26b23ce61
|
[
"BSD-3-Clause"
] | null | null | null |
tools/manifest/tests/test_manifest.py
|
QuantumDecaydev/wpt
|
604bdb79a265e54c398052a6e28557d26b23ce61
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import mock
import hypothesis as h
import hypothesis.strategies as hs
import pytest
from .. import manifest, item, utils
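# Helper factories: each builds a mock SourceFile whose manifest_items() reports
# a single test (or one test per variant) of the given item class, so
# Manifest.update() can be exercised without touching the filesystem.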
def SourceFileWithTest(path, hash, cls, *args):
s = mock.Mock(rel_path=path, hash=hash)
test = cls(s, utils.rel_path_to_url(path), *args)
s.manifest_items = mock.Mock(return_value=(cls.item_type, [test]))
return s
def SourceFileWithTests(path, hash, cls, variants):
s = mock.Mock(rel_path=path, hash=hash)
tests = [cls(s, item[0], *item[1:]) for item in variants]
s.manifest_items = mock.Mock(return_value=(cls.item_type, tests))
return s
@hs.composite
def rel_dir_file_path(draw):
length = draw(hs.integers(min_value=1, max_value=20))
if length == 1:
return "a"
else:
remaining = length - 2
if os.path.sep == "/":
alphabet = "a/"
elif os.path.sep == "\\":
alphabet = "a/\\"
else:
assert False, "uhhhh, this platform is weird"
mid = draw(hs.text(alphabet=alphabet, min_size=remaining, max_size=remaining))
return os.path.normcase("a" + mid + "a")
@hs.composite
def sourcefile_strategy(draw):
item_classes = [item.TestharnessTest, item.RefTest, item.RefTestNode,
item.ManualTest, item.Stub, item.WebDriverSpecTest,
item.ConformanceCheckerTest, item.SupportFile]
cls = draw(hs.sampled_from(item_classes))
path = draw(rel_dir_file_path())
hash = draw(hs.text(alphabet="0123456789abcdef", min_size=40, max_size=40))
s = mock.Mock(rel_path=path, hash=hash)
if cls in (item.RefTest, item.RefTestNode):
ref_path = draw(rel_dir_file_path())
h.assume(path != ref_path)
ref_eq = draw(hs.sampled_from(["==", "!="]))
test = cls(s, utils.rel_path_to_url(path), [(utils.rel_path_to_url(ref_path), ref_eq)])
elif cls is item.SupportFile:
test = cls(s)
else:
test = cls(s, utils.rel_path_to_url(path))
s.manifest_items = mock.Mock(return_value=(cls.item_type, [test]))
return s
@h.given(hs.lists(sourcefile_strategy(),
min_size=1, average_size=10, max_size=1000,
unique_by=lambda x: x.rel_path))
@h.example([SourceFileWithTest("a", "0"*40, item.ConformanceCheckerTest)])
def test_manifest_to_json(s):
m = manifest.Manifest()
assert m.update((item, True) for item in s) is True
json_str = m.to_json()
loaded = manifest.Manifest.from_json("/", json_str)
assert list(loaded) == list(m)
assert loaded.to_json() == json_str
@h.given(hs.lists(sourcefile_strategy(),
min_size=1, average_size=10,
unique_by=lambda x: x.rel_path))
@h.example([SourceFileWithTest("a", "0"*40, item.TestharnessTest)])
@h.example([SourceFileWithTest("a", "0"*40, item.RefTest, [("/aa", "==")])])
def test_manifest_idempotent(s):
m = manifest.Manifest()
assert m.update((item, True) for item in s) is True
m1 = list(m)
assert m.update((item, True) for item in s) is False
assert list(m) == m1
def test_manifest_to_json_forwardslash():
m = manifest.Manifest()
s = SourceFileWithTest("a/b", "0"*40, item.TestharnessTest)
assert m.update([(s, True)]) is True
assert m.to_json() == {
'paths': {
'a/b': ('0000000000000000000000000000000000000000', 'testharness')
},
'version': 5,
'url_base': '/',
'items': {
'testharness': {
'a/b': [['/a/b', {}]]
}
}
}
def test_manifest_to_json_backslash():
m = manifest.Manifest()
s = SourceFileWithTest("a\\b", "0"*40, item.TestharnessTest)
if os.path.sep == "\\":
assert m.update([(s, True)]) is True
assert m.to_json() == {
'paths': {
'a/b': ('0000000000000000000000000000000000000000', 'testharness')
},
'version': 5,
'url_base': '/',
'items': {
'testharness': {
'a/b': [['/a/b', {}]]
}
}
}
else:
with pytest.raises(ValueError):
# one of these must raise ValueError
# the first must return True if it doesn't raise
assert m.update([(s, True)]) is True
m.to_json()
def test_manifest_from_json_backslash():
json_obj = {
'paths': {
'a\\b': ('0000000000000000000000000000000000000000', 'testharness')
},
'version': 5,
'url_base': '/',
'items': {
'testharness': {
'a\\b': [['/a/b', {}]]
}
}
}
with pytest.raises(ValueError):
manifest.Manifest.from_json("/", json_obj)
def test_reftest_computation_chain():
m = manifest.Manifest()
s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])
m.update([(s1, True), (s2, True)])
test1 = s1.manifest_items()[1][0]
test2 = s2.manifest_items()[1][0]
test2_node = test2.to_RefTestNode()
assert list(m) == [("reftest", test1.path, {test1}),
("reftest_node", test2.path, {test2_node})]
def test_reftest_computation_chain_update_add():
m = manifest.Manifest()
s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])
test2 = s2.manifest_items()[1][0]
assert m.update([(s2, True)]) is True
assert list(m) == [("reftest", test2.path, {test2})]
s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
test1 = s1.manifest_items()[1][0]
# s2's hash is unchanged, but it has gone from a test to a node
assert m.update([(s1, True), (s2, True)]) is True
test2_node = test2.to_RefTestNode()
assert list(m) == [("reftest", test1.path, {test1}),
("reftest_node", test2.path, {test2_node})]
def test_reftest_computation_chain_update_remove():
m = manifest.Manifest()
s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])
assert m.update([(s1, True), (s2, True)]) is True
test1 = s1.manifest_items()[1][0]
test2 = s2.manifest_items()[1][0]
test2_node = test2.to_RefTestNode()
assert list(m) == [("reftest", test1.path, {test1}),
("reftest_node", test2.path, {test2_node})]
# s2's hash is unchanged, but it has gone from a node to a test
assert m.update([(s2, True)]) is True
assert list(m) == [("reftest", test2.path, {test2})]
def test_reftest_computation_chain_update_test_type():
m = manifest.Manifest()
s1 = SourceFileWithTest("test", "0"*40, item.RefTest, [("/test-ref", "==")])
assert m.update([(s1, True)]) is True
test1 = s1.manifest_items()[1][0]
assert list(m) == [("reftest", test1.path, {test1})]
# test becomes a testharness test (hash change because that is determined
    # based on the file contents). The updated manifest should not include the
# old reftest.
s2 = SourceFileWithTest("test", "1"*40, item.TestharnessTest)
assert m.update([(s2, True)]) is True
test2 = s2.manifest_items()[1][0]
assert list(m) == [("testharness", test2.path, {test2})]
def test_reftest_computation_chain_update_node_change():
m = manifest.Manifest()
s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
s2 = SourceFileWithTest("test2", "0"*40, item.RefTestNode, [("/test3", "==")])
assert m.update([(s1, True), (s2, True)]) is True
test1 = s1.manifest_items()[1][0]
test2 = s2.manifest_items()[1][0]
assert list(m) == [("reftest", test1.path, {test1}),
("reftest_node", test2.path, {test2})]
    # test2 changes to a support-file type
s2 = SourceFileWithTest("test2", "1"*40, item.SupportFile)
assert m.update([(s1, True), (s2, True)]) is True
test3 = s2.manifest_items()[1][0]
assert list(m) == [("reftest", test1.path, {test1}),
("support", test3.path, {test3})]
def test_iterpath():
m = manifest.Manifest()
# This has multiple test types from the same file, which isn't really supported,
# so pretend they have different hashes
sources = [SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test1-ref", "==")]),
SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test2-ref", "==")]),
SourceFileWithTests("test2", "1"*40, item.TestharnessTest, [("/test2-1.html",),
("/test2-2.html",)]),
SourceFileWithTest("test3", "0"*40, item.TestharnessTest)]
m.update([(s, True) for s in sources])
assert set(item.url for item in m.iterpath("test2")) == set(["/test2",
"/test2-1.html",
"/test2-2.html"])
assert set(m.iterpath("missing")) == set()
def test_filter():
m = manifest.Manifest()
# This has multiple test types from the same file, which isn't really supported,
# so pretend they have different hashes
sources = [SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test1-ref", "==")]),
SourceFileWithTest("test2", "1"*40, item.RefTest, [("/test2-ref", "==")]),
SourceFileWithTests("test2", "0"*40, item.TestharnessTest, [("/test2-1.html",),
("/test2-2.html",)]),
SourceFileWithTest("test3", "0"*40, item.TestharnessTest)]
m.update([(s, True) for s in sources])
json = m.to_json()
def filter(it):
for test in it:
if test[0] in ["/test2-2.html", "/test3"]:
yield test
filtered_manifest = manifest.Manifest.from_json("/", json, types=["testharness"], meta_filters=[filter])
actual = [
(ty, path, [test.id for test in tests])
for (ty, path, tests) in filtered_manifest
]
assert actual == [
("testharness", "test2", ["/test2-2.html"]),
("testharness", "test3", ["/test3"]),
]
def test_reftest_node_by_url():
m = manifest.Manifest()
s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])
m.update([(s1, True), (s2, True)])
test1 = s1.manifest_items()[1][0]
test2 = s2.manifest_items()[1][0]
test2_node = test2.to_RefTestNode()
assert m.reftest_nodes_by_url == {"/test1": test1,
"/test2": test2_node}
m._reftest_nodes_by_url = None
assert m.reftest_nodes_by_url == {"/test1": test1,
"/test2": test2_node}
def test_no_update():
m = manifest.Manifest()
s1 = SourceFileWithTest("test1", "0"*40, item.TestharnessTest)
s2 = SourceFileWithTest("test2", "0"*40, item.TestharnessTest)
m.update([(s1, True), (s2, True)])
test1 = s1.manifest_items()[1][0]
test2 = s2.manifest_items()[1][0]
assert list(m) == [("testharness", test1.path, {test1}),
("testharness", test2.path, {test2})]
s1_1 = SourceFileWithTest("test1", "1"*40, item.ManualTest)
m.update([(s1_1, True), (s2.rel_path, False)])
test1_1 = s1_1.manifest_items()[1][0]
assert list(m) == [("manual", test1_1.path, {test1_1}),
("testharness", test2.path, {test2})]
def test_no_update_delete():
m = manifest.Manifest()
s1 = SourceFileWithTest("test1", "0"*40, item.TestharnessTest)
s2 = SourceFileWithTest("test2", "0"*40, item.TestharnessTest)
m.update([(s1, True), (s2, True)])
test1 = s1.manifest_items()[1][0]
s1_1 = SourceFileWithTest("test1", "1"*40, item.ManualTest)
m.update([(s1_1.rel_path, False)])
assert list(m) == [("testharness", test1.path, {test1})]
def test_update_from_json():
m = manifest.Manifest()
s1 = SourceFileWithTest("test1", "0"*40, item.TestharnessTest)
s2 = SourceFileWithTest("test2", "0"*40, item.TestharnessTest)
m.update([(s1, True), (s2, True)])
json_str = m.to_json()
m = manifest.Manifest.from_json("/", json_str)
m.update([(s1, True)])
test1 = s1.manifest_items()[1][0]
assert list(m) == [("testharness", test1.path, {test1})]
| 31.508772
| 108
| 0.575962
|
b8706e1676e4c696ef2702d01539dce42e9e14b9
| 5,791
|
py
|
Python
|
tests/scripts/thread-cert/Cert_9_2_09_PendingPartition.py
|
MarekPorwisz/openthread-zep
|
acd72411235a0630a4efaeac8969419d15fecdaa
|
[
"BSD-3-Clause"
] | 1
|
2022-03-18T11:20:13.000Z
|
2022-03-18T11:20:13.000Z
|
tests/scripts/thread-cert/Cert_9_2_09_PendingPartition.py
|
MarekPorwisz/openthread-zep
|
acd72411235a0630a4efaeac8969419d15fecdaa
|
[
"BSD-3-Clause"
] | 3
|
2017-03-30T22:36:13.000Z
|
2020-05-29T15:04:28.000Z
|
tests/scripts/thread-cert/Cert_9_2_09_PendingPartition.py
|
MarekPorwisz/openthread-zep
|
acd72411235a0630a4efaeac8969419d15fecdaa
|
[
"BSD-3-Clause"
] | 1
|
2016-07-05T14:44:21.000Z
|
2016-07-05T14:44:21.000Z
|
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import thread_cert
CHANNEL_INIT = 19
PANID_INIT = 0xface
CHANNEL_FINAL = 19
PANID_FINAL = 0xabcd
COMMISSIONER = 1
LEADER = 2
ROUTER1 = 3
ROUTER2 = 4
class Cert_9_2_09_PendingPartition(thread_cert.TestCase):
SUPPORT_NCP = False
TOPOLOGY = {
COMMISSIONER: {
'active_dataset': {
'timestamp': 10,
'panid': PANID_INIT,
'channel': CHANNEL_INIT
},
'mode': 'rsdn',
'router_selection_jitter': 1,
'whitelist': [LEADER]
},
LEADER: {
'active_dataset': {
'timestamp': 10,
'panid': PANID_INIT,
'channel': CHANNEL_INIT
},
'mode': 'rsdn',
'partition_id': 0xffffffff,
'router_selection_jitter': 1,
'whitelist': [COMMISSIONER, ROUTER1]
},
ROUTER1: {
'active_dataset': {
'timestamp': 10,
'panid': PANID_INIT,
'channel': CHANNEL_INIT
},
'mode': 'rsdn',
'router_selection_jitter': 1,
'whitelist': [LEADER, ROUTER2]
},
ROUTER2: {
'active_dataset': {
'timestamp': 10,
'panid': PANID_INIT,
'channel': CHANNEL_INIT
},
'mode': 'rsdn',
'network_id_timeout': 100,
'router_selection_jitter': 1,
'whitelist': [ROUTER1]
},
}
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[COMMISSIONER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
self.nodes[COMMISSIONER].commissioner_start()
self.simulator.go(3)
self.nodes[ROUTER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[ROUTER2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
self.nodes[COMMISSIONER].send_mgmt_pending_set(
pending_timestamp=30,
active_timestamp=210,
delay_timer=500000,
channel=20,
panid=0xafce,
)
self.simulator.go(5)
self.nodes[LEADER].remove_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[ROUTER1].remove_whitelist(self.nodes[LEADER].get_addr64())
self.simulator.go(140)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.assertEqual(self.nodes[ROUTER2].get_state(), 'leader')
self.nodes[ROUTER2].send_mgmt_pending_set(
pending_timestamp=50,
active_timestamp=410,
delay_timer=200000,
channel=CHANNEL_FINAL,
panid=PANID_FINAL,
)
self.simulator.go(5)
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
self.simulator.go(200)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.assertEqual(self.nodes[ROUTER2].get_state(), 'router')
self.assertEqual(self.nodes[COMMISSIONER].get_panid(), PANID_FINAL)
self.assertEqual(self.nodes[LEADER].get_panid(), PANID_FINAL)
self.assertEqual(self.nodes[ROUTER1].get_panid(), PANID_FINAL)
self.assertEqual(self.nodes[ROUTER2].get_panid(), PANID_FINAL)
self.assertEqual(self.nodes[COMMISSIONER].get_channel(), CHANNEL_FINAL)
self.assertEqual(self.nodes[LEADER].get_channel(), CHANNEL_FINAL)
self.assertEqual(self.nodes[ROUTER1].get_channel(), CHANNEL_FINAL)
self.assertEqual(self.nodes[ROUTER2].get_channel(), CHANNEL_FINAL)
ipaddrs = self.nodes[ROUTER2].get_addrs()
for ipaddr in ipaddrs:
if ipaddr[0:4] != 'fe80':
break
self.assertTrue(self.nodes[LEADER].ping(ipaddr))
if __name__ == '__main__':
unittest.main()
| 35.09697
| 79
| 0.631497
|
59d18fc57799f1824506438666897b5526d34140
| 8,520
|
py
|
Python
|
src/lib/Bcfg2/Server/Lint/Comments.py
|
stpierre/bcfg2
|
363ad4fd2b36febbbe6b766dac9e76c572048e08
|
[
"mpich2"
] | null | null | null |
src/lib/Bcfg2/Server/Lint/Comments.py
|
stpierre/bcfg2
|
363ad4fd2b36febbbe6b766dac9e76c572048e08
|
[
"mpich2"
] | null | null | null |
src/lib/Bcfg2/Server/Lint/Comments.py
|
stpierre/bcfg2
|
363ad4fd2b36febbbe6b766dac9e76c572048e08
|
[
"mpich2"
] | null | null | null |
import os
import lxml.etree
import Bcfg2.Server.Lint
from Bcfg2.Server import XI, XI_NAMESPACE
from Bcfg2.Server.Plugins.Cfg.CfgPlaintextGenerator import CfgPlaintextGenerator
from Bcfg2.Server.Plugins.Cfg.CfgGenshiGenerator import CfgGenshiGenerator
from Bcfg2.Server.Plugins.Cfg.CfgCheetahGenerator import CfgCheetahGenerator
from Bcfg2.Server.Plugins.Cfg.CfgInfoXML import CfgInfoXML
class Comments(Bcfg2.Server.Lint.ServerPlugin):
""" check files for various required headers """
def __init__(self, *args, **kwargs):
Bcfg2.Server.Lint.ServerPlugin.__init__(self, *args, **kwargs)
self.config_cache = {}
def Run(self):
self.check_bundles()
self.check_properties()
self.check_metadata()
self.check_cfg()
self.check_probes()
@classmethod
def Errors(cls):
return {"unexpanded-keywords":"warning",
"keywords-not-found":"warning",
"comments-not-found":"warning",
"broken-xinclude-chain":"warning"}
def required_keywords(self, rtype):
""" given a file type, fetch the list of required VCS keywords
from the bcfg2-lint config """
return self.required_items(rtype, "keyword")
def required_comments(self, rtype):
""" given a file type, fetch the list of required comments
from the bcfg2-lint config """
return self.required_items(rtype, "comment")
def required_items(self, rtype, itype):
""" given a file type and item type (comment or keyword),
fetch the list of required items from the bcfg2-lint config """
if itype not in self.config_cache:
self.config_cache[itype] = {}
if rtype not in self.config_cache[itype]:
rv = []
global_item = "global_%ss" % itype
if global_item in self.config:
rv.extend(self.config[global_item].split(","))
item = "%s_%ss" % (rtype.lower(), itype)
if item in self.config:
if self.config[item]:
rv.extend(self.config[item].split(","))
else:
# config explicitly specifies nothing
rv = []
self.config_cache[itype][rtype] = rv
return self.config_cache[itype][rtype]
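    # Illustrative configuration sketch (hypothetical bcfg2-lint values, not taken
    # from a real config): with
    #     global_keywords = Id
    #     cfg_comments = Maintainer,Purpose
    # required_keywords("cfg") would return ["Id"] and required_comments("cfg")
    # would return ["Maintainer", "Purpose"].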
def check_bundles(self):
""" check bundle files for required headers """
if 'Bundler' in self.core.plugins:
for bundle in self.core.plugins['Bundler'].entries.values():
xdata = None
rtype = ""
try:
xdata = lxml.etree.XML(bundle.data)
rtype = "bundler"
except (lxml.etree.XMLSyntaxError, AttributeError):
xdata = lxml.etree.parse(bundle.template.filepath).getroot()
rtype = "sgenshi"
self.check_xml(bundle.name, xdata, rtype)
def check_properties(self):
""" check properties files for required headers """
if 'Properties' in self.core.plugins:
props = self.core.plugins['Properties']
for propfile, pdata in props.store.entries.items():
if os.path.splitext(propfile)[1] == ".xml":
self.check_xml(pdata.name, pdata.xdata, 'properties')
def check_metadata(self):
""" check metadata files for required headers """
if self.has_all_xincludes("groups.xml"):
self.check_xml(os.path.join(self.metadata.data, "groups.xml"),
self.metadata.groups_xml.data,
"metadata")
if self.has_all_xincludes("clients.xml"):
self.check_xml(os.path.join(self.metadata.data, "clients.xml"),
self.metadata.clients_xml.data,
"metadata")
def check_cfg(self):
""" check Cfg files and info.xml files for required headers """
if 'Cfg' in self.core.plugins:
for entryset in self.core.plugins['Cfg'].entries.values():
for entry in entryset.entries.values():
rtype = None
if isinstance(entry, CfgGenshiGenerator):
rtype = "tgenshi"
elif isinstance(entry, CfgPlaintextGenerator):
rtype = "cfg"
elif isinstance(entry, CfgCheetahGenerator):
rtype = "tcheetah"
elif isinstance(entry, CfgInfoXML):
self.check_xml(entry.infoxml.name,
entry.infoxml.pnode.data,
"infoxml")
continue
if rtype:
self.check_plaintext(entry.name, entry.data, rtype)
def check_probes(self):
""" check probes for required headers """
if 'Probes' in self.core.plugins:
for probe in self.core.plugins['Probes'].probes.entries.values():
self.check_plaintext(probe.name, probe.data, "probes")
def check_xml(self, filename, xdata, rtype):
""" check generic XML files for required headers """
self.check_lines(filename,
[str(el)
for el in xdata.getiterator(lxml.etree.Comment)],
rtype)
def check_plaintext(self, filename, data, rtype):
""" check generic plaintex files for required headers """
self.check_lines(filename, data.splitlines(), rtype)
def check_lines(self, filename, lines, rtype):
""" generic header check for a set of lines """
if self.HandlesFile(filename):
# found is trivalent:
# False == not found
# None == found but not expanded
# True == found and expanded
found = dict((k, False) for k in self.required_keywords(rtype))
for line in lines:
# we check for both '$<keyword>:' and '$<keyword>$' to see
# if the keyword just hasn't been expanded
for (keyword, status) in found.items():
if not status:
if '$%s:' % keyword in line:
found[keyword] = True
elif '$%s$' % keyword in line:
found[keyword] = None
unexpanded = [keyword for (keyword, status) in found.items()
if status is None]
if unexpanded:
self.LintError("unexpanded-keywords",
"%s: Required keywords(s) found but not expanded: %s" %
(filename, ", ".join(unexpanded)))
missing = [keyword for (keyword, status) in found.items()
if status is False]
if missing:
self.LintError("keywords-not-found",
"%s: Required keywords(s) not found: $%s$" %
(filename, "$, $".join(missing)))
# next, check for required comments. found is just
# boolean
found = dict((k, False) for k in self.required_comments(rtype))
for line in lines:
for (comment, status) in found.items():
if not status:
found[comment] = comment in line
missing = [comment for (comment, status) in found.items()
if status is False]
if missing:
self.LintError("comments-not-found",
"%s: Required comments(s) not found: %s" %
(filename, ", ".join(missing)))
def has_all_xincludes(self, mfile):
""" return true if self.files includes all XIncludes listed in
the specified metadata type, false otherwise"""
if self.files is None:
return True
else:
path = os.path.join(self.metadata.data, mfile)
if path in self.files:
xdata = lxml.etree.parse(path)
for el in xdata.findall('./%sinclude' % XI_NAMESPACE):
if not self.has_all_xincludes(el.get('href')):
self.LintError("broken-xinclude-chain",
"Broken XInclude chain: could not include %s" % path)
return False
return True
| 43.030303
| 92
| 0.538732
|
045f565445756c396db17f0733b8e39589fb434d
| 1,069
|
py
|
Python
|
etsyProducts/urls.py
|
tugberkozkara/etsy-products
|
07fc8d3ce5b7d4806a7c2dedf6cc1bd868dcf878
|
[
"MIT"
] | null | null | null |
etsyProducts/urls.py
|
tugberkozkara/etsy-products
|
07fc8d3ce5b7d4806a7c2dedf6cc1bd868dcf878
|
[
"MIT"
] | null | null | null |
etsyProducts/urls.py
|
tugberkozkara/etsy-products
|
07fc8d3ce5b7d4806a7c2dedf6cc1bd868dcf878
|
[
"MIT"
] | null | null | null |
"""etsyProducts URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from main.views import deleteProduct, homePage, collectionPage, productDetail
urlpatterns = [
path('admin/', admin.site.urls),
path('', homePage, name="homepage"),
path('collection/', collectionPage, name="collection"),
path('product/<int:id>/', productDetail, name="detail"),
path('delete-product/<int:id>/', deleteProduct, name="delete-product")
]
| 39.592593
| 77
| 0.710945
|
504d48cdcd52d45cb7c7bd079a809066d66f5766
| 776
|
py
|
Python
|
tests/test_integration.py
|
trickhub/echelonpy
|
75d04eb83116fabf6d86451055ca8ef4e79929bd
|
[
"MIT"
] | null | null | null |
tests/test_integration.py
|
trickhub/echelonpy
|
75d04eb83116fabf6d86451055ca8ef4e79929bd
|
[
"MIT"
] | null | null | null |
tests/test_integration.py
|
trickhub/echelonpy
|
75d04eb83116fabf6d86451055ca8ef4e79929bd
|
[
"MIT"
] | null | null | null |
from os import path
from freezegun import freeze_time
from nose.tools import assert_equals
from echelonpy.__main__ import generate_output
@freeze_time("2017-04-23T12:57:42Z")
def test_single_stage():
_integration_test('single_stage')
@freeze_time("2017-04-23T12:59:00Z")
def test_multi_stage():
_integration_test('multi_stage')
def _integration_test(fixture_name):
input_path = path.join(path.dirname(__file__), 'fixtures', '{}.csv'.format(fixture_name))
expected_output_path = path.join(path.dirname(__file__), 'fixtures', '{}.tcx'.format(fixture_name))
actual_tcx = generate_output(input_path)
with open(expected_output_path, "r") as expected_file:
expected_tcx = expected_file.read()
assert_equals(expected_tcx, actual_tcx)
| 28.740741
| 103
| 0.756443
|
428f699275fb7ddbd6c7237eb5166b999e6cbe22
| 558
|
py
|
Python
|
alembic/versions/6f1e7ecaa9fd_add_stages_authors.py
|
scifanchain/api
|
eadb46625971bdc9ffe1893fa634907d54e9919f
|
[
"MIT"
] | 2
|
2021-06-22T14:13:33.000Z
|
2021-07-04T18:18:37.000Z
|
alembic/versions/6f1e7ecaa9fd_add_stages_authors.py
|
scifanchain/api
|
eadb46625971bdc9ffe1893fa634907d54e9919f
|
[
"MIT"
] | null | null | null |
alembic/versions/6f1e7ecaa9fd_add_stages_authors.py
|
scifanchain/api
|
eadb46625971bdc9ffe1893fa634907d54e9919f
|
[
"MIT"
] | null | null | null |
"""add stages_authors
Revision ID: 6f1e7ecaa9fd
Revises: cdd8c82b1f69
Create Date: 2021-07-03 18:45:46.796481
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6f1e7ecaa9fd'
down_revision = 'cdd8c82b1f69'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| 19.241379
| 65
| 0.689964
|
6b21f0df35d958116672f8882f1cf8b3d09452a0
| 669
|
py
|
Python
|
deposit/migrations/0003_auto_20201207_0159.py
|
10sujitkhanal/forzza
|
d51332fe0655f85deb5acd612754f0b0ed9d2f3f
|
[
"MIT"
] | null | null | null |
deposit/migrations/0003_auto_20201207_0159.py
|
10sujitkhanal/forzza
|
d51332fe0655f85deb5acd612754f0b0ed9d2f3f
|
[
"MIT"
] | null | null | null |
deposit/migrations/0003_auto_20201207_0159.py
|
10sujitkhanal/forzza
|
d51332fe0655f85deb5acd612754f0b0ed9d2f3f
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.3 on 2020-12-07 01:59
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('deposit', '0002_auto_20201205_0154'),
]
operations = [
migrations.RemoveField(
model_name='deposit',
name='deposit_amount_es',
),
migrations.RemoveField(
model_name='deposit',
name='deposit_amount_ru',
),
migrations.RemoveField(
model_name='deposit',
name='review_es',
),
migrations.RemoveField(
model_name='deposit',
name='review_ru',
),
]
| 22.3
| 47
| 0.54858
|
d68916de96de1cb94856855011ee6e15328ed730
| 2,636
|
py
|
Python
|
logstash/handler_http.py
|
MWedl/python-logstash
|
d318f3c3a91576a6d86522506aad5e04c479ee60
|
[
"MIT"
] | 1
|
2019-06-27T19:39:15.000Z
|
2019-06-27T19:39:15.000Z
|
logstash/handler_http.py
|
MWedl/python-logstash
|
d318f3c3a91576a6d86522506aad5e04c479ee60
|
[
"MIT"
] | null | null | null |
logstash/handler_http.py
|
MWedl/python-logstash
|
d318f3c3a91576a6d86522506aad5e04c479ee60
|
[
"MIT"
] | 1
|
2019-07-03T14:36:09.000Z
|
2019-07-03T14:36:09.000Z
|
from logging import NullHandler
import requests
from requests.auth import HTTPBasicAuth
from logstash import formatter
class HTTPLogstashHandler(NullHandler, object):
"""Python logging handler for Logstash. Sends events over HTTP.
:param host: The host of the logstash server.
:param port: The port of the logstash server (default 80).
:param ssl: Use SSL for logstash server (default False).
:param message_type: The type of the message (default logstash).
    :param fqdn: Indicates whether to show the fully qualified domain name (default False).
:param tags: list of tags for a logger (default is None).
:param verify: verify ssl (default is True)
:param username: basic_auth user (default is None)
    :param password: basic_auth password (default is None)
:param limit_stacktrace: limit characters for stacktraces
:param limit_string_fields: limit characters for string fields
:param limit_containers: limit length of containers (dict, list, set)
"""
def __init__(self, host, port=80, ssl=False, message_type='logstash', tags=None, fqdn=False, verify=True,
username=None, password=None, limit_stacktrace=0, limit_string_fields=0, limit_containers=0):
super(NullHandler, self).__init__()
self.formatter = formatter.LogstashFormatter(message_type, tags, fqdn, limit_stacktrace=limit_stacktrace,
limit_string_fields=limit_string_fields,
limit_containers=limit_containers)
if username and password:
self.auth = HTTPBasicAuth(username, password)
else:
self.auth = None
self.ssl = ssl
self.verify = verify
self.host = host
self.port = port
def emit(self, record):
        if isinstance(record, bytes):
record = record.decode("UTF-8")
scheme = "http"
if self.ssl:
scheme = "https"
url = "{}://{}:{}".format(scheme, self.host, self.port)
try:
headers = {'Content-type': 'application/json'}
r = requests.post(url, auth=self.auth, data=record, verify=self.verify, headers=headers)
if r.status_code != requests.codes.ok:
self.handleError(record)
except Exception:
self.handleError(record)
def handle(self, record):
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(self.formatter.format(record))
finally:
self.release()
return rv
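# Minimal usage sketch (illustrative only; the host, port and credentials below are
# placeholders, not defaults shipped with this module):
#
#   import logging
#   logger = logging.getLogger('app')
#   logger.setLevel(logging.INFO)
#   logger.addHandler(HTTPLogstashHandler('logstash.example.org', port=8080,
#                                         ssl=True, username='user', password='secret'))
#   logger.info('hello logstash')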
| 37.657143
| 113
| 0.624431
|
d1ec33b632b3c97892194bf506095de341f9bc74
| 6,959
|
py
|
Python
|
java/javaentity-dto-splitter/main.py
|
kinow/dork-scripts
|
a4fa7980a8cdff41df806bb4d4b70f7b4ac89349
|
[
"CC-BY-4.0"
] | 1
|
2016-08-07T07:45:24.000Z
|
2016-08-07T07:45:24.000Z
|
java/javaentity-dto-splitter/main.py
|
kinow/dork-scripts
|
a4fa7980a8cdff41df806bb4d4b70f7b4ac89349
|
[
"CC-BY-4.0"
] | null | null | null |
java/javaentity-dto-splitter/main.py
|
kinow/dork-scripts
|
a4fa7980a8cdff41df806bb4d4b70f7b4ac89349
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python3
# requirements: javalang
import os
import re
import sys
import javalang
"""
Because there is no way :)
"""
# Patterns we do not want in an Entity
ENTITY_FILTER_PATTERNS = [
'^import .*javax\.xml\.bind.*',
'^@Xml.*'
]
# Patterns we do not want in a DTO
DTO_FILTER_PATTERNS = [
'^import .*javax\.persistence.*',
'^@Table.*',
'^@Entity.*',
'^@Id.*',
'^@Column.*',
'^@GeneratedValue.*',
'^@Transient.*',
'^@Temporal.*',
'^@Inheritance.*',
'^@DiscriminatorColumn.*'
]
# Container for the result of a parse: the generated class body and its output file name
class ParseResult(object):
def __init__(self):
self.body = ''
self.file_name = ''
class Parser(object):
def __init__(self):
self.class_name = 'SomeEntity'
def filter_line(self, line, pattern_list):
"""
Parse a line, applying a blacklist list of patterns.
"""
for pattern in pattern_list:
m = re.search(pattern, line)
if m:
return None
# Try to find the class name
if self.class_name == 'SomeEntity':
m = re.search('.*class\s+([^\s]+)\s+.*', line)
if m:
self.class_name = m.group(1).strip()
return line
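    # Illustrative behaviour (hypothetical input lines): with DTO_FILTER_PATTERNS,
    # "@Entity" and "import javax.persistence.Column;" are dropped (filter_line
    # returns None), while "public class Account {" passes through unchanged and
    # also sets self.class_name to "Account".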
    def parse(self, class_file):
raise Exception('Not implemented!')
def get_output_file_name(self):
return self.class_name + '.java'
class EntityParser(Parser):
def parse(self, class_file):
parse_result = ParseResult()
entity_contents = []
for line in class_file:
line = line.strip()
result = self.filter_line(line, ENTITY_FILTER_PATTERNS)
if result is not None:
entity_contents.append(result)
content = '\n'.join(entity_contents)
parse_result.body = content
parse_result.file_name = self.get_output_file_name()
return parse_result
class DtoParser(Parser):
# From: https://stackoverflow.com/questions/12410242/python-capitalize-first-letter-only
def _upperfirst(self, x):
return x[0].upper() + x[1:]
def _lowerfirst(self, x):
return x[0].lower() + x[1:]
def parse(self, class_file):
parse_result = ParseResult()
dto_contents = []
for line in class_file:
line = line.strip()
result = self.filter_line(line, DTO_FILTER_PATTERNS)
if result is not None:
dto_contents.append(result)
initial_content = '\n'.join(dto_contents)
tree = javalang.parse.parse(initial_content)
lines = []
# package...
lines.append('package ' + tree.package.name + ';')
lines.append('')
# imports...
for imp in tree.imports:
lines.append('import ' + imp.path + ';')
lines.append('')
# annotations...
class_decl = tree.types[0]
for ann in class_decl.annotations:
annotation = '@'+ann.name+'('
elems_values = []
if ann.element is not None:
if type(ann.element) is javalang.tree.MemberReference:
elems_values.append(ann.element.qualifier + '.' + ann.element.member)
else:
for elem in ann.element:
elems_values.append(elem.name + '=' + elem.value.value)
annotation += ','.join(elems_values)
annotation += ')'
lines.append(annotation)
# class...
lines.append('public class ' + class_decl.name + 'PO extends BasePO<' + class_decl.name + '> implements Serializable {')
lines.append('')
# constructor...
lines.append('public ' + class_decl.name + 'PO(' + class_decl.name + ' entity) {')
lines.append('super(entity);')
lines.append('}')
lines.append('')
# transform the fields into methods...
for field in class_decl.fields:
has_xml_annotation = False
for ann in field.annotations:
m = re.search('^Xml.*', ann.name)
if m:
annotation = '@'+ann.name+'('
elems_values = []
if ann.element is not None:
if type(ann.element) is javalang.tree.MemberReference:
elems_values.append(ann.element.qualifier + '.' + ann.element.member)
else:
for elem in ann.element:
elems_values.append(elem.name + '=' + elem.value.value)
annotation += ','.join(elems_values)
annotation += ')'
lines.append(annotation)
has_xml_annotation = True
if has_xml_annotation:
lines.append('@JsonGetter')
lines.append('public ' + field.type.name + ' get' + self._upperfirst(field.declarators[0].name) + '() {')
lines.append(' return entity.get' + self._upperfirst(field.declarators[0].name) + '();')
lines.append('}')
lines.append('')
lines.append('@JsonSetter')
lines.append('public void set' + self._upperfirst(field.declarators[0].name) + '(' + field.type.name + ' ' + self._lowerfirst(field.declarators[0].name) + ') {')
lines.append(' entity.set' + self._upperfirst(field.declarators[0].name) + '(' + self._lowerfirst(field.declarators[0].name) + ');')
lines.append('}')
lines.append('')
# close class
lines.append('}')
lines.append('')
content = '\n'.join(lines)
parse_result.body = content
parse_result.file_name = self.get_output_file_name()
return parse_result
def get_output_file_name(self):
return self.class_name + 'PO.java'
def main():
"""
The input for the program is a FILE that contains a Java class.
The Java class contains a Hibernate Entity. Besides an Entity, the class
may also be a DTO.
The program will create a file for the class with only the Hibernate Entity
related fields and methods. And will create another file for the DTO.
"""
# From: https://stackoverflow.com/questions/7165749/open-file-in-a-relative-location-in-python
script_dir = os.path.dirname(__file__)
abs_file_path = os.path.join(script_dir, 'class.txt')
entity_parser = EntityParser()
dto_parser = DtoParser()
with open(abs_file_path, 'r') as f:
parse_result = entity_parser.parse(f)
with open(parse_result.file_name, 'w') as o:
o.write(parse_result.body)
with open(abs_file_path, 'r') as f:
parse_result = dto_parser.parse(f)
with open(parse_result.file_name, 'w') as o:
o.write(parse_result.body)
if __name__ == '__main__':
main()
sys.exit(0)
| 31.346847
| 177
| 0.558845
|
90c0f235eae4d495be2b248654935d42184ce72a
| 4,894
|
py
|
Python
|
mmdet/models/necks/fpn.py
|
tjsongzw/mmdetection
|
1cbc88e3f528fa27489b9d68595b47ddb5cb1f34
|
[
"Apache-2.0"
] | 16
|
2021-03-02T07:41:01.000Z
|
2022-03-14T08:55:45.000Z
|
mmdet/models/necks/fpn.py
|
superlxt/mmdetection
|
0e1f3b0d42ee7e1623322d76538aac8510abf6c2
|
[
"Apache-2.0"
] | 2
|
2022-01-06T20:54:13.000Z
|
2022-02-24T03:50:51.000Z
|
mmdet/models/necks/fpn.py
|
superlxt/mmdetection
|
0e1f3b0d42ee7e1623322d76538aac8510abf6c2
|
[
"Apache-2.0"
] | 2
|
2021-05-26T19:23:35.000Z
|
2022-01-06T20:30:24.000Z
|
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init
from ..utils import ConvModule
from ..registry import NECKS
@NECKS.register_module
class FPN(nn.Module):
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
add_extra_convs=False,
extra_convs_on_inputs=True,
normalize=None,
activation=None):
super(FPN, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.activation = activation
self.with_bias = normalize is None
if end_level == -1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level < inputs, no extra level is allowed
self.backbone_end_level = end_level
assert end_level <= len(in_channels)
assert num_outs == end_level - start_level
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
self.extra_convs_on_inputs = extra_convs_on_inputs
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
normalize=normalize,
bias=self.with_bias,
activation=self.activation,
inplace=False)
fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
normalize=normalize,
bias=self.with_bias,
activation=self.activation,
inplace=False)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
# add extra conv layers (e.g., RetinaNet)
extra_levels = num_outs - self.backbone_end_level + self.start_level
if add_extra_convs and extra_levels >= 1:
for i in range(extra_levels):
if i == 0 and self.extra_convs_on_inputs:
in_channels = self.in_channels[self.backbone_end_level - 1]
else:
in_channels = out_channels
extra_fpn_conv = ConvModule(
in_channels,
out_channels,
3,
stride=2,
padding=1,
normalize=normalize,
bias=self.with_bias,
activation=self.activation,
inplace=False)
self.fpn_convs.append(extra_fpn_conv)
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
def forward(self, inputs):
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
laterals[i - 1] += F.interpolate(
laterals[i], scale_factor=2, mode='nearest')
# build outputs
# part 1: from original levels
outs = [
self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
]
# part 2: add extra levels
if self.num_outs > len(outs):
# use max pool to get more levels on top of outputs
# (e.g., Faster R-CNN, Mask R-CNN)
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
if self.extra_convs_on_inputs:
orig = inputs[self.backbone_end_level - 1]
outs.append(self.fpn_convs[used_backbone_levels](orig))
else:
outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))
for i in range(used_backbone_levels + 1, self.num_outs):
# BUG: we should add relu before each extra conv
outs.append(self.fpn_convs[i](outs[-1]))
return tuple(outs)
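if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; the channel counts and feature-map
    # sizes are arbitrary, not values prescribed by any particular detector config).
    import torch
    fpn = FPN(in_channels=[256, 512, 1024], out_channels=256, num_outs=5,
              add_extra_convs=True)
    fpn.init_weights()
    feats = [torch.rand(1, c, s, s)
             for c, s in zip([256, 512, 1024], [64, 32, 16])]
    outs = fpn(feats)
    # 3 lateral levels plus 2 extra stride-2 convs -> 5 maps, each with 256 channels
    assert len(outs) == 5 and all(o.shape[1] == 256 for o in outs)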
| 36.796992
| 79
| 0.552718
|
c47295e1350f8571b300abc6cfe8ffe8ce27b77d
| 5,864
|
py
|
Python
|
ape_etherscan/client.py
|
unparalleled-js/ape-etherscan
|
a970233426c6ac4793340077a1105800fa1f0747
|
[
"Apache-2.0"
] | null | null | null |
ape_etherscan/client.py
|
unparalleled-js/ape-etherscan
|
a970233426c6ac4793340077a1105800fa1f0747
|
[
"Apache-2.0"
] | null | null | null |
ape_etherscan/client.py
|
unparalleled-js/ape-etherscan
|
a970233426c6ac4793340077a1105800fa1f0747
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
from dataclasses import dataclass
from typing import Dict, Iterator, List, Optional, Union
import requests
from ape.utils import USER_AGENT
from ape_etherscan.exceptions import UnsupportedEcosystemError, get_request_error
from ape_etherscan.utils import API_KEY_ENV_VAR_NAME
def get_etherscan_uri(ecosystem_name: str, network_name: str):
if ecosystem_name == "ethereum":
return (
f"https://{network_name}.etherscan.io"
if network_name != "mainnet"
else "https://etherscan.io"
)
elif ecosystem_name == "fantom":
return (
f"https://{network_name}.ftmscan.com"
if network_name != "opera"
else "https://ftmscan.com"
)
raise UnsupportedEcosystemError(ecosystem_name)
def get_etherscan_api_uri(ecosystem_name: str, network_name: str):
if ecosystem_name == "ethereum":
return (
f"https://api-{network_name}.etherscan.io/api"
if network_name != "mainnet"
else "https://api.etherscan.io/api"
)
elif ecosystem_name == "fantom":
return (
f"https://api-{network_name}.ftmscan.com"
if network_name != "opera"
else "https://api.ftmscan.com"
)
raise UnsupportedEcosystemError(ecosystem_name)
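# Illustrative results of the two helpers above (network names are examples only):
#   get_etherscan_uri("ethereum", "mainnet")  -> "https://etherscan.io"
#   get_etherscan_uri("ethereum", "ropsten")  -> "https://ropsten.etherscan.io"
#   get_etherscan_api_uri("fantom", "opera")  -> "https://api.ftmscan.com"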
@dataclass
class SourceCodeResponse:
abi: str = ""
name: str = "unknown"
class _APIClient:
DEFAULT_HEADERS = {"User-Agent": USER_AGENT}
def __init__(self, ecosystem_name: str, network_name: str, module_name: str):
self._ecosystem_name = ecosystem_name
self._network_name = network_name
self._module_name = module_name
@property
def base_uri(self) -> str:
return get_etherscan_api_uri(self._ecosystem_name, self._network_name)
@property
def base_params(self) -> Dict:
return {"module": self._module_name}
def _get(self, params: Optional[Dict] = None) -> Union[List, Dict]:
params = self.__authorize(params)
return self._request("GET", params=params, headers=self.DEFAULT_HEADERS)
def _post(self, json_dict: Optional[Dict] = None) -> Dict:
json_dict = self.__authorize(json_dict)
return self._request("POST", json=json_dict, headers=self.DEFAULT_HEADERS) # type: ignore
def _request(self, method: str, *args, **kwargs) -> Union[List, Dict]:
response = requests.request(method.upper(), self.base_uri, *args, **kwargs)
response.raise_for_status()
response_data = response.json()
if response_data.get("isError", 0) or response_data.get("message", "") == "NOTOK":
raise get_request_error(response)
result = response_data.get("result")
if result and isinstance(result, str):
# Sometimes, the response is a stringified JSON object or list
result = json.loads(result)
return result
def __authorize(self, params_or_data: Optional[Dict] = None) -> Optional[Dict]:
api_key = os.environ.get(API_KEY_ENV_VAR_NAME)
if api_key and (not params_or_data or "apikey" not in params_or_data):
params_or_data = params_or_data or {}
params_or_data["apikey"] = api_key
return params_or_data
class ContractClient(_APIClient):
def __init__(self, ecosystem_name: str, network_name: str, address: str):
self._address = address
super().__init__(ecosystem_name, network_name, "contract")
def get_source_code(self) -> SourceCodeResponse:
params = {**self.base_params, "action": "getsourcecode", "address": self._address}
result = self._get(params=params) or []
if len(result) != 1:
return SourceCodeResponse()
data = result[0]
abi = data.get("ABI") or ""
name = data.get("ContractName") or "unknown"
return SourceCodeResponse(abi, name)
class AccountClient(_APIClient):
def __init__(self, ecosystem_name: str, network_name: str, address: str):
self._address = address
super().__init__(ecosystem_name, network_name, "account")
def get_all_normal_transactions(
self,
start_block: Optional[int] = None,
end_block: Optional[int] = None,
offset: int = 100,
sort: str = "asc",
) -> Iterator[Dict]:
page_num = 1
last_page_results = offset # Start at offset to trigger iteration
while last_page_results == offset:
page = self._get_page_of_normal_transactions(
page_num, start_block, end_block, offset=offset, sort=sort
)
if len(page):
yield from page
last_page_results = len(page)
page_num += 1
def _get_page_of_normal_transactions(
self,
page: int,
start_block: Optional[int] = None,
end_block: Optional[int] = None,
offset: int = 100,
sort: str = "asc",
) -> List[Dict]:
params = {
**self.base_params,
"action": "txlist",
"address": self._address,
"startblock": start_block,
"endblock": end_block,
"page": page,
"offset": offset,
"sort": sort,
}
result = self._get(params=params)
return result # type: ignore
class ClientFactory:
def __init__(self, ecosystem_name: str, network_name: str):
self._ecosystem_name = ecosystem_name
self._network_name = network_name
def get_contract_client(self, contract_address: str) -> ContractClient:
return ContractClient(self._ecosystem_name, self._network_name, contract_address)
def get_account_client(self, account_address: str) -> AccountClient:
return AccountClient(self._ecosystem_name, self._network_name, account_address)
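# Minimal usage sketch (illustrative only; the zero address is a placeholder and the
# API key environment variable must be set for authorized requests):
#
#   factory = ClientFactory("ethereum", "mainnet")
#   contract = factory.get_contract_client("0x0000000000000000000000000000000000000000")
#   source = contract.get_source_code()  # -> SourceCodeResponse(abi=..., name=...)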
| 33.129944
| 98
| 0.636426
|
c83ae0a0a71d06f3c3e3bcb29db2f4d649cb22c6
| 5,366
|
py
|
Python
|
milk/unsupervised/gaussianmixture.py
|
luispedro/milk
|
abc2a28b526c199414d42c0a26092938968c3caf
|
[
"MIT"
] | 284
|
2015-01-21T09:07:55.000Z
|
2022-03-19T07:39:17.000Z
|
milk/unsupervised/gaussianmixture.py
|
pursh2002/milk
|
abc2a28b526c199414d42c0a26092938968c3caf
|
[
"MIT"
] | 6
|
2015-04-22T15:17:44.000Z
|
2018-04-22T16:06:24.000Z
|
milk/unsupervised/gaussianmixture.py
|
pursh2002/milk
|
abc2a28b526c199414d42c0a26092938968c3caf
|
[
"MIT"
] | 109
|
2015-02-03T07:39:59.000Z
|
2022-01-16T00:16:13.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2011, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# License: MIT. See COPYING.MIT file in the milk distribution
from __future__ import division
import numpy as np
from numpy import log, pi, array
from numpy.linalg import det, inv
from .kmeans import residual_sum_squares, centroid_errors
__all__ = [
'BIC',
'AIC',
'log_likelihood',
'nr_parameters',
]
def log_likelihood(fmatrix,assignments,centroids,model='one_variance',covs=None):
'''
log_like = log_likelihood(feature_matrix, assignments, centroids, model='one_variance', covs=None)
Compute the log likelihood of feature_matrix[i] being generated from centroid[i]
'''
N,q = fmatrix.shape
k = len(centroids)
if model == 'one_variance':
Rss = residual_sum_squares(fmatrix,assignments,centroids)
#sigma2=Rss/N
return -N/2.*log(2*pi*Rss/N)-N/2
elif model == 'diagonal_covariance':
errors = centroid_errors(fmatrix,assignments,centroids)
errors *= errors
errors = errors.sum(1)
Rss = np.zeros(k)
counts = np.zeros(k)
for i in range(fmatrix.shape[0]):
c = assignments[i]
Rss[c] += errors[i]
counts[c] += 1
sigma2s = Rss/(counts+(counts==0))
return -N/2.*log(2*pi) -N/2. -1/2.*np.sum(counts*np.log(sigma2s+(counts==0)))
elif model == 'full_covariance':
res = -N*q/2.*log(2*pi)
for k in range(len(centroids)):
diff = (fmatrix[assignments == k] - centroids[k])
if covs is None:
covm = np.cov(diff.T)
else:
covm = covs[k]
if covm.shape == ():
covm = np.matrix([[covm]])
icov = np.matrix(inv(covm))
diff = np.matrix(diff)
Nk = diff.shape[0]
res += -Nk/2.*log(det(covm)) + \
-.5 * (diff * icov * diff.T).diagonal().sum()
return res
raise ValueError("log_likelihood: cannot handle model '%s'" % model)
def nr_parameters(fmatrix,k,model='one_variance'):
'''
nr_p = nr_parameters(fmatrix, k, model='one_variance')
Compute the number of parameters for a model of k clusters on
Parameters
----------
fmatrix : 2d-array
feature matrix
k : integer
nr of clusters
model : str
one of 'one_variance' (default), 'diagonal_covariance', or 'full_covariance'
Returns
-------
nr_p : integer
Number of parameters
'''
N,q = fmatrix.shape
if model == 'one_variance':
return k*q+1
elif model == 'diagonal_covariance':
return k*(q+1)
elif model == 'full_covariance':
return k*+q*q
raise ValueError("milk.unsupervised.gaussianmixture.nr_parameters: cannot handle model '%s'" % model)
def _compute(type, fmatrix, assignments, centroids, model='one_variance', covs=None):
N,q = fmatrix.shape
k = len(centroids)
log_like = log_likelihood(fmatrix, assignments, centroids, model, covs)
n_param = nr_parameters(fmatrix,k,model)
if type == 'BIC':
return -2*log_like + n_param * log(N)
elif type == 'AIC':
return -2*log_like + 2 * n_param
else:
assert False
def BIC(fmatrix, assignments, centroids, model='one_variance', covs=None):
'''
B = BIC(fmatrix, assignments, centroids, model='one_variance', covs={From Data})
Compute Bayesian Information Criterion
Parameters
----------
fmatrix : 2d-array
feature matrix
assignments : 2d-array
Centroid assignments
centroids : sequence
Centroids
model : str, optional
one of
'one_variance'
All features share the same variance parameter sigma^2. Default
'full_covariance'
Estimate a full covariance matrix or use covs[i] for centroid[i]
covs : sequence or matrix, optional
Covariance matrices. If None, then estimate from the data. If scalars
instead of matrices are given, then s stands for sI (i.e., the diagonal
matrix with s along the diagonal).
Returns
-------
B : float
BIC value
See Also
--------
AIC
'''
return _compute('BIC', fmatrix, assignments, centroids, model, covs)
def AIC(fmatrix,assignments,centroids,model='one_variance',covs=None):
'''
A = AIC(fmatrix,assignments,centroids,model)
Compute Akaike Information Criterion
Parameters
----------
fmatrix : 2d-array
feature matrix
assignments : 2d-array
Centroid assignments
centroids : sequence
Centroids
model : str, optional
one of
'one_variance'
All features share the same variance parameter sigma^2. Default
'full_covariance'
Estimate a full covariance matrix or use covs[i] for centroid[i]
covs : sequence, optional
Covariance matrices. If None, then estimate from the data. If scalars
instead of matrices are given, then s stands for sI (i.e., the diagonal
matrix with s along the diagonal).
Returns
-------
B : float
AIC value
See Also
--------
BIC
'''
return _compute('AIC', fmatrix, assignments, centroids, model, covs)
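# Illustrative usage sketch (assumes a clustering computed elsewhere, e.g. with
# milk's k-means; the names below are placeholders, not part of this module):
#
#   assignments, centroids = kmeans(features, k=3)
#   print(BIC(features, assignments, centroids, model='one_variance'))
#   print(AIC(features, assignments, centroids, model='one_variance'))
#   # lower values indicate a better fit/complexity trade-off when comparing k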
| 29.322404
| 105
| 0.608647
|
f4a8a8a65fe5af1fb0e357c070bcc57435b12516
| 110
|
py
|
Python
|
django_extended/models/__init__.py
|
dalou/django-extended
|
a7ba952ea7089cfb319b4615ae098579c9ab14f9
|
[
"BSD-3-Clause"
] | 1
|
2015-12-14T17:16:04.000Z
|
2015-12-14T17:16:04.000Z
|
django_extended/models/__init__.py
|
dalou/django-extended
|
a7ba952ea7089cfb319b4615ae098579c9ab14f9
|
[
"BSD-3-Clause"
] | null | null | null |
django_extended/models/__init__.py
|
dalou/django-extended
|
a7ba952ea7089cfb319b4615ae098579c9ab14f9
|
[
"BSD-3-Clause"
] | null | null | null |
from .user import User
from ..emailing.models import *
from ..flatpages.models import *
from .tree import Tree
| 27.5
| 32
| 0.772727
|
9e1c703956fb66322ef009f7f13ff1a5f8925597
| 1,878
|
py
|
Python
|
electrum_atom/plot.py
|
rootSig/electrum-atom
|
338b0dbcde96335b92d1301bf4fdd0854937c8cf
|
[
"MIT"
] | 4
|
2021-02-14T08:48:36.000Z
|
2021-04-23T11:14:41.000Z
|
electrum_atom/plot.py
|
bitcoin-atom/electrum-atom
|
156d4d54c5493bcda930efcb972a0c600c36a11d
|
[
"MIT"
] | 1
|
2019-11-12T03:09:15.000Z
|
2019-11-12T03:09:15.000Z
|
electrum_atom/plot.py
|
bitcoin-atom/electrum-atom
|
156d4d54c5493bcda930efcb972a0c600c36a11d
|
[
"MIT"
] | 1
|
2018-09-11T23:30:16.000Z
|
2018-09-11T23:30:16.000Z
|
import datetime
from collections import defaultdict
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as md
from .i18n import _
from .bitcoin import COIN
class NothingToPlotException(Exception):
def __str__(self):
return _("Nothing to plot.")
def plot_history(history):
if len(history) == 0:
raise NothingToPlotException()
hist_in = defaultdict(int)
hist_out = defaultdict(int)
for item in history:
if not item['confirmations']:
continue
if item['timestamp'] is None:
continue
value = item['value'].value/COIN
date = item['date']
datenum = int(md.date2num(datetime.date(date.year, date.month, 1)))
if value > 0:
hist_in[datenum] += value
else:
hist_out[datenum] -= value
f, axarr = plt.subplots(2, sharex=True)
plt.subplots_adjust(bottom=0.2)
plt.xticks( rotation=25 )
ax = plt.gca()
plt.ylabel('BCA')
plt.xlabel('Month')
xfmt = md.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(xfmt)
axarr[0].set_title('Monthly Volume')
xfmt = md.DateFormatter('%Y-%m')
ax.xaxis.set_major_formatter(xfmt)
width = 20
r1 = None
r2 = None
dates_values = list(zip(*sorted(hist_in.items())))
if dates_values and len(dates_values) == 2:
dates, values = dates_values
r1 = axarr[0].bar(dates, values, width, label='incoming')
axarr[0].legend(loc='upper left')
dates_values = list(zip(*sorted(hist_out.items())))
if dates_values and len(dates_values) == 2:
dates, values = dates_values
r2 = axarr[1].bar(dates, values, width, color='r', label='outgoing')
axarr[1].legend(loc='upper left')
if r1 is None and r2 is None:
raise NothingToPlotException()
return plt
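# Illustrative input shape (hypothetical values): each history item is expected to
# look roughly like
#   {'confirmations': 2, 'timestamp': 1514764800, 'date': datetime.date(2018, 1, 1),
#    'value': <amount object with a .value attribute in satoshis>, ...}
# Items without confirmations or without a timestamp are skipped, and value.value
# is divided by COIN before being added to the monthly totals.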
| 29.34375
| 76
| 0.633653
|
4146ac6d3c44e023cd5cd3f2f054a678a127f9d9
| 1,570
|
py
|
Python
|
eventsourcing/tests/test_cipher.py
|
alexanderlarin/eventsourcing
|
6f2a4ded3c783ba3ee465243a48f66ecdee20f52
|
[
"BSD-3-Clause"
] | 1
|
2020-02-10T08:12:31.000Z
|
2020-02-10T08:12:31.000Z
|
eventsourcing/tests/test_cipher.py
|
alexanderlarin/eventsourcing
|
6f2a4ded3c783ba3ee465243a48f66ecdee20f52
|
[
"BSD-3-Clause"
] | null | null | null |
eventsourcing/tests/test_cipher.py
|
alexanderlarin/eventsourcing
|
6f2a4ded3c783ba3ee465243a48f66ecdee20f52
|
[
"BSD-3-Clause"
] | null | null | null |
from unittest import TestCase
from eventsourcing.exceptions import DataIntegrityError
class TestAESCipher(TestCase):
def test_encrypt_mode_gcm(self):
from eventsourcing.utils.cipher.aes import AESCipher
from eventsourcing.utils.random import encode_random_bytes, decode_bytes
# Unicode string representing 256 random bits encoded with Base64.
cipher_key = encode_random_bytes(num_bytes=32)
# Construct AES cipher.
cipher = AESCipher(cipher_key=decode_bytes(cipher_key))
# Encrypt some plaintext.
ciphertext = cipher.encrypt('plaintext')
self.assertNotEqual(ciphertext, 'plaintext')
# Decrypt some ciphertext.
plaintext = cipher.decrypt(ciphertext)
self.assertEqual(plaintext, 'plaintext')
# Check DataIntegrityError is raised (broken Base64 padding).
with self.assertRaises(DataIntegrityError):
damaged = ciphertext[:-1]
cipher.decrypt(damaged)
# Check DataIntegrityError is raised (MAC check fails).
with self.assertRaises(DataIntegrityError):
damaged = 'a' + ciphertext[:-1]
cipher.decrypt(damaged)
# Check DataIntegrityError is raised (nonce too short).
with self.assertRaises(DataIntegrityError):
damaged = ciphertext[:0]
cipher.decrypt(damaged)
# Check DataIntegrityError is raised (tag too short).
with self.assertRaises(DataIntegrityError):
damaged = ciphertext[:20]
cipher.decrypt(damaged)
| 34.888889
| 80
| 0.67707
|
f463343eba65a4bce1c0b6b31a25811ace4ba4e3
| 19,688
|
py
|
Python
|
test/python/bindings/end_to_end/test_model_dir.py
|
ryansun117/marius
|
c6a81b2ea6b6b468baf5277cf6955f9543b66c82
|
[
"Apache-2.0"
] | null | null | null |
test/python/bindings/end_to_end/test_model_dir.py
|
ryansun117/marius
|
c6a81b2ea6b6b468baf5277cf6955f9543b66c82
|
[
"Apache-2.0"
] | null | null | null |
test/python/bindings/end_to_end/test_model_dir.py
|
ryansun117/marius
|
c6a81b2ea6b6b468baf5277cf6955f9543b66c82
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import shutil
from pathlib import Path
import pytest
import os
import marius as m
import torch
from test.python.constants import TMP_TEST_DIR, TESTING_DATA_DIR
from test.test_data.generate import generate_random_dataset
from test.test_configs.generate_test_configs import generate_configs_for_dataset
def run_configs(directory, model_dir=None, partitioned_eval=False, sequential_train_nodes=False):
for filename in os.listdir(directory):
if filename.startswith("M-"):
config_file = directory / Path(filename)
print("|||||||||||||||| RUNNING CONFIG ||||||||||||||||")
print(config_file)
config = m.config.loadConfig(config_file.__str__(), True)
if model_dir is not None:
config.storage.model_dir = model_dir + "/"
relation_mapping_filepath = Path(config.storage.dataset.dataset_dir) / Path("edges") / Path("relation_mapping.txt")
if relation_mapping_filepath.exists():
shutil.copy(str(relation_mapping_filepath), "{}/{}".format(config.storage.model_dir, "relation_mapping.txt"))
node_mapping_filepath = Path(config.storage.dataset.dataset_dir) / Path("nodes") / Path("node_mapping.txt")
if node_mapping_filepath.exists():
shutil.copy(str(node_mapping_filepath), "{}/{}".format(config.storage.model_dir, "node_mapping.txt"))
if partitioned_eval:
config.storage.full_graph_evaluation = False
if sequential_train_nodes:
config.storage.embeddings.options.node_partition_ordering = m.config.NodePartitionOrdering.SEQUENTIAL
config.storage.features.options.node_partition_ordering = m.config.NodePartitionOrdering.SEQUENTIAL
m.manager.marius_train(config)
def has_model_params(model_dir_path, task="lp", has_embeddings=False, has_relations=True):
if not model_dir_path.exists():
return False, "{} directory with model params not found".format(model_dir_path)
model_file = model_dir_path / Path("model.pt")
if not model_file.exists():
return False, "{} not found".format(model_file)
model_state_file = model_dir_path / Path("model_state.pt")
if not model_state_file.exists():
return False, "{} not found".format(model_state_file)
node_mapping_file = model_dir_path / Path("node_mapping.txt")
if not node_mapping_file.exists():
return False, "{} not found".format(node_mapping_file)
if has_relations:
relation_mapping_file = model_dir_path / Path("node_mapping.txt")
if not relation_mapping_file.exists():
return False, "{} not found".format(relation_mapping_file)
if task == "lp" or has_embeddings:
embeddings_file = model_dir_path / Path("embeddings.bin")
if not embeddings_file.exists():
return False, "{} not found".format(embeddings_file)
embeddings_state_file = model_dir_path / Path("embeddings_state.bin")
if not embeddings_state_file.exists():
return False, "{} not found".format(embeddings_state_file)
        rel_mapping_file = model_dir_path / Path("relation_mapping.txt")
        if not rel_mapping_file.exists():
            return False, "{} not found".format(rel_mapping_file)
return True, ""
class TestLP(unittest.TestCase):
output_dir = TMP_TEST_DIR / Path("relations")
@classmethod
def setUp(self):
if not self.output_dir.exists():
os.makedirs(self.output_dir)
num_nodes = 100
num_rels = 10
num_edges = 1000
name = "test_graph"
generate_random_dataset(output_dir=self.output_dir / Path(name),
num_nodes=num_nodes,
num_edges=num_edges,
num_rels=num_rels,
splits=[.9, .05, .05],
task="lp")
@classmethod
def tearDown(self):
# pass
if self.output_dir.exists():
shutil.rmtree(self.output_dir)
@pytest.mark.skipif(os.environ.get("MARIUS_NO_BINDINGS", None) == "TRUE", reason="Requires building the bindings")
def test_dm(self):
name = "dm"
shutil.copytree(self.output_dir / Path("test_graph"), self.output_dir / Path(name))
generate_configs_for_dataset(self.output_dir / Path(name),
model_names=["distmult"],
storage_names=["in_memory"],
training_names=["sync"],
evaluation_names=["sync"],
task="lp")
run_configs(self.output_dir / Path(name))
model_dir_path = self.output_dir / Path("test_graph") / Path("model_0")
ret, err = has_model_params(model_dir_path)
assert ret == True, err
run_configs(self.output_dir / Path(name))
model_dir_path = self.output_dir / Path("test_graph") / Path("model_1")
ret, err = has_model_params(model_dir_path)
assert ret == True, err
for i in range(2, 11):
model_dir_path = self.output_dir / Path("test_graph") / Path("model_{}".format(i))
model_dir_path.mkdir(parents=True, exist_ok=True)
model_dir_path = self.output_dir / Path("test_graph") / Path("model_10")
ret, err = has_model_params(model_dir_path)
assert ret == False, err
run_configs(self.output_dir / Path(name))
ret, err = has_model_params(model_dir_path)
assert ret == True, err
model_dir_path = self.output_dir / Path(name)
run_configs(self.output_dir / Path(name), str(model_dir_path))
ret, err = has_model_params(model_dir_path)
assert ret == True, err
class TestNC(unittest.TestCase):
output_dir = TMP_TEST_DIR / Path("relations")
@classmethod
def setUp(self):
if not self.output_dir.exists():
os.makedirs(self.output_dir)
num_nodes = 500
num_rels = 10
num_edges = 10000
name = "test_graph"
generate_random_dataset(output_dir=self.output_dir / Path(name),
num_nodes=num_nodes,
num_edges=num_edges,
num_rels=num_rels,
splits=[.9, .05, .05],
feature_dim=10,
task="nc")
@classmethod
def tearDown(self):
if self.output_dir.exists():
shutil.rmtree(self.output_dir)
@pytest.mark.skipif(os.environ.get("MARIUS_NO_BINDINGS", None) == "TRUE", reason="Requires building the bindings")
def test_gs(self):
name = "gs"
shutil.copytree(self.output_dir / Path("test_graph"), self.output_dir / Path(name))
generate_configs_for_dataset(self.output_dir / Path(name),
model_names=["gs_1_layer", "gs_3_layer"],
storage_names=["in_memory"],
training_names=["sync"],
evaluation_names=["sync"],
task="nc")
run_configs(self.output_dir / Path(name))
model_dir_path = self.output_dir / Path(name)
run_configs(self.output_dir / Path(name), str(model_dir_path))
ret, err = has_model_params(model_dir_path, "nc")
assert ret == True, err
@pytest.mark.skipif(os.environ.get("MARIUS_NO_BINDINGS", None) == "TRUE", reason="Requires building the bindings")
def test_async(self):
name = "async"
shutil.copytree(self.output_dir / Path("test_graph"), self.output_dir / Path(name))
generate_configs_for_dataset(self.output_dir / Path(name),
model_names=["gs_1_layer"],
storage_names=["in_memory"],
training_names=["async"],
evaluation_names=["async"],
task="nc")
run_configs(self.output_dir / Path(name))
model_dir_path = self.output_dir / Path(name)
run_configs(self.output_dir / Path(name), str(model_dir_path))
ret, err = has_model_params(model_dir_path, "nc")
assert ret == True, err
@pytest.mark.skipif(os.environ.get("MARIUS_NO_BINDINGS", None) == "TRUE", reason="Requires building the bindings")
def test_emb(self):
name = "emb"
shutil.copytree(self.output_dir / Path("test_graph"), self.output_dir / Path(name))
generate_configs_for_dataset(self.output_dir / Path(name),
model_names=["gs_1_layer_emb", "gs_3_layer_emb"],
storage_names=["in_memory"],
training_names=["sync"],
evaluation_names=["sync"],
task="nc")
run_configs(self.output_dir / Path(name))
model_dir_path = self.output_dir / Path(name)
run_configs(self.output_dir / Path(name), str(model_dir_path))
ret, err = has_model_params(model_dir_path, "nc", True)
assert ret == True, err
class TestLPBufferNoRelations(unittest.TestCase):
output_dir = TMP_TEST_DIR / Path("buffer_no_relations")
@classmethod
def setUp(self):
if not self.output_dir.exists():
os.makedirs(self.output_dir)
num_nodes = 100
num_rels = 1
num_edges = 1000
name = "test_graph"
generate_random_dataset(output_dir=self.output_dir / Path(name),
num_nodes=num_nodes,
num_edges=num_edges,
num_rels=num_rels,
num_partitions=8,
splits=[.9, .05, .05],
task="lp")
@classmethod
def tearDown(self):
if self.output_dir.exists():
shutil.rmtree(self.output_dir)
@pytest.mark.skipif(os.environ.get("MARIUS_NO_BINDINGS", None) == "TRUE", reason="Requires building the bindings")
def test_dm(self):
name = "dm"
shutil.copytree(self.output_dir / Path("test_graph"), self.output_dir / Path(name))
generate_configs_for_dataset(self.output_dir / Path(name),
model_names=["distmult"],
storage_names=["part_buffer"],
training_names=["sync"],
evaluation_names=["sync"],
task="lp")
run_configs(self.output_dir / Path(name))
model_dir_path = self.output_dir / Path(name)
run_configs(self.output_dir / Path(name), str(model_dir_path))
ret, err = has_model_params(model_dir_path, "lp", False)
assert ret == True, err
@pytest.mark.skipif(os.environ.get("MARIUS_NO_BINDINGS", None) == "TRUE", reason="Requires building the bindings")
def test_partitioned_eval(self):
num_nodes = 100
num_rels = 1
num_edges = 1000
name = "partitioned_eval"
generate_random_dataset(output_dir=self.output_dir / Path(name),
num_nodes=num_nodes,
num_edges=num_edges,
num_rels=num_rels,
splits=[.9, .05, .05],
num_partitions=8,
partitioned_eval=True,
task="lp")
generate_configs_for_dataset(self.output_dir / Path(name),
model_names=["distmult"],
storage_names=["part_buffer"],
training_names=["sync"],
evaluation_names=["sync", "async", "async_deg", "async_filtered"],
task="lp")
run_configs(self.output_dir / Path(name), partitioned_eval=True)
model_dir_path = self.output_dir / Path(name)
run_configs(self.output_dir / Path(name), str(model_dir_path))
ret, err = has_model_params(model_dir_path, "lp", False)
assert ret == True, err
class TestNCBuffer(unittest.TestCase):
output_dir = TMP_TEST_DIR / Path("buffer")
@classmethod
def setUp(self):
if not self.output_dir.exists():
os.makedirs(self.output_dir)
num_nodes = 500
num_rels = 10
num_edges = 10000
name = "test_graph"
generate_random_dataset(output_dir=self.output_dir / Path(name),
num_nodes=num_nodes,
num_edges=num_edges,
num_rels=num_rels,
splits=[.9, .05, .05],
num_partitions=8,
feature_dim=10,
task="nc")
@classmethod
def tearDown(self):
if self.output_dir.exists():
shutil.rmtree(self.output_dir)
@pytest.mark.skipif(os.environ.get("MARIUS_NO_BINDINGS", None) == "TRUE", reason="Requires building the bindings")
def test_gs(self):
name = "gs"
shutil.copytree(self.output_dir / Path("test_graph"), self.output_dir / Path(name))
generate_configs_for_dataset(self.output_dir / Path(name),
model_names=["gs_1_layer", "gs_3_layer"],
storage_names=["part_buffer"],
training_names=["sync"],
evaluation_names=["sync"],
task="nc")
run_configs(self.output_dir / Path(name))
model_dir_path = self.output_dir / Path(name)
run_configs(self.output_dir / Path(name), str(model_dir_path))
ret, err = has_model_params(model_dir_path, "nc")
assert ret == True, err
@pytest.mark.skipif(os.environ.get("MARIUS_NO_BINDINGS", None) == "TRUE", reason="Requires building the bindings")
def test_async(self):
name = "async"
shutil.copytree(self.output_dir / Path("test_graph"), self.output_dir / Path(name))
generate_configs_for_dataset(self.output_dir / Path(name),
model_names=["gs_1_layer"],
storage_names=["part_buffer"],
training_names=["async"],
evaluation_names=["async"],
task="nc")
run_configs(self.output_dir / Path(name))
model_dir_path = self.output_dir / Path(name)
run_configs(self.output_dir / Path(name), str(model_dir_path))
ret, err = has_model_params(model_dir_path, "nc")
assert ret == True, err
@pytest.mark.skipif(os.environ.get("MARIUS_NO_BINDINGS", None) == "TRUE", reason="Requires building the bindings")
def test_emb(self):
name = "emb"
shutil.copytree(self.output_dir / Path("test_graph"), self.output_dir / Path(name))
generate_configs_for_dataset(self.output_dir / Path(name),
model_names=["gs_1_layer_emb", "gs_3_layer_emb"],
storage_names=["part_buffer"],
training_names=["sync"],
evaluation_names=["sync"],
task="nc")
run_configs(self.output_dir / Path(name))
model_dir_path = self.output_dir / Path(name)
run_configs(self.output_dir / Path(name), str(model_dir_path))
ret, err = has_model_params(model_dir_path, "nc", True)
assert ret == True, err
@pytest.mark.skipif(os.environ.get("MARIUS_NO_BINDINGS", None) == "TRUE", reason="Requires building the bindings")
def test_partitioned_eval(self):
num_nodes = 500
num_rels = 10
num_edges = 10000
name = "partitioned_eval"
generate_random_dataset(output_dir=self.output_dir / Path(name),
num_nodes=num_nodes,
num_edges=num_edges,
num_rels=num_rels,
splits=[.9, .05, .05],
num_partitions=8,
partitioned_eval=True,
feature_dim=10,
task="nc")
generate_configs_for_dataset(self.output_dir / Path(name),
model_names=["gs_1_layer_emb", "gs_3_layer_emb", "gs_1_layer", "gs_3_layer"],
storage_names=["part_buffer"],
training_names=["sync"],
evaluation_names=["sync"],
task="nc")
run_configs(self.output_dir / Path(name), partitioned_eval=True)
model_dir_path = self.output_dir / Path(name)
run_configs(self.output_dir / Path(name), str(model_dir_path))
ret, err = has_model_params(model_dir_path, "nc", True)
assert ret == True, err
# @pytest.mark.skipif(os.environ.get("MARIUS_NO_BINDINGS", None) == "TRUE", reason="Requires building the bindings")
@pytest.mark.skip("Sequential ordering tests currently flakey at small scale")
def test_sequential(self):
num_nodes = 500
num_rels = 10
num_edges = 10000
name = "sequential_ordering"
generate_random_dataset(output_dir=self.output_dir / Path(name),
num_nodes=num_nodes,
num_edges=num_edges,
num_rels=num_rels,
splits=[.1, .05, .05],
num_partitions=8,
partitioned_eval=True,
sequential_train_nodes=True,
feature_dim=10,
task="nc")
generate_configs_for_dataset(self.output_dir / Path(name),
model_names=["gs_1_layer_emb", "gs_3_layer_emb", "gs_1_layer", "gs_3_layer"],
storage_names=["part_buffer"],
training_names=["sync"],
evaluation_names=["sync"],
task="nc")
run_configs(self.output_dir / Path(name), partitioned_eval=True, sequential_train_nodes=True)
model_dir_path = self.output_dir / Path(name)
run_configs(self.output_dir / Path(name), str(model_dir_path))
ret, err = has_model_params(model_dir_path, "nc", True)
assert ret == True, err
run_configs(self.output_dir / Path(name), partitioned_eval=False, sequential_train_nodes=True)
model_dir_path = self.output_dir / Path(name) / Path("_1")
run_configs(self.output_dir / Path(name), str(model_dir_path))
ret, err = has_model_params(model_dir_path, "nc", True)
assert ret == True, err
| 42.9869
| 131
| 0.552469
|
fd2b88d95b4b384b097101a23f2da893b971540c
| 2,881
|
py
|
Python
|
feature_extraction/genome_browser_tool.py
|
ramseylab/cerenkov
|
19570ad2a47416a70ae7bb066cc67842b3cdee1b
|
[
"Apache-2.0"
] | 1
|
2020-06-25T08:10:10.000Z
|
2020-06-25T08:10:10.000Z
|
ground_truth/osu17/genome_browser_tool.py
|
ramseylab/cerenkov
|
19570ad2a47416a70ae7bb066cc67842b3cdee1b
|
[
"Apache-2.0"
] | 2
|
2017-08-23T21:09:10.000Z
|
2018-03-28T23:42:24.000Z
|
ground_truth/osu17/genome_browser_tool.py
|
ramseylab/cerenkov
|
19570ad2a47416a70ae7bb066cc67842b3cdee1b
|
[
"Apache-2.0"
] | null | null | null |
# This file is copyright 2002 Jim Kent, but license is hereby
# granted for all use - public, private or commercial.
# Bin indexing system used in UCSC Genome Browser
# See http://genomewiki.ucsc.edu/index.php/Bin_indexing_system
# Note that `bin` is NOT an index column. Its ability to accelerate queries is limited.
binOffsets = [512+64+8+1, 64+8+1, 8+1, 1, 0]
binOffsetsExtended = [4096+512+64+8+1, 512+64+8+1, 64+8+1, 8+1, 1, 0]
_binFirstShift = 17 # How much to shift to get to finest bin.
_binNextShift = 3 # How much to shift to get to next larger bin.
_binOffsetOldToExtended = 4681 # From binRange.h
def __bin_from_range_standard(start, end):
"""
Given start,end in chromosome coordinates, assign it a bin.
There's a bin for each 128k segment, for each 1M segment, for each 8M segment, for each 64M segment,
and for each chromosome (which is assumed to be less than 512M.)
    A range goes into the smallest bin it will fit in.
"""
start_bin = start
end_bin = end-1
start_bin >>= _binFirstShift
end_bin >>= _binFirstShift
for i in range(0, len(binOffsets)):
if start_bin == end_bin:
return binOffsets[i] + start_bin
start_bin >>= _binNextShift
end_bin >>= _binNextShift
raise ValueError("start {}, end {} out of range in findBin (max is 512M)".format(start, end))
# Add one new level to get coverage past chrom sizes of 512 Mb.
# The effective limit is now the size of an integer, since chrom start and
# end coordinates are always stored in ints, i.e. up to 2Gb-1.
def __bin_from_range_extended(start, end):
"""
Given start,end in chromosome coordinates, assign it a bin.
There's a bin for each 128k segment, for each 1M segment, for each 8M segment, for each 64M segment,
for each 512M segment, and one top level bin for 4Gb.
    Note: since start and end are ints, the practical limit is up to 2Gb-1, and thus
    only four result bins are used on the second level.
A range goes into the smallest bin it will fit in.
"""
start_bin = start
end_bin = end-1
start_bin >>= _binFirstShift
end_bin >>= _binFirstShift
for i in range(0, len(binOffsetsExtended)):
if start_bin == end_bin:
return _binOffsetOldToExtended + binOffsetsExtended[i] + start_bin
start_bin >>= _binNextShift
end_bin >>= _binNextShift
raise ValueError("start {}, end {} out of range in findBin (max is 2Gb)".format(start, end))
def bin_from_range(start, end):
# Initial implementation is used when `chromEnd` is less than or equal to 536,870,912 = 2^29
# Extended implementation is used when `chromEnd` is greater than 536,870,912 = 2^29 and
# less than 2,147,483,647 = 2^31 - 1
if end <= 2**29:
return __bin_from_range_standard(start, end)
else:
return __bin_from_range_extended(start, end)
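# Minimal usage sketch (not part of the original module; the coordinates below are
# purely illustrative). A short feature maps to one of the finest 128k bins (585 for
# the first call, i.e. binOffsets[0] + 0), a feature crossing a 128k boundary falls
# back to a coarser 1M-level bin (73), and positions past 2^29 switch to the extended scheme.
if __name__ == '__main__':
    print(bin_from_range(100000, 100500))        # fits a single 128k bin
    print(bin_from_range(100000, 300000))        # spans 128k bins -> 1M-level bin
    print(bin_from_range(600000000, 600000500))  # beyond 2^29 -> extended binning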
| 38.413333
| 104
| 0.688303
|
ad3fe30df71df2956c5eb4e6c23ce0d923b0fe54
| 10,650
|
py
|
Python
|
src/segmentpy/_taskManager/blanketColorPalette_design.py
|
ZeliangSu/LRCS-Xlearn
|
50ff9c64f36c0d80417aa44aac2db68f392130f0
|
[
"Apache-2.0"
] | 4
|
2021-06-08T07:53:55.000Z
|
2022-02-16T15:10:15.000Z
|
src/segmentpy/_taskManager/blanketColorPalette_design.py
|
ZeliangSu/LRCS-Xlearn
|
50ff9c64f36c0d80417aa44aac2db68f392130f0
|
[
"Apache-2.0"
] | 7
|
2021-06-01T21:19:47.000Z
|
2022-02-25T07:36:58.000Z
|
src/segmentpy/_taskManager/blanketColorPalette_design.py
|
ZeliangSu/LRCS-Xlearn
|
50ff9c64f36c0d80417aa44aac2db68f392130f0
|
[
"Apache-2.0"
] | 1
|
2021-11-13T16:44:32.000Z
|
2021-11-13T16:44:32.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'src/segmentpy/_taskManager/blanketColorPalette.ui',
# licensing of 'src/segmentpy/_taskManager/blanketColorPalette.ui' applies.
#
# Created: Tue May 4 10:18:09 2021
# by: pyside2-uic running on PySide2 5.9.0~a1
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_Blanket(object):
def setupUi(self, Blanket):
Blanket.setObjectName("Blanket")
Blanket.resize(632, 200)
self.gridLayout = QtWidgets.QGridLayout(Blanket)
self.gridLayout.setObjectName("gridLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.verticalLayout_global = QtWidgets.QVBoxLayout()
self.verticalLayout_global.setObjectName("verticalLayout_global")
self.horizontalLayout_1 = QtWidgets.QHBoxLayout()
self.horizontalLayout_1.setObjectName("horizontalLayout_1")
self.verticalLayout231 = QtWidgets.QVBoxLayout()
self.verticalLayout231.setObjectName("verticalLayout231")
self.mdlL = QtWidgets.QLabel(Blanket)
self.mdlL.setAlignment(QtCore.Qt.AlignCenter)
self.mdlL.setObjectName("mdlL")
self.verticalLayout231.addWidget(self.mdlL)
self.horizontalLayout231 = QtWidgets.QHBoxLayout()
self.horizontalLayout231.setObjectName("horizontalLayout231")
self.label2310 = QtWidgets.QLabel(Blanket)
self.label2310.setObjectName("label2310")
self.horizontalLayout231.addWidget(self.label2310)
self.pushButton2310 = QtWidgets.QPushButton(Blanket)
self.pushButton2310.setObjectName("pushButton2310")
self.horizontalLayout231.addWidget(self.pushButton2310)
self.verticalLayout231.addLayout(self.horizontalLayout231)
self.horizontalLayout_1.addLayout(self.verticalLayout231)
self.line = QtWidgets.QFrame(Blanket)
self.line.setFrameShape(QtWidgets.QFrame.VLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.horizontalLayout_1.addWidget(self.line)
self.verticalLayout232 = QtWidgets.QVBoxLayout()
self.verticalLayout232.setObjectName("verticalLayout232")
self.batchL = QtWidgets.QLabel(Blanket)
self.batchL.setAlignment(QtCore.Qt.AlignCenter)
self.batchL.setObjectName("batchL")
self.verticalLayout232.addWidget(self.batchL)
self.horizontalLayout232 = QtWidgets.QHBoxLayout()
self.horizontalLayout232.setObjectName("horizontalLayout232")
self.label2320 = QtWidgets.QLabel(Blanket)
self.label2320.setObjectName("label2320")
self.horizontalLayout232.addWidget(self.label2320)
self.pushButton2320 = QtWidgets.QPushButton(Blanket)
self.pushButton2320.setObjectName("pushButton2320")
self.horizontalLayout232.addWidget(self.pushButton2320)
self.verticalLayout232.addLayout(self.horizontalLayout232)
self.horizontalLayout_1.addLayout(self.verticalLayout232)
self.line_2 = QtWidgets.QFrame(Blanket)
self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.horizontalLayout_1.addWidget(self.line_2)
self.verticalLayout233 = QtWidgets.QVBoxLayout()
self.verticalLayout233.setObjectName("verticalLayout233")
self.kernelL = QtWidgets.QLabel(Blanket)
self.kernelL.setAlignment(QtCore.Qt.AlignCenter)
self.kernelL.setObjectName("kernelL")
self.verticalLayout233.addWidget(self.kernelL)
self.horizontalLayout233 = QtWidgets.QHBoxLayout()
self.horizontalLayout233.setObjectName("horizontalLayout233")
self.label2330 = QtWidgets.QLabel(Blanket)
self.label2330.setObjectName("label2330")
self.horizontalLayout233.addWidget(self.label2330)
self.pushButton2330 = QtWidgets.QPushButton(Blanket)
self.pushButton2330.setObjectName("pushButton2330")
self.horizontalLayout233.addWidget(self.pushButton2330)
self.verticalLayout233.addLayout(self.horizontalLayout233)
self.horizontalLayout_1.addLayout(self.verticalLayout233)
self.verticalLayout_global.addLayout(self.horizontalLayout_1)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.verticalLayout234 = QtWidgets.QVBoxLayout()
self.verticalLayout234.setObjectName("verticalLayout234")
self.ncL = QtWidgets.QLabel(Blanket)
self.ncL.setAlignment(QtCore.Qt.AlignCenter)
self.ncL.setObjectName("ncL")
self.verticalLayout234.addWidget(self.ncL)
self.horizontalLayout234 = QtWidgets.QHBoxLayout()
self.horizontalLayout234.setObjectName("horizontalLayout234")
self.label2340 = QtWidgets.QLabel(Blanket)
self.label2340.setObjectName("label2340")
self.horizontalLayout234.addWidget(self.label2340)
self.pushButton2340 = QtWidgets.QPushButton(Blanket)
self.pushButton2340.setObjectName("pushButton2340")
self.horizontalLayout234.addWidget(self.pushButton2340)
self.verticalLayout234.addLayout(self.horizontalLayout234)
self.horizontalLayout_7.addLayout(self.verticalLayout234)
self.line_3 = QtWidgets.QFrame(Blanket)
self.line_3.setFrameShape(QtWidgets.QFrame.VLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.horizontalLayout_7.addWidget(self.line_3)
self.verticalLayout235 = QtWidgets.QVBoxLayout()
self.verticalLayout235.setObjectName("verticalLayout235")
self.lrL = QtWidgets.QLabel(Blanket)
self.lrL.setAlignment(QtCore.Qt.AlignCenter)
self.lrL.setObjectName("lrL")
self.verticalLayout235.addWidget(self.lrL)
self.horizontalLayout235 = QtWidgets.QHBoxLayout()
self.horizontalLayout235.setObjectName("horizontalLayout235")
self.label2350 = QtWidgets.QLabel(Blanket)
self.label2350.setObjectName("label2350")
self.horizontalLayout235.addWidget(self.label2350)
self.pushButton2350 = QtWidgets.QPushButton(Blanket)
self.pushButton2350.setObjectName("pushButton2350")
self.horizontalLayout235.addWidget(self.pushButton2350)
self.verticalLayout235.addLayout(self.horizontalLayout235)
self.horizontalLayout_7.addLayout(self.verticalLayout235)
self.line_4 = QtWidgets.QFrame(Blanket)
self.line_4.setFrameShape(QtWidgets.QFrame.VLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.horizontalLayout_7.addWidget(self.line_4)
self.verticalLayout236 = QtWidgets.QVBoxLayout()
self.verticalLayout236.setObjectName("verticalLayout236")
self.lkL = QtWidgets.QLabel(Blanket)
self.lkL.setAlignment(QtCore.Qt.AlignCenter)
self.lkL.setObjectName("lkL")
self.verticalLayout236.addWidget(self.lkL)
self.horizontalLayout236 = QtWidgets.QHBoxLayout()
self.horizontalLayout236.setObjectName("horizontalLayout236")
self.label2360 = QtWidgets.QLabel(Blanket)
self.label2360.setObjectName("label2360")
self.horizontalLayout236.addWidget(self.label2360)
self.pushButton2360 = QtWidgets.QPushButton(Blanket)
self.pushButton2360.setObjectName("pushButton2360")
self.horizontalLayout236.addWidget(self.pushButton2360)
self.verticalLayout236.addLayout(self.horizontalLayout236)
self.horizontalLayout_7.addLayout(self.verticalLayout236)
self.verticalLayout_global.addLayout(self.horizontalLayout_7)
self.verticalLayout.addLayout(self.verticalLayout_global)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.buttonBox = QtWidgets.QDialogButtonBox(Blanket)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.horizontalLayout.addWidget(self.buttonBox)
self.verticalLayout.addLayout(self.horizontalLayout)
self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.retranslateUi(Blanket)
QtCore.QMetaObject.connectSlotsByName(Blanket)
def retranslateUi(self, Blanket):
Blanket.setWindowTitle(QtWidgets.QApplication.translate("Blanket", "Form", None, -1))
self.mdlL.setText(QtWidgets.QApplication.translate("Blanket", "model", None, -1))
self.label2310.setText(QtWidgets.QApplication.translate("Blanket", "TextLabel", None, -1))
self.pushButton2310.setText(QtWidgets.QApplication.translate("Blanket", "PushButton", None, -1))
self.batchL.setText(QtWidgets.QApplication.translate("Blanket", "batch", None, -1))
self.label2320.setText(QtWidgets.QApplication.translate("Blanket", "TextLabel", None, -1))
self.pushButton2320.setText(QtWidgets.QApplication.translate("Blanket", "PushButton", None, -1))
self.kernelL.setText(QtWidgets.QApplication.translate("Blanket", "kernel", None, -1))
self.label2330.setText(QtWidgets.QApplication.translate("Blanket", "TextLabel", None, -1))
self.pushButton2330.setText(QtWidgets.QApplication.translate("Blanket", "PushButton", None, -1))
self.ncL.setText(QtWidgets.QApplication.translate("Blanket", "nb_conv", None, -1))
self.label2340.setText(QtWidgets.QApplication.translate("Blanket", "TextLabel", None, -1))
self.pushButton2340.setText(QtWidgets.QApplication.translate("Blanket", "PushButton", None, -1))
self.lrL.setText(QtWidgets.QApplication.translate("Blanket", "learning rate: init", None, -1))
self.label2350.setText(QtWidgets.QApplication.translate("Blanket", "TextLabel", None, -1))
self.pushButton2350.setText(QtWidgets.QApplication.translate("Blanket", "PushButton", None, -1))
self.lkL.setText(QtWidgets.QApplication.translate("Blanket", "learning rate: decay", None, -1))
self.label2360.setText(QtWidgets.QApplication.translate("Blanket", "TextLabel", None, -1))
self.pushButton2360.setText(QtWidgets.QApplication.translate("Blanket", "PushButton", None, -1))
| 58.839779
| 114
| 0.734366
|
d46dd25a7f67fc0faf562ea063b646920eba78ca
| 5,449
|
py
|
Python
|
PDI - Fourier e Wavelet/01 - Fourier/codes/Python/src/T13+-+Code.py
|
lapisco/Lapisco_Courses
|
3c0346b2c787307a52d6bee32f1a04efb4bba65d
|
[
"MIT"
] | 2
|
2020-01-03T15:32:39.000Z
|
2020-02-27T22:49:26.000Z
|
PDI - Fourier e Wavelet/01 - Fourier/codes/Python/src/T13+-+Code.py
|
lapisco/Lapisco_Courses
|
3c0346b2c787307a52d6bee32f1a04efb4bba65d
|
[
"MIT"
] | null | null | null |
PDI - Fourier e Wavelet/01 - Fourier/codes/Python/src/T13+-+Code.py
|
lapisco/Lapisco_Courses
|
3c0346b2c787307a52d6bee32f1a04efb4bba65d
|
[
"MIT"
] | 9
|
2019-09-24T16:42:52.000Z
|
2021-09-14T19:33:49.000Z
|
# coding: utf-8
# # Introduction
# Transforming a signal into domains other than continuous or discrete time can provide important features for signal analysis. In particular, the transformation to the frequency domain using the Fourier transform exposes the behaviour of the components that make up the signal.
#
# Any signal can be represented as an infinite sum of sines and cosines; these components are called harmonics. A frequency-domain analysis using the Fourier transform (also called spectral analysis) therefore lets the designer identify directly which components and frequencies are present in the signal. It is a way of seeing the smallest parts that make up a signal.
#
# For images, this processing is expressed mathematically by applying the following transform to the signal:
#
# $$F[m,n] = \frac{1}{UV} \int_0^U \int_0^V F(u,v)\, e^{j2\pi(u\,m\,x_0 + v\,n\,y_0)}\, du\, dv$$
# ## Implementation and discussion
# In[1]:
import cv2
import numpy as np
import matplotlib.pyplot as plt
# - Open the image:
# In[2]:
img = np.array(cv2.imread('lena.png', cv2.IMREAD_GRAYSCALE))
rows, cols = img.shape
# - Apply the discrete Fourier transform:
# In[3]:
img_dft = np.fft.fft2(img)
# - Bring the DC component to the centre of the image by shifting n/2 in both directions:
# In[4]:
img_dft_shift = np.fft.fftshift(img_dft)
# - Compute the magnitude from the real and imaginary components:
# In[5]:
img_dft_mag = np.abs(img_dft_shift)
# In[6]:
plt.figure(2,figsize=(10,9))
plt.subplot(121)
plt.imshow(img, 'gray')
plt.title("Imagem original")
plt.axis('OFF')
plt.subplot(122)
plt.imshow(20*np.log(img_dft_mag), 'gray')
plt.title("Espectro em frequência")
plt.axis('OFF')
plt.show()
# - Compute the inverse transform:
# In[7]:
img_idft = np.fft.ifft2(img_dft)
img_inversa = np.abs(img_idft)
#print(img_idft)
plt.figure(3)
plt.imshow(img_inversa, 'gray')
plt.title("Imagem após IDFT")
plt.axis('OFF')
plt.show()
# Converting to the frequency domain with the Fourier transform provides a graphical way of extracting information from the image. The DC (zero-frequency) component sits at the centre of the spectrum, and frequency values increase from the centre towards the border of the image.
#
# The inverse transform was carried out successfully, recovering the original image.
# ## Frequency-domain filtering
# Just as in the time domain, filtering can also be performed in the frequency domain. While in the spatial domain filtering is represented by a convolution between two signals, in the frequency domain it is represented by a simple point-wise multiplication, which greatly simplifies the filtering process.
# ### Low-pass filter:
# - Build the Gaussian mask
# In[8]:
def gaussianKernel(h1, h2):
import numpy as np
import matplotlib.pyplot as plt
import math as m
    ## Returns a normalized 2D Gaussian kernel array for general purposes
x, y = np.mgrid[0:h2, 0:h1]
x = x-h2/2
y = y-h1/2
sigma = 1
g = np.exp( -( x**2 + y**2 ) / (2*sigma**2) )
return g / g.sum()
filterKernel = gaussianKernel(rows,cols)
# In[9]:
filter_dft = np.fft.fft2(filterKernel)
filter_dft_shift = np.fft.fftshift(filter_dft)
filter_dft_mag = np.abs(filter_dft_shift)
plt.figure(3+1)
plt.imshow(filter_dft_mag, 'gray')
plt.title("Espectro em frequência do filtro Gaussiano com spread 1")
plt.show()
# - Frequency-domain filtering
# In[10]:
filter_img = img_dft_shift * filter_dft_shift
filter_img_mag = np.abs(filter_img)
img_back = np.fft.fftshift(np.fft.ifft2(filter_img))
img_back_mag = np.abs(img_back)
plt.figure(5, figsize=(12,12))
plt.subplot(221)
plt.imshow((img), 'gray')
plt.title("Imagem original")
plt.subplot(222)
plt.imshow(20*np.log(img_dft_mag), 'gray')
plt.title("Espectro da imagem original")
plt.subplot(223)
plt.imshow(img_back_mag, 'gray')
plt.title("Imagem após filtragem na frequência")
plt.subplot(224)
plt.imshow(20*np.log(filter_img_mag), 'gray')
plt.title("Especto da imagem após filtragem na frequência")
plt.show()
# The chosen filter was the Gaussian filter, acting as a low-pass filter: it attenuates high frequencies and lets through frequencies below the cut-off frequency.
#
# The blurring effect in the final image is explained by comparing the original spectrum with the spectrum after filtering: scattered, isolated values correspond to sudden transitions (high frequencies) in the image and show up as points in the spectrum. After the low-pass filter is applied these transitions are attenuated and the spectrum shows fewer of them; the result of the inverse transform is the original image, but with the blurring expected from the Gaussian filter.
# # Conclusions
# Frequency-domain analysis of images is a tool that provides visual insight into an image and exposes patterns in how the colours behave; looking at the spectrum can help identify components such as noise or objects.
#
# Filtering is also possible in the frequency domain, just as in the spatial domain. It is a faster process because it involves a simple point-wise multiplication; most of the computational cost of the technique probably lies in computing the forward and inverse Fourier transforms.
| 32.242604
| 521
| 0.758121
|
6ddc8dc31ef9bc47e72cb210e98f1a3586326b90
| 1,427
|
py
|
Python
|
src/main/wallpaper/wallpaperCompiler.py
|
cassianomaia/compilador-wallpaper
|
e4aa4cb969b0e49148c3177af60851310519f55e
|
[
"MIT"
] | null | null | null |
src/main/wallpaper/wallpaperCompiler.py
|
cassianomaia/compilador-wallpaper
|
e4aa4cb969b0e49148c3177af60851310519f55e
|
[
"MIT"
] | null | null | null |
src/main/wallpaper/wallpaperCompiler.py
|
cassianomaia/compilador-wallpaper
|
e4aa4cb969b0e49148c3177af60851310519f55e
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import sys
from antlr4 import *
from wallpaperLexer import wallpaperLexer
from wallpaperParser import wallpaperParser
from Semantico import Semantico
from Wallpaper import Wallpaper
def main(argv):
input = FileStream(argv[1])
lexer = wallpaperLexer(input)
stream = CommonTokenStream(lexer)
parser = wallpaperParser(stream)
tree = parser.programa()
analisador_semantico = Semantico()
analisador_semantico.visitPrograma(tree)
print('----- Imagens -----')
for tabela in analisador_semantico.imagens.tabelas:
print(tabela.nome_tabela)
for simbolo in tabela.simbolos:
print(simbolo.tipo, simbolo.valor)
print()
print('----- Formas -----')
for tabela in analisador_semantico.formas.tabelas:
print('------\n')
print(tabela.nome_tabela)
for simbolo in tabela.simbolos:
print(simbolo.tipo, simbolo.valor)
print()
print('----- Textos -----')
for tabela in analisador_semantico.texto.tabelas:
print('------\n')
print(tabela.nome_tabela)
for simbolo in tabela.simbolos:
print(simbolo.tipo, simbolo.valor)
imagens = analisador_semantico.imagens
formas = analisador_semantico.formas
textos = analisador_semantico.texto
w = Wallpaper(imagens, formas, textos)
w.run()
if __name__ == '__main__':
main(sys.argv)
| 27.442308
| 55
| 0.672039
|
9fc3ab97104abe9c32ee27eee6a52fbaf7d71e40
| 123,910
|
py
|
Python
|
preprocess/segmentation/mrcnn/model.py
|
wan-h/JD-AI-Fashion-Challenge
|
817f693672f418745e3a4c89a0417a3165b08130
|
[
"MIT"
] | 3
|
2018-05-06T15:15:21.000Z
|
2018-05-13T12:31:42.000Z
|
preprocess/segmentation/mrcnn/model.py
|
wan-h/JD-AI-Fashion-Challenge
|
817f693672f418745e3a4c89a0417a3165b08130
|
[
"MIT"
] | null | null | null |
preprocess/segmentation/mrcnn/model.py
|
wan-h/JD-AI-Fashion-Challenge
|
817f693672f418745e3a4c89a0417a3165b08130
|
[
"MIT"
] | null | null | null |
"""
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import random
import datetime
import re
import math
import logging
from collections import OrderedDict
import multiprocessing
import numpy as np
import skimage.transform
import tensorflow as tf
import keras
import keras.backend as K
import keras.layers as KL
import keras.engine as KE
import keras.models as KM
from preprocess.segmentation.mrcnn import utils
# Requires TensorFlow 1.3+ and Keras 2.0.8+.
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("1.3")
assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')
############################################################
# Utility Functions
############################################################
def log(text, array=None):
"""Prints a text message. And, optionally, if a Numpy array is provided it
    prints its shape, min, and max values.
"""
if array is not None:
text = text.ljust(25)
text += ("shape: {:20} min: {:10.5f} max: {:10.5f} {}".format(
str(array.shape),
array.min() if array.size else "",
array.max() if array.size else "",
array.dtype))
print(text)
class BatchNorm(KL.BatchNormalization):
"""Extends the Keras BatchNormalization class to allow a central place
to make changes if needed.
Batch normalization has a negative effect on training if batches are small
so this layer is often frozen (via setting in Config class) and functions
    as a linear layer.
"""
def call(self, inputs, training=None):
"""
Note about training values:
None: Train BN layers. This is the normal mode
False: Freeze BN layers. Good when batch size is small
            True: (don't use). Sets the layer to training mode even when running inference
"""
return super(self.__class__, self).call(inputs, training=training)
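# Usage sketch (illustrative, not part of the original file): freezing the wrapper
# above at call time, which is how the rest of this module threads train_bn through.
# The layer/model names here are hypothetical.
def _example_frozen_batchnorm():
    inputs = KL.Input(shape=[8])
    frozen = BatchNorm(name="bn_example")(inputs, training=False)  # acts like a linear layer
    return KM.Model(inputs, frozen, name="frozen_bn_example")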
def compute_backbone_shapes(config, image_shape):
"""Computes the width and height of each stage of the backbone network.
Returns:
[N, (height, width)]. Where N is the number of stages
"""
# Currently supports ResNet only
assert config.BACKBONE in ["resnet50", "resnet101"]
return np.array(
[[int(math.ceil(image_shape[0] / stride)),
int(math.ceil(image_shape[1] / stride))]
for stride in config.BACKBONE_STRIDES])
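# Illustrative sketch (not part of the original file): with the usual FPN strides
# [4, 8, 16, 32, 64] and a 1024x1024 input, the five backbone stages come out as
# 256, 128, 64, 32 and 16 pixels on a side. The tiny config stand-in below is hypothetical.
def _example_backbone_shapes():
    class _DemoConfig:
        BACKBONE = "resnet101"
        BACKBONE_STRIDES = [4, 8, 16, 32, 64]
    return compute_backbone_shapes(_DemoConfig(), image_shape=[1024, 1024, 3])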
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
use_bias=True, train_bn=True):
"""The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer in the main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
x = KL.Add()([x, input_tensor])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
strides=(2, 2), use_bias=True, train_bn=True):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer in the main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    Note that from stage 3, the first conv layer in the main path uses strides=(2, 2),
    and the shortcut uses strides=(2, 2) as well.
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
'2c', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)
x = KL.Add()([x, shortcut])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
"""Build a ResNet graph.
architecture: Can be resnet50 or resnet101
stage5: Boolean. If False, stage5 of the network is not created
        train_bn: Boolean. Train or freeze Batch Norm layers
"""
assert architecture in ["resnet50", "resnet101"]
# Stage 1
x = KL.ZeroPadding2D((3, 3))(input_image)
x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
x = BatchNorm(name='bn_conv1')(x, training=train_bn)
x = KL.Activation('relu')(x)
C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# Stage 2
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
# Stage 3
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
# Stage 4
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
block_count = {"resnet50": 5, "resnet101": 22}[architecture]
for i in range(block_count):
x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
C4 = x
# Stage 5
if stage5:
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
else:
C5 = None
return [C1, C2, C3, C4, C5]
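# Worked example (illustrative, not part of the original file): counting the residual
# blocks built above. Only stage 4 differs between the two backbones: resnet50 uses
# 1 conv block + 5 identity blocks there, resnet101 uses 1 + 22, giving per-stage
# totals of 3 + 4 + 6 + 3 = 16 and 3 + 4 + 23 + 3 = 33 blocks with stage5 enabled.
def _example_backbone_block_counts(architecture="resnet101"):
    stage4_identity_blocks = {"resnet50": 5, "resnet101": 22}[architecture]
    per_stage = [3, 4, 1 + stage4_identity_blocks, 3]  # stages 2-5
    return per_stage, sum(per_stage)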
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, (y1, x1, y2, x2)] boxes to update
deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply
"""
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= tf.exp(deltas[:, 2])
width *= tf.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
return result
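# Worked example (illustrative, not part of the original file): the same delta math in
# plain NumPy. A unit box at the origin, shifted by one full height/width and scaled by
# exp(0) = 1, keeps its size and moves to (1, 1)-(2, 2).
def _example_apply_box_deltas_numpy():
    boxes = np.array([[0.0, 0.0, 1.0, 1.0]])   # (y1, x1, y2, x2)
    deltas = np.array([[1.0, 1.0, 0.0, 0.0]])  # (dy, dx, log(dh), log(dw))
    height = boxes[:, 2] - boxes[:, 0]
    width = boxes[:, 3] - boxes[:, 1]
    center_y = boxes[:, 0] + 0.5 * height + deltas[:, 0] * height
    center_x = boxes[:, 1] + 0.5 * width + deltas[:, 1] * width
    height = height * np.exp(deltas[:, 2])
    width = width * np.exp(deltas[:, 3])
    y1 = center_y - 0.5 * height
    x1 = center_x - 0.5 * width
    return np.stack([y1, x1, y1 + height, x1 + width], axis=1)  # -> [[1., 1., 2., 2.]]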
def clip_boxes_graph(boxes, window):
"""
boxes: [N, (y1, x1, y2, x2)]
window: [4] in the form y1, x1, y2, x2
"""
# Split
wy1, wx1, wy2, wx2 = tf.split(window, 4)
y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
# Clip
y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
clipped.set_shape((clipped.shape[0], 4))
return clipped
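# Worked example (illustrative, not part of the original file): the same clipping for a
# single box in plain Python. A box poking outside the normalized window [0, 0, 1, 1]
# has its coordinates clamped to the window edges.
def _example_clip_box(box=(-0.1, 0.2, 0.5, 1.3), window=(0.0, 0.0, 1.0, 1.0)):
    wy1, wx1, wy2, wx2 = window
    y1, x1, y2, x2 = box
    def clamp(v, lo, hi):
        return min(max(v, lo), hi)
    return (clamp(y1, wy1, wy2), clamp(x1, wx1, wx2),
            clamp(y2, wy1, wy2), clamp(x2, wx1, wx2))  # -> (0.0, 0.2, 0.5, 1.0)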
class ProposalLayer(KE.Layer):
"""Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
box refinement deltas to anchors.
Inputs:
rpn_probs: [batch, anchors, (bg prob, fg prob)]
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
anchors: [batch, (y1, x1, y2, x2)] anchors in normalized coordinates
Returns:
Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
"""
def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):
super(ProposalLayer, self).__init__(**kwargs)
self.config = config
self.proposal_count = proposal_count
self.nms_threshold = nms_threshold
def call(self, inputs):
# Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
scores = inputs[0][:, :, 1]
# Box deltas [batch, num_rois, 4]
deltas = inputs[1]
deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
# Anchors
anchors = inputs[2]
# Improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = tf.minimum(6000, tf.shape(anchors)[1])
ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
name="top_anchors").indices
scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),
self.config.IMAGES_PER_GPU,
names=["pre_nms_anchors"])
# Apply deltas to anchors to get refined anchors.
# [batch, N, (y1, x1, y2, x2)]
boxes = utils.batch_slice([pre_nms_anchors, deltas],
lambda x, y: apply_box_deltas_graph(x, y),
self.config.IMAGES_PER_GPU,
names=["refined_anchors"])
# Clip to image boundaries. Since we're in normalized coordinates,
# clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]
window = np.array([0, 0, 1, 1], dtype=np.float32)
boxes = utils.batch_slice(boxes,
lambda x: clip_boxes_graph(x, window),
self.config.IMAGES_PER_GPU,
names=["refined_anchors_clipped"])
# Filter out small boxes
# According to Xinlei Chen's paper, this reduces detection accuracy
# for small objects, so we're skipping it.
# Non-max suppression
def nms(boxes, scores):
indices = tf.image.non_max_suppression(
boxes, scores, self.proposal_count,
self.nms_threshold, name="rpn_non_max_suppression")
proposals = tf.gather(boxes, indices)
# Pad if needed
padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
proposals = tf.pad(proposals, [(0, padding), (0, 0)])
return proposals
proposals = utils.batch_slice([boxes, scores], nms,
self.config.IMAGES_PER_GPU)
return proposals
def compute_output_shape(self, input_shape):
return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
    """Implementation of log2. TF doesn't have a native implementation."""
return tf.log(x) / tf.log(2.0)
class PyramidROIAlign(KE.Layer):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_shape: [height, width] of the output pooled regions. Usually [7, 7]
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates. Possibly padded with zeros if not enough
boxes to fill the array.
- image_meta: [batch, (meta data)] Image details. See compose_image_meta()
- Feature maps: List of feature maps from different levels of the pyramid.
Each is [batch, height, width, channels]
Output:
Pooled regions in the shape: [batch, num_boxes, height, width, channels].
    The width and height are those specified in the pool_shape in the layer
constructor.
"""
def __init__(self, pool_shape, **kwargs):
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
def call(self, inputs):
# Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# Image meta
# Holds details about the image. See compose_image_meta()
image_meta = inputs[1]
# Feature Maps. List of feature maps from different level of the
# feature pyramid. Each is [batch, height, width, channels]
feature_maps = inputs[2:]
# Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
h = y2 - y1
w = x2 - x1
# Use shape of first image. Images in a batch must have the same size.
image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)
roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
roi_level = tf.minimum(5, tf.maximum(
2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
roi_level = tf.squeeze(roi_level, 2)
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
box_to_level = []
for i, level in enumerate(range(2, 6)):
ix = tf.where(tf.equal(roi_level, level))
level_boxes = tf.gather_nd(boxes, ix)
            # Box indices for crop_and_resize.
box_indices = tf.cast(ix[:, 0], tf.int32)
# Keep track of which box is mapped to which level
box_to_level.append(ix)
            # Stop gradient propagation to ROI proposals
level_boxes = tf.stop_gradient(level_boxes)
box_indices = tf.stop_gradient(box_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
# Result: [batch * num_boxes, pool_height, pool_width, channels]
pooled.append(tf.image.crop_and_resize(
feature_maps[i], level_boxes, box_indices, self.pool_shape,
method="bilinear"))
# Pack pooled features into one tensor
pooled = tf.concat(pooled, axis=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = tf.concat(box_to_level, axis=0)
box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
axis=1)
# Rearrange pooled features to match the order of the original boxes
# Sort box_to_level by batch then box index
# TF doesn't have a way to sort by two columns, so merge them and sort.
sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
box_to_level)[0]).indices[::-1]
ix = tf.gather(box_to_level[:, 2], ix)
pooled = tf.gather(pooled, ix)
# Re-add the batch dimension
pooled = tf.expand_dims(pooled, 0)
return pooled
def compute_output_shape(self, input_shape):
return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1],)
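# Worked example (illustrative, not part of the original file): the FPN level assignment
# used in call() above, evaluated in plain NumPy. A ROI covering 224x224 pixels of a
# 1024x1024 image lands on level 4 (P4); one four times larger maps to P5, four times
# smaller to P3, and the result is always clipped to the [2, 5] range.
def _example_roi_pyramid_level(roi_pixels=224.0, image_size=1024.0):
    h = w = roi_pixels / image_size                 # ROI size in normalized coordinates
    image_area = image_size * image_size
    level = 4 + np.log2(np.sqrt(h * w) / (224.0 / np.sqrt(image_area)))
    return int(np.clip(np.round(level), 2, 5))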
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
"""
    # 1. Tile boxes2 and repeat boxes1. This allows us to compare
# every boxes1 against every boxes2 without loops.
    # TF doesn't have an equivalent to np.repeat() so simulate it
# using tf.tile() and tf.reshape.
b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
[1, 1, tf.shape(boxes2)[0]]), [-1, 4])
b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
y1 = tf.maximum(b1_y1, b2_y1)
x1 = tf.maximum(b1_x1, b2_x1)
y2 = tf.minimum(b1_y2, b2_y2)
x2 = tf.minimum(b1_x2, b2_x2)
intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
# 3. Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area + b2_area - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])
return overlaps
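# Worked example (illustrative, not part of the original file): the same IoU computation
# for a single pair of boxes in plain Python. Two unit boxes offset by half a side share
# a 0.5 x 1.0 region, so IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
def _example_iou(b1=(0.0, 0.0, 1.0, 1.0), b2=(0.0, 0.5, 1.0, 1.5)):
    y1, x1 = max(b1[0], b2[0]), max(b1[1], b2[1])
    y2, x2 = min(b1[2], b2[2]), min(b1[3], b2[3])
    intersection = max(y2 - y1, 0.0) * max(x2 - x1, 0.0)
    area1 = (b1[2] - b1[0]) * (b1[3] - b1[1])
    area2 = (b2[2] - b2[0]) * (b2[3] - b2[1])
    return intersection / (area1 + area2 - intersection)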
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
"""Generates detection targets for one image. Subsamples proposals and
generates target class IDs, bounding box deltas, and masks for each.
Inputs:
proposals: [N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [MAX_GT_INSTANCES] int class IDs
gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
deltas: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
Class-specific bbox refinements.
masks: [TRAIN_ROIS_PER_IMAGE, height, width). Masks cropped to bbox
boundaries and resized to neural network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
# Assertions
asserts = [
tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
name="roi_assertion"),
]
with tf.control_dependencies(asserts):
proposals = tf.identity(proposals)
# Remove zero padding
proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
name="trim_gt_class_ids")
gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
name="trim_gt_masks")
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
crowd_boxes = tf.gather(gt_boxes, crowd_ix)
crowd_masks = tf.gather(gt_masks, crowd_ix, axis=2)
gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
# Compute overlaps matrix [proposals, gt_boxes]
overlaps = overlaps_graph(proposals, gt_boxes)
# Compute overlaps with crowd boxes [anchors, crowds]
crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
    # Determine positive and negative ROIs
roi_iou_max = tf.reduce_max(overlaps, axis=1)
# 1. Positive ROIs are those with >= 0.5 IoU with a GT box
positive_roi_bool = (roi_iou_max >= 0.5)
positive_indices = tf.where(positive_roi_bool)[:, 0]
# 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
# Subsample ROIs. Aim for 33% positive
# Positive ROIs
positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
config.ROI_POSITIVE_RATIO)
positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
positive_count = tf.shape(positive_indices)[0]
# Negative ROIs. Add enough to maintain positive:negative ratio.
r = 1.0 / config.ROI_POSITIVE_RATIO
negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
# Gather selected ROIs
positive_rois = tf.gather(proposals, positive_indices)
negative_rois = tf.gather(proposals, negative_indices)
# Assign positive ROIs to GT boxes.
positive_overlaps = tf.gather(overlaps, positive_indices)
roi_gt_box_assignment = tf.argmax(positive_overlaps, axis=1)
roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
# Compute bbox refinement for positive ROIs
deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
deltas /= config.BBOX_STD_DEV
# Assign positive ROIs to GT masks
# Permute masks to [N, height, width, 1]
transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
# Pick the right mask for each ROI
roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
# Compute mask targets
boxes = positive_rois
if config.USE_MINI_MASK:
        # Transform ROI coordinates from normalized image space
# to normalized mini-mask space.
y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
gt_h = gt_y2 - gt_y1
gt_w = gt_x2 - gt_x1
y1 = (y1 - gt_y1) / gt_h
x1 = (x1 - gt_x1) / gt_w
y2 = (y2 - gt_y1) / gt_h
x2 = (x2 - gt_x1) / gt_w
boxes = tf.concat([y1, x1, y2, x2], 1)
box_ids = tf.range(0, tf.shape(roi_masks)[0])
masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
box_ids,
config.MASK_SHAPE)
# Remove the extra dimension from masks.
masks = tf.squeeze(masks, axis=3)
# Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
# binary cross entropy loss.
masks = tf.round(masks)
# Append negative ROIs and pad bbox deltas and masks that
# are not used for negative ROIs with zeros.
rois = tf.concat([positive_rois, negative_rois], axis=0)
N = tf.shape(negative_rois)[0]
P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
rois = tf.pad(rois, [(0, P), (0, 0)])
roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])
return rois, roi_gt_class_ids, deltas, masks
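# Worked example (illustrative, not part of the original file): the positive/negative
# subsampling arithmetic above, with config values such as TRAIN_ROIS_PER_IMAGE=200 and
# ROI_POSITIVE_RATIO=0.33. At most 66 positive ROIs are kept, and if all 66 are found,
# int(1/0.33 * 66) - 66 = 134 negatives are added to keep roughly a 1:2 ratio.
def _example_roi_subsampling(train_rois_per_image=200, roi_positive_ratio=0.33,
                             positives_found=66):
    positive_count = min(int(train_rois_per_image * roi_positive_ratio), positives_found)
    negative_count = int((1.0 / roi_positive_ratio) * positive_count) - positive_count
    return positive_count, negative_count  # -> (66, 134)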
class DetectionTargetLayer(KE.Layer):
"""Subsamples proposals and generates target box refinement, class_ids,
and masks for each.
Inputs:
proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
coordinates.
gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
coordinates
target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,
(dy, dx, log(dh), log(dw), class_id)]
Class-specific bbox refinements.
target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)
Masks cropped to bbox boundaries and resized to neural
network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
def __init__(self, config, **kwargs):
super(DetectionTargetLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
proposals = inputs[0]
gt_class_ids = inputs[1]
gt_boxes = inputs[2]
gt_masks = inputs[3]
# Slice the batch and run a graph for each slice
# TODO: Rename target_bbox to target_deltas for clarity
names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
outputs = utils.batch_slice(
[proposals, gt_class_ids, gt_boxes, gt_masks],
lambda w, x, y, z: detection_targets_graph(
w, x, y, z, self.config),
self.config.IMAGES_PER_GPU, names=names)
return outputs
def compute_output_shape(self, input_shape):
return [
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois
(None, 1), # class_ids
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas
(None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
self.config.MASK_SHAPE[1]) # masks
]
def compute_mask(self, inputs, mask=None):
return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def refine_detections_graph(rois, probs, deltas, window, config):
"""Refine classified proposals and filter overlaps and return final
detections.
Inputs:
rois: [N, (y1, x1, y2, x2)] in normalized coordinates
probs: [N, num_classes]. Class probabilities.
deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
bounding box deltas.
        window: (y1, x1, y2, x2) in image coordinates. The part of the image
            that contains the actual image, excluding the padding.
Returns detections shaped: [N, (y1, x1, y2, x2, class_id, score)] where
coordinates are normalized.
"""
# Class IDs per ROI
class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
# Class probability of the top class of each ROI
indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
class_scores = tf.gather_nd(probs, indices)
# Class-specific bounding box deltas
deltas_specific = tf.gather_nd(deltas, indices)
# Apply bounding box deltas
# Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
refined_rois = apply_box_deltas_graph(
rois, deltas_specific * config.BBOX_STD_DEV)
# Clip boxes to image window
refined_rois = clip_boxes_graph(refined_rois, window)
# TODO: Filter out boxes with zero area
# Filter out background boxes
keep = tf.where(class_ids > 0)[:, 0]
# Filter out low confidence boxes
if config.DETECTION_MIN_CONFIDENCE:
conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
tf.expand_dims(conf_keep, 0))
keep = tf.sparse_tensor_to_dense(keep)[0]
# Apply per-class NMS
# 1. Prepare variables
pre_nms_class_ids = tf.gather(class_ids, keep)
pre_nms_scores = tf.gather(class_scores, keep)
pre_nms_rois = tf.gather(refined_rois, keep)
unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]
def nms_keep_map(class_id):
"""Apply Non-Maximum Suppression on ROIs of the given class."""
# Indices of ROIs of the given class
ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
# Apply NMS
class_keep = tf.image.non_max_suppression(
tf.gather(pre_nms_rois, ixs),
tf.gather(pre_nms_scores, ixs),
max_output_size=config.DETECTION_MAX_INSTANCES,
iou_threshold=config.DETECTION_NMS_THRESHOLD)
        # Map indices
class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
# Pad with -1 so returned tensors have the same shape
gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
class_keep = tf.pad(class_keep, [(0, gap)],
mode='CONSTANT', constant_values=-1)
# Set shape so map_fn() can infer result shape
class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
return class_keep
# 2. Map over class IDs
nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
dtype=tf.int64)
# 3. Merge results into one list, and remove -1 padding
nms_keep = tf.reshape(nms_keep, [-1])
nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
# 4. Compute intersection between keep and nms_keep
keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
tf.expand_dims(nms_keep, 0))
keep = tf.sparse_tensor_to_dense(keep)[0]
# Keep top detections
roi_count = config.DETECTION_MAX_INSTANCES
class_scores_keep = tf.gather(class_scores, keep)
num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
keep = tf.gather(keep, top_ids)
# Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
# Coordinates are normalized.
detections = tf.concat([
tf.gather(refined_rois, keep),
tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],
tf.gather(class_scores, keep)[..., tf.newaxis]
], axis=1)
# Pad with zeros if detections < DETECTION_MAX_INSTANCES
gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
return detections
class DetectionLayer(KE.Layer):
"""Takes classified proposal boxes and their bounding box deltas and
returns the final detection boxes.
Returns:
[batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
coordinates are normalized.
"""
def __init__(self, config=None, **kwargs):
super(DetectionLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
rois = inputs[0]
mrcnn_class = inputs[1]
mrcnn_bbox = inputs[2]
image_meta = inputs[3]
# Get windows of images in normalized coordinates. Windows are the area
# in the image that excludes the padding.
# Use the shape of the first image in the batch to normalize the window
# because we know that all images get resized to the same size.
m = parse_image_meta_graph(image_meta)
image_shape = m['image_shape'][0]
window = norm_boxes_graph(m['window'], image_shape[:2])
# Run detection refinement graph on each item in the batch
detections_batch = utils.batch_slice(
[rois, mrcnn_class, mrcnn_bbox, window],
lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
self.config.IMAGES_PER_GPU)
# Reshape output
# [batch, num_detections, (y1, x1, y2, x2, class_score)] in
# normalized coordinates
return tf.reshape(
detections_batch,
[self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])
def compute_output_shape(self, input_shape):
return (None, self.config.DETECTION_MAX_INSTANCES, 6)
############################################################
# Region Proposal Network (RPN)
############################################################
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
"""Builds the computation graph of Region Proposal Network.
feature_map: backbone features [batch, height, width, depth]
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
Returns:
rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
# TODO: check if stride of 2 causes alignment issues if the featuremap
# is not even.
# Shared convolutional base of the RPN
shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
strides=anchor_stride,
name='rpn_conv_shared')(feature_map)
# Anchor Score. [batch, height, width, anchors per location * 2].
x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
activation='linear', name='rpn_class_raw')(shared)
# Reshape to [batch, anchors, 2]
rpn_class_logits = KL.Lambda(
lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)
# Softmax on last dimension of BG/FG.
rpn_probs = KL.Activation(
"softmax", name="rpn_class_xxx")(rpn_class_logits)
# Bounding box refinement. [batch, H, W, anchors per location, depth]
# where depth is [x, y, log(w), log(h)]
x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
activation='linear', name='rpn_bbox_pred')(shared)
# Reshape to [batch, anchors, 4]
rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)
return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
"""Builds a Keras model of the Region Proposal Network.
It wraps the RPN graph so it can be used multiple times with shared
weights.
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
depth: Depth of the backbone feature map.
Returns a Keras Model object. The model outputs, when called, are:
rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
    rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
input_feature_map = KL.Input(shape=[None, None, depth],
name="input_rpn_feature_map")
outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)
return KM.Model([input_feature_map], outputs, name="rpn_model")
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps, image_meta,
pool_size, num_classes, train_bn=True):
"""Builds the computation graph of the feature pyramid network classifier
and regressor heads.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
- image_meta: [batch, (meta data)] Image details. See compose_image_meta()
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers
Returns:
logits: [N, NUM_CLASSES] classifier logits (before softmax)
probs: [N, NUM_CLASSES] classifier probabilities
bbox_deltas: [N, (dy, dx, log(dh), log(dw))] Deltas to apply to
proposal boxes
"""
# ROI Pooling
# Shape: [batch, num_boxes, pool_height, pool_width, channels]
x = PyramidROIAlign([pool_size, pool_size],
name="roi_align_classifier")([rois, image_meta] + feature_maps)
# Two 1024 FC layers (implemented with Conv2D for consistency)
x = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding="valid"),
name="mrcnn_class_conv1")(x)
x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)),
name="mrcnn_class_conv2")(x)
x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)
x = KL.Activation('relu')(x)
shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
name="pool_squeeze")(x)
# Classifier head
mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
name='mrcnn_class_logits')(shared)
mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
name="mrcnn_class")(mrcnn_class_logits)
# BBox head
# [batch, boxes, num_classes * (dy, dx, log(dh), log(dw))]
x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
name='mrcnn_bbox_fc')(shared)
# Reshape to [batch, boxes, num_classes, (dy, dx, log(dh), log(dw))]
s = K.int_shape(x)
mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)
return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
def build_fpn_mask_graph(rois, feature_maps, image_meta,
pool_size, num_classes, train_bn=True):
"""Builds the computation graph of the mask head of Feature Pyramid Network.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_meta: [batch, (meta data)] Image details. See compose_image_meta()
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
train_bn: Boolean. Train or freeze Batch Norm layers
Returns: Masks [batch, roi_count, height, width, num_classes]
"""
# ROI Pooling
# Shape: [batch, boxes, pool_height, pool_width, channels]
x = PyramidROIAlign([pool_size, pool_size],
name="roi_align_mask")([rois, image_meta] + feature_maps)
# Conv layers
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv1")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn1')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv2")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn2')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv3")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn3')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv4")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn4')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
name="mrcnn_mask_deconv")(x)
x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
name="mrcnn_mask")(x)
return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
"""Implements Smooth-L1 loss.
y_true and y_pred are typically: [N, 4], but could be any shape.
"""
diff = K.abs(y_true - y_pred)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff ** 2) + (1 - less_than_one) * (diff - 0.5)
return loss
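# Illustrative NumPy reference (not used by the training graph): the same
# piecewise definition as smooth_l1_loss() above, handy for sanity checks.
# 0.5 * d^2 when |d| < 1, otherwise |d| - 0.5.
def _example_smooth_l1_numpy(y_true, y_pred):
    diff = np.abs(np.asarray(y_true, dtype=np.float32) -
                  np.asarray(y_pred, dtype=np.float32))
    return np.where(diff < 1.0, 0.5 * diff ** 2, diff - 0.5)
# e.g. _example_smooth_l1_numpy([0., 0.], [0.5, 2.0]) -> [0.125, 1.5]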
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
"""RPN anchor classifier loss.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
"""
# Squeeze last dim to simplify
rpn_match = tf.squeeze(rpn_match, -1)
# Get anchor classes. Convert the -1/+1 match to 0/1 values.
anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
# Positive and Negative anchors contribute to the loss,
# but neutral anchors (match value = 0) don't.
indices = tf.where(K.not_equal(rpn_match, 0))
# Pick rows that contribute to the loss and filter out the rest.
rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
anchor_class = tf.gather_nd(anchor_class, indices)
# Crossentropy loss
loss = K.sparse_categorical_crossentropy(target=anchor_class,
output=rpn_class_logits,
from_logits=True)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
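# Illustrative NumPy sketch (not used by the training graph) of the anchor
# selection above: the -1/0/+1 match is converted to 0/1 labels and neutral
# anchors (match == 0) are dropped before the crossentropy is computed.
def _example_rpn_class_targets():
    rpn_match = np.array([1, -1, 0, 1, 0, -1])
    anchor_class = (rpn_match == 1).astype(np.int32)   # [1, 0, 0, 1, 0, 0]
    contributing = np.where(rpn_match != 0)[0]         # indices [0, 1, 3, 5]
    return anchor_class[contributing]                  # [1, 0, 1, 0]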
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
"""Return the RPN bounding box loss graph.
config: the model config object.
target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
Uses 0 padding to fill in unused bbox deltas.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
"""
# Positive anchors contribute to the loss, but negative and
# neutral anchors (match value of 0 or -1) don't.
rpn_match = K.squeeze(rpn_match, -1)
indices = tf.where(K.equal(rpn_match, 1))
# Pick bbox deltas that contribute to the loss
rpn_bbox = tf.gather_nd(rpn_bbox, indices)
# Trim target bounding box deltas to the same length as rpn_bbox.
batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
target_bbox = batch_pack_graph(target_bbox, batch_counts,
config.IMAGES_PER_GPU)
# TODO: use smooth_l1_loss() rather than reimplementing here
# to reduce code duplication
diff = K.abs(target_bbox - rpn_bbox)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff ** 2) + (1 - less_than_one) * (diff - 0.5)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
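# Illustrative NumPy sketch (not used by the training graph) of what
# batch_pack_graph() does above: target_bbox is zero padded per image, and
# only the first `count` rows of each image are kept, where `count` is the
# number of positive anchors in that image.
def _example_trim_padded_targets():
    target_bbox = np.zeros((2, 4, 4))     # batch of 2, up to 4 deltas each
    target_bbox[0, :2] = 1.0              # image 0 has 2 real deltas
    target_bbox[1, :1] = 2.0              # image 1 has 1 real delta
    counts = np.array([2, 1])
    packed = np.concatenate([target_bbox[i, :counts[i]]
                             for i in range(len(counts))], axis=0)
    return packed.shape                   # (3, 4)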
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
active_class_ids):
"""Loss for the classifier head of Mask RCNN.
target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
padding to fill in the array.
pred_class_logits: [batch, num_rois, num_classes]
active_class_ids: [batch, num_classes]. Has a value of 1 for
classes that are in the dataset of the image, and 0
for classes that are not in the dataset.
"""
# During model building, Keras calls this function with
# target_class_ids of type float32. Unclear why. Cast it
# to int to get around it.
target_class_ids = tf.cast(target_class_ids, 'int64')
# Find predictions of classes that are not in the dataset.
pred_class_ids = tf.argmax(pred_class_logits, axis=2)
# TODO: Update this line to work with batch > 1. Right now it assumes all
# images in a batch have the same active_class_ids
pred_active = tf.gather(active_class_ids[0], pred_class_ids)
# Loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target_class_ids, logits=pred_class_logits)
# Erase losses of predictions of classes that are not in the active
# classes of the image.
loss = loss * pred_active
# Compute loss mean. Use only predictions that contribute
# to the loss to get a correct mean.
loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
return loss
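# Illustrative NumPy sketch (not used by the training graph) of the masking
# above: losses for predictions of classes that aren't active in the image's
# dataset are zeroed, and the mean is taken over the remaining predictions.
def _example_mask_inactive_class_losses():
    per_roi_loss = np.array([0.2, 1.5, 0.7])
    pred_class_ids = np.array([1, 3, 2])
    active_class_ids = np.array([1, 1, 1, 0])  # class 3 not in this dataset
    pred_active = active_class_ids[pred_class_ids].astype(np.float32)  # [1, 0, 1]
    return np.sum(per_roi_loss * pred_active) / np.sum(pred_active)    # 0.45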
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
"""Loss for Mask R-CNN bounding box refinement.
target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
target_class_ids: [batch, num_rois]. Integer class IDs.
pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
"""
# Reshape to merge batch and roi dimensions for simplicity.
target_class_ids = K.reshape(target_class_ids, (-1,))
target_bbox = K.reshape(target_bbox, (-1, 4))
pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))
# Only positive ROIs contribute to the loss. And only
# the right class_id of each ROI. Get their indices.
positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
positive_roi_class_ids = tf.cast(
tf.gather(target_class_ids, positive_roi_ix), tf.int64)
indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
# Gather the deltas (predicted and true) that contribute to loss
target_bbox = tf.gather(target_bbox, positive_roi_ix)
pred_bbox = tf.gather_nd(pred_bbox, indices)
# Smooth-L1 Loss
loss = K.switch(tf.size(target_bbox) > 0,
smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
tf.constant(0.0))
loss = K.mean(loss)
return loss
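# Illustrative NumPy sketch (not used by the training graph): pred_bbox holds
# one set of deltas per class for every ROI, and only the deltas of each
# positive ROI's ground-truth class are compared against the target.
def _example_gather_class_specific_deltas():
    num_rois, num_classes = 3, 4
    pred_bbox = np.arange(num_rois * num_classes * 4, dtype=np.float32).reshape(
        (num_rois, num_classes, 4))
    target_class_ids = np.array([2, 0, 1])              # ROI 1 is background
    positive_ix = np.where(target_class_ids > 0)[0]     # [0, 2]
    positive_class_ids = target_class_ids[positive_ix]  # [2, 1]
    return pred_bbox[positive_ix, positive_class_ids]   # shape (2, 4)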
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
"""Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
"""
# Reshape for simplicity. Merge first two dimensions into one.
target_class_ids = K.reshape(target_class_ids, (-1,))
mask_shape = tf.shape(target_masks)
target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
pred_shape = tf.shape(pred_masks)
pred_masks = K.reshape(pred_masks,
(-1, pred_shape[2], pred_shape[3], pred_shape[4]))
# Permute predicted masks to [N, num_classes, height, width]
pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
# Only positive ROIs contribute to the loss. And only
# the class specific mask of each ROI.
positive_ix = tf.where(target_class_ids > 0)[:, 0]
positive_class_ids = tf.cast(
tf.gather(target_class_ids, positive_ix), tf.int64)
indices = tf.stack([positive_ix, positive_class_ids], axis=1)
# Gather the masks (predicted and true) that contribute to loss
y_true = tf.gather(target_masks, positive_ix)
y_pred = tf.gather_nd(pred_masks, indices)
# Compute binary cross entropy. If no positive ROIs, then return 0.
# shape: [batch, roi, num_classes]
loss = K.switch(tf.size(y_true) > 0,
K.binary_crossentropy(target=y_true, output=y_pred),
tf.constant(0.0))
loss = K.mean(loss)
return loss
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augment=False, augmentation=None,
use_mini_mask=False):
"""Load and return ground truth data for an image (image, mask, bounding boxes).
augment: (Deprecated. Use augmentation instead). If true, apply random
image augmentation. Currently, only horizontal flipping is offered.
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
For example, passing imgaug.augmenters.Fliplr(0.5) flips images
right/left 50% of the time.
use_mini_mask: If False, returns full-size masks that are the same height
and width as the original image. These can be big, for example
1024x1024x100 (for 100 instances). Mini masks are smaller and are
generated by extracting the bounding box of the object and resizing
it to MINI_MASK_SHAPE.
Returns:
image: [height, width, 3]
shape: the original shape of the image before resizing and cropping.
class_ids: [instance_count] Integer class IDs
bbox: [instance_count, (y1, x1, y2, x2)]
mask: [height, width, instance_count]. The height and width are those
of the image unless use_mini_mask is True, in which case they are
defined in MINI_MASK_SHAPE.
"""
# Load image and mask
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
original_shape = image.shape
image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=config.IMAGE_MIN_DIM,
min_scale=config.IMAGE_MIN_SCALE,
max_dim=config.IMAGE_MAX_DIM,
mode=config.IMAGE_RESIZE_MODE)
mask = utils.resize_mask(mask, scale, padding, crop)
# Random horizontal flips.
# TODO: will be removed in a future update in favor of augmentation
if augment:
logging.warning("'augment' is deprecated. Use 'augmentation' instead.")
if random.randint(0, 1):
image = np.fliplr(image)
mask = np.fliplr(mask)
# Augmentation
# This requires the imgaug lib (https://github.com/aleju/imgaug)
if augmentation:
import imgaug
# Augmentors that are safe to apply to masks
# Some, such as Affine, have settings that make them unsafe, so always
# test your augmentation on masks
MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
"Fliplr", "Flipud", "CropAndPad",
"Affine", "PiecewiseAffine"]
def hook(images, augmenter, parents, default):
"""Determines which augmenters to apply to masks."""
return (augmenter.__class__.__name__ in MASK_AUGMENTERS)
# Store shapes before augmentation to compare
image_shape = image.shape
mask_shape = mask.shape
# Make augmenters deterministic to apply similarly to images and masks
det = augmentation.to_deterministic()
image = det.augment_image(image)
# Change mask to np.uint8 because imgaug doesn't support np.bool
mask = det.augment_image(mask.astype(np.uint8),
hooks=imgaug.HooksImages(activator=hook))
# Verify that shapes didn't change
assert image.shape == image_shape, "Augmentation shouldn't change image size"
assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
# Change mask back to bool
mask = mask.astype(np.bool)
# Note that some masks might be all zeros if the corresponding instance
# got cropped out. Filter those instances out here.
_idx = np.sum(mask, axis=(0, 1)) > 0
mask = mask[:, :, _idx]
class_ids = class_ids[_idx]
# Bounding boxes. Note that some boxes might be all zeros
# if the corresponding mask got cropped out.
# bbox: [num_instances, (y1, x1, y2, x2)]
bbox = utils.extract_bboxes(mask)
# Active classes
# Different datasets have different classes, so track the
# classes supported in the dataset of this image.
active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
active_class_ids[source_class_ids] = 1
# Resize masks to smaller size to reduce memory usage
if use_mini_mask:
mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
# Image meta data
image_meta = compose_image_meta(image_id, original_shape, image.shape,
window, scale, active_class_ids)
return image, image_meta, class_ids, bbox, mask
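# Illustrative usage sketch (not part of the original module). `dataset` and
# `config` are assumed to be an initialized Dataset subclass and Config
# instance; the helper simply loads the first image with its ground truth.
def _example_load_image_gt(dataset, config):
    return load_image_gt(dataset, config, dataset.image_ids[0],
                         use_mini_mask=config.USE_MINI_MASK)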
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
"""Generate targets for training Stage 2 classifier and mask heads.
This is not used in normal training. It's useful for debugging or to train
the Mask RCNN heads without using the RPN head.
Inputs:
rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
gt_class_ids: [instance count] Integer class IDs
gt_boxes: [instance count, (y1, x1, y2, x2)]
gt_masks: [height, width, instance count] Ground truth masks. Can be full
size or mini-masks.
Returns:
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
bbox refinements.
masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES]. Class-specific masks cropped
to bbox boundaries and resized to neural network output size.
"""
assert rpn_rois.shape[0] > 0
assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
gt_class_ids.dtype)
assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
gt_boxes.dtype)
assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
gt_masks.dtype)
# It's common to add GT Boxes to ROIs but we don't do that here because
# according to XinLei Chen's paper, it doesn't help.
# Trim empty padding in gt_boxes and gt_masks parts
instance_ids = np.where(gt_class_ids > 0)[0]
assert instance_ids.shape[0] > 0, "Image must contain instances."
gt_class_ids = gt_class_ids[instance_ids]
gt_boxes = gt_boxes[instance_ids]
gt_masks = gt_masks[:, :, instance_ids]
# Compute areas of ROIs and ground truth boxes.
rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
(rpn_rois[:, 3] - rpn_rois[:, 1])
gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
(gt_boxes[:, 3] - gt_boxes[:, 1])
# Compute overlaps [rpn_rois, gt_boxes]
overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
for i in range(overlaps.shape[1]):
gt = gt_boxes[i]
overlaps[:, i] = utils.compute_iou(
gt, rpn_rois, gt_box_area[i], rpn_roi_area)
# Assign ROIs to GT boxes
rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
rpn_roi_iou_max = overlaps[np.arange(
overlaps.shape[0]), rpn_roi_iou_argmax]
# GT box assigned to each ROI
rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]
# Positive ROIs are those with >= 0.5 IoU with a GT box.
fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]
# Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
# TODO: To hard example mine or not to hard example mine, that's the question
# bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
# Subsample ROIs. Aim for 33% foreground.
# FG
fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
if fg_ids.shape[0] > fg_roi_count:
keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
else:
keep_fg_ids = fg_ids
# BG
remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
if bg_ids.shape[0] > remaining:
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
else:
keep_bg_ids = bg_ids
# Combine indices of ROIs to keep
keep = np.concatenate([keep_fg_ids, keep_bg_ids])
# Need more?
remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
if remaining > 0:
# Looks like we don't have enough samples to maintain the desired
# balance. Reduce requirements and fill in the rest. This is
# likely different from the Mask RCNN paper.
# There is a small chance we have neither fg nor bg samples.
if keep.shape[0] == 0:
# Pick bg regions with easier IoU threshold
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
assert bg_ids.shape[0] >= remaining
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
assert keep_bg_ids.shape[0] == remaining
keep = np.concatenate([keep, keep_bg_ids])
else:
# Fill the rest with repeated bg rois.
keep_extra_ids = np.random.choice(
keep_bg_ids, remaining, replace=True)
keep = np.concatenate([keep, keep_extra_ids])
assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
"keep doesn't match ROI batch size {}, {}".format(
keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)
# Reset the gt boxes assigned to BG ROIs.
rpn_roi_gt_boxes[keep_bg_ids, :] = 0
rpn_roi_gt_class_ids[keep_bg_ids] = 0
# For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
rois = rpn_rois[keep]
roi_gt_boxes = rpn_roi_gt_boxes[keep]
roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
roi_gt_assignment = rpn_roi_iou_argmax[keep]
# Class-aware bbox deltas. [y, x, log(h), log(w)]
bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
config.NUM_CLASSES, 4), dtype=np.float32)
pos_ids = np.where(roi_gt_class_ids > 0)[0]
bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
rois[pos_ids], roi_gt_boxes[pos_ids, :4])
# Normalize bbox refinements
bboxes /= config.BBOX_STD_DEV
# Generate class-specific target masks
masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
dtype=np.float32)
for i in pos_ids:
class_id = roi_gt_class_ids[i]
assert class_id > 0, "class id must be greater than 0"
gt_id = roi_gt_assignment[i]
class_mask = gt_masks[:, :, gt_id]
if config.USE_MINI_MASK:
# Create a mask placeholder, the size of the image
placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
# GT box
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
gt_w = gt_x2 - gt_x1
gt_h = gt_y2 - gt_y1
# Resize mini mask to size of GT box
placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
np.round(skimage.transform.resize(
class_mask, (gt_h, gt_w), order=1, mode="constant")).astype(bool)
# Place the mini mask in the placeholder
class_mask = placeholder
# Pick part of the mask and resize it
y1, x1, y2, x2 = rois[i].astype(np.int32)
m = class_mask[y1:y2, x1:x2]
mask = skimage.transform.resize(m, config.MASK_SHAPE, order=1, mode="constant")
masks[i, :, :, class_id] = mask
return rois, roi_gt_class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2)]
gt_class_ids: [num_gt_boxes] Integer class IDs.
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]
Returns:
rpn_match: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
"""
# RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
# RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = np.where(gt_class_ids < 0)[0]
if crowd_ix.shape[0] > 0:
# Filter out crowds from ground truth class IDs and boxes
non_crowd_ix = np.where(gt_class_ids > 0)[0]
crowd_boxes = gt_boxes[crowd_ix]
gt_class_ids = gt_class_ids[non_crowd_ix]
gt_boxes = gt_boxes[non_crowd_ix]
# Compute overlaps with crowd boxes [anchors, crowds]
crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
crowd_iou_max = np.amax(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
else:
# All anchors don't intersect a crowd
no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
# Compute overlaps [num_anchors, num_gt_boxes]
overlaps = utils.compute_overlaps(anchors, gt_boxes)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
# If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.3).
#
# 1. Set negative anchors first. They get overwritten below if a GT box is
# matched to them. Skip boxes in crowd areas.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
# 2. Set an anchor for each GT box (regardless of IoU value).
# TODO: If multiple anchors have the same IoU match all of them
gt_iou_argmax = np.argmax(overlaps, axis=0)
rpn_match[gt_iou_argmax] = 1
# 3. Set anchors with high overlap as positive.
rpn_match[anchor_iou_max >= 0.7] = 1
# Subsample to balance positive and negative anchors
# Don't let positives be more than half the anchors
ids = np.where(rpn_match == 1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# Same for negative proposals
ids = np.where(rpn_match == -1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
np.sum(rpn_match == 1))
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# For positive anchors, compute shift and scale needed to transform them
# to match the corresponding GT boxes.
ids = np.where(rpn_match == 1)[0]
ix = 0 # index into rpn_bbox
# TODO: use box_refinement() rather than duplicating the code here
for i, a in zip(ids, anchors[ids]):
# Closest gt box (it might have IoU < 0.7)
gt = gt_boxes[anchor_iou_argmax[i]]
# Convert coordinates to center plus width/height.
# GT Box
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
# Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
# Compute the bbox refinement that the RPN should predict.
rpn_bbox[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
]
# Normalize
rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
ix += 1
return rpn_match, rpn_bbox
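# Illustrative NumPy sketch (not part of the original module) of the delta
# computed in the loop above for one anchor/GT pair, in (y1, x1, y2, x2)
# coordinates: center shift normalized by anchor size plus log height/width
# scale. The std_dev default mirrors the usual RPN_BBOX_STD_DEV values and is
# an assumption of this example.
def _example_anchor_delta(anchor, gt_box, std_dev=(0.1, 0.1, 0.2, 0.2)):
    a_h, a_w = anchor[2] - anchor[0], anchor[3] - anchor[1]
    gt_h, gt_w = gt_box[2] - gt_box[0], gt_box[3] - gt_box[1]
    a_cy, a_cx = anchor[0] + 0.5 * a_h, anchor[1] + 0.5 * a_w
    gt_cy, gt_cx = gt_box[0] + 0.5 * gt_h, gt_box[1] + 0.5 * gt_w
    delta = np.array([(gt_cy - a_cy) / a_h, (gt_cx - a_cx) / a_w,
                      np.log(gt_h / a_h), np.log(gt_w / a_w)])
    return delta / np.asarray(std_dev)
# e.g. _example_anchor_delta(np.array([0., 0., 10., 10.]),
#                            np.array([1., 1., 11., 11.])) -> [1., 1., 0., 0.]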
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
"""Generates ROI proposals similar to what a region proposal network
would generate.
image_shape: [Height, Width, Depth]
count: Number of ROIs to generate
gt_class_ids: [N] Integer ground truth class IDs
gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.
Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
"""
# placeholder
rois = np.zeros((count, 4), dtype=np.int32)
# Generate random ROIs around GT boxes (90% of count)
rois_per_box = int(0.9 * count / gt_boxes.shape[0])
for i in range(gt_boxes.shape[0]):
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
h = gt_y2 - gt_y1
w = gt_x2 - gt_x1
# random boundaries
r_y1 = max(gt_y1 - h, 0)
r_y2 = min(gt_y2 + h, image_shape[0])
r_x1 = max(gt_x1 - w, 0)
r_x2 = min(gt_x2 + w, image_shape[1])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:rois_per_box]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:rois_per_box]
if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
box_rois = np.hstack([y1, x1, y2, x2])
rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois
# Generate random ROIs anywhere in the image (10% of count)
remaining_count = count - (rois_per_box * gt_boxes.shape[0])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:remaining_count]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:remaining_count]
if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
global_rois = np.hstack([y1, x1, y2, x2])
rois[-remaining_count:] = global_rois
return rois
def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,
random_rois=0, batch_size=1, detection_targets=False):
"""A generator that returns images and corresponding target class ids,
bounding box deltas, and masks.
dataset: The Dataset object to pick data from
config: The model config object
shuffle: If True, shuffles the samples before every epoch
augment: (Deprecated. Use augmentation instead). If true, apply random
image augmentation. Currently, only horizontal flipping is offered.
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
For example, passing imgaug.augmenters.Fliplr(0.5) flips images
right/left 50% of the time.
random_rois: If > 0 then generate proposals to be used to train the
network classifier and mask heads. Useful if training
the Mask RCNN part without the RPN.
batch_size: How many images to return in each call
detection_targets: If True, generate detection targets (class IDs, bbox
deltas, and masks). Typically for debugging or visualizations because
in training detection targets are generated by DetectionTargetLayer.
Returns a Python generator. Upon calling next() on it, the
generator returns two lists, inputs and outputs. The contents
of the lists differ depending on the received arguments:
inputs list:
- images: [batch, H, W, C]
- image_meta: [batch, (meta data)] Image details. See compose_image_meta()
- rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
- rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
- gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
- gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
- gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
are those of the image unless use_mini_mask is True, in which
case they are defined in MINI_MASK_SHAPE.
outputs list: Usually empty in regular training. But if detection_targets
is True then the outputs list contains target class_ids, bbox deltas,
and masks.
"""
b = 0 # batch item index
image_index = -1
image_ids = np.copy(dataset.image_ids)
error_count = 0
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)
anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
backbone_shapes,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
# Keras requires a generator to run indefinitely.
while True:
try:
# Increment index to pick next image. Shuffle if at the start of an epoch.
image_index = (image_index + 1) % len(image_ids)
if shuffle and image_index == 0:
np.random.shuffle(image_ids)
# Get GT bounding boxes and masks for image.
image_id = image_ids[image_index]
image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
load_image_gt(dataset, config, image_id, augment=augment,
augmentation=augmentation,
use_mini_mask=config.USE_MINI_MASK)
# Skip images that have no instances. This can happen in cases
# where we train on a subset of classes and the image doesn't
# have any of the classes we care about.
if not np.any(gt_class_ids > 0):
continue
# RPN Targets
rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,
gt_class_ids, gt_boxes, config)
# Mask R-CNN Targets
if random_rois:
rpn_rois = generate_random_rois(
image.shape, random_rois, gt_class_ids, gt_boxes)
if detection_targets:
rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask = \
build_detection_targets(
rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)
# Init batch arrays
if b == 0:
batch_image_meta = np.zeros(
(batch_size,) + image_meta.shape, dtype=image_meta.dtype)
batch_rpn_match = np.zeros(
[batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)
batch_rpn_bbox = np.zeros(
[batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
batch_images = np.zeros(
(batch_size,) + image.shape, dtype=np.float32)
batch_gt_class_ids = np.zeros(
(batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)
batch_gt_boxes = np.zeros(
(batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)
batch_gt_masks = np.zeros(
(batch_size, gt_masks.shape[0], gt_masks.shape[1],
config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)
if random_rois:
batch_rpn_rois = np.zeros(
(batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
if detection_targets:
batch_rois = np.zeros(
(batch_size,) + rois.shape, dtype=rois.dtype)
batch_mrcnn_class_ids = np.zeros(
(batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
batch_mrcnn_bbox = np.zeros(
(batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
batch_mrcnn_mask = np.zeros(
(batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
# If more instances than fits in the array, sub-sample from them.
if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
ids = np.random.choice(
np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
gt_class_ids = gt_class_ids[ids]
gt_boxes = gt_boxes[ids]
gt_masks = gt_masks[:, :, ids]
# Add to batch
batch_image_meta[b] = image_meta
batch_rpn_match[b] = rpn_match[:, np.newaxis]
batch_rpn_bbox[b] = rpn_bbox
batch_images[b] = mold_image(image.astype(np.float32), config)
batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
if random_rois:
batch_rpn_rois[b] = rpn_rois
if detection_targets:
batch_rois[b] = rois
batch_mrcnn_class_ids[b] = mrcnn_class_ids
batch_mrcnn_bbox[b] = mrcnn_bbox
batch_mrcnn_mask[b] = mrcnn_mask
b += 1
# Batch full?
if b >= batch_size:
inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
outputs = []
if random_rois:
inputs.extend([batch_rpn_rois])
if detection_targets:
inputs.extend([batch_rois])
# Keras requires that output and targets have the same number of dimensions
batch_mrcnn_class_ids = np.expand_dims(
batch_mrcnn_class_ids, -1)
outputs.extend(
[batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])
yield inputs, outputs
# start a new batch
b = 0
except (GeneratorExit, KeyboardInterrupt):
raise
except:
# Log it and skip the image
logging.exception("Error processing image {}".format(
dataset.image_info[image_id]))
error_count += 1
if error_count > 5:
raise
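# Illustrative usage sketch (not part of the original module): pulls a single
# batch from the generator. `dataset` and `config` are assumed to be
# initialized Dataset and Config instances.
def _example_next_training_batch(dataset, config):
    generator = data_generator(dataset, config, shuffle=True,
                               batch_size=config.BATCH_SIZE)
    inputs, outputs = next(generator)
    # inputs: [images, image_meta, rpn_match, rpn_bbox,
    #          gt_class_ids, gt_boxes, gt_masks]
    return inputs, outputs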
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
def build(self, mode, config):
"""Build Mask R-CNN architecture.
mode: Either "training" or "inference". The inputs and
outputs of the model differ accordingly.
config: A Sub-class of the Config class.
"""
assert mode in ['training', 'inference']
# Image size must be divisible by 2 multiple times
h, w = config.IMAGE_SHAPE[:2]
if h / 2 ** 6 != int(h / 2 ** 6) or w / 2 ** 6 != int(w / 2 ** 6):
raise Exception("Image size must be dividable by 2 at least 6 times "
"to avoid fractions when downscaling and upscaling."
"For example, use 256, 320, 384, 448, 512, ... etc. ")
# Inputs
input_image = KL.Input(
shape=[None, None, 3], name="input_image")
input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
name="input_image_meta")
if mode == "training":
# RPN GT
input_rpn_match = KL.Input(
shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
input_rpn_bbox = KL.Input(
shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
# Detection GT (class IDs, bounding boxes, and masks)
# 1. GT Class IDs (zero padded)
input_gt_class_ids = KL.Input(
shape=[None], name="input_gt_class_ids", dtype=tf.int32)
# 2. GT Boxes in pixels (zero padded)
# [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
input_gt_boxes = KL.Input(
shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
# Normalize coordinates
gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_gt_boxes)
# 3. GT Masks (zero padded)
# [batch, height, width, MAX_GT_INSTANCES]
if config.USE_MINI_MASK:
input_gt_masks = KL.Input(
shape=[config.MINI_MASK_SHAPE[0],
config.MINI_MASK_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
else:
input_gt_masks = KL.Input(
shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
elif mode == "inference":
# Anchors in normalized coordinates
input_anchors = KL.Input(shape=[None, 4], name="input_anchors")
# Build the shared convolutional layers.
# Bottom-up Layers
# Returns a list of the last layers of each stage, 5 in total.
# The stage 1 output isn't used by the FPN, so it's discarded below.
_, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
stage5=True, train_bn=config.TRAIN_BN)
# Top-down Layers
# TODO: add assert to verify feature map sizes match what's in config
P5 = KL.Conv2D(256, (1, 1), name='fpn_c5p5')(C5)
P4 = KL.Add(name="fpn_p4add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
KL.Conv2D(256, (1, 1), name='fpn_c4p4')(C4)])
P3 = KL.Add(name="fpn_p3add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
KL.Conv2D(256, (1, 1), name='fpn_c3p3')(C3)])
P2 = KL.Add(name="fpn_p2add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
KL.Conv2D(256, (1, 1), name='fpn_c2p2')(C2)])
# Attach 3x3 conv to all P layers to get the final feature maps.
P2 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p2")(P2)
P3 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p3")(P3)
P4 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p4")(P4)
P5 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p5")(P5)
# P6 is used for the 5th anchor scale in RPN. Generated by
# subsampling from P5 with stride of 2.
P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
# Note that P6 is used in RPN, but not in the classifier heads.
rpn_feature_maps = [P2, P3, P4, P5, P6]
mrcnn_feature_maps = [P2, P3, P4, P5]
# Anchors
if mode == "training":
anchors = self.get_anchors(config.IMAGE_SHAPE)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
# A hack to get around Keras's bad support for constants
anchors = KL.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image)
else:
anchors = input_anchors
# RPN Model
rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
len(config.RPN_ANCHOR_RATIOS), 256)
# Loop through pyramid layers
layer_outputs = [] # list of lists
for p in rpn_feature_maps:
layer_outputs.append(rpn([p]))
# Concatenate layer outputs
# Convert from list of lists of level outputs to list of lists
# of outputs across levels.
# e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
outputs = list(zip(*layer_outputs))
outputs = [KL.Concatenate(axis=1, name=n)(list(o))
for o, n in zip(outputs, output_names)]
rpn_class_logits, rpn_class, rpn_bbox = outputs
# Generate proposals
# Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
# and zero padded.
proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training" \
else config.POST_NMS_ROIS_INFERENCE
rpn_rois = ProposalLayer(
proposal_count=proposal_count,
nms_threshold=config.RPN_NMS_THRESHOLD,
name="ROI",
config=config)([rpn_class, rpn_bbox, anchors])
if mode == "training":
# Class ID mask to mark class IDs supported by the dataset the image
# came from.
active_class_ids = KL.Lambda(
lambda x: parse_image_meta_graph(x)["active_class_ids"]
)(input_image_meta)
if not config.USE_RPN_ROIS:
# Ignore predicted ROIs and use ROIs provided as an input.
input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
name="input_roi", dtype=np.int32)
# Normalize coordinates
target_rois = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_rois)
else:
target_rois = rpn_rois
# Generate detection targets
# Subsamples proposals and generates target outputs for training
# Note that proposal class IDs, gt_boxes, and gt_masks are zero
# padded. Equally, returned rois and targets are zero padded.
rois, target_class_ids, target_bbox, target_mask = \
DetectionTargetLayer(config, name="proposal_targets")([
target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])
# Network Heads
# TODO: verify that this handles zero padded ROIs
mrcnn_class_logits, mrcnn_class, mrcnn_bbox = \
fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
# TODO: clean up (use tf.identify if necessary)
output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
# Losses
rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
[input_rpn_match, rpn_class_logits])
rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
[input_rpn_bbox, input_rpn_match, rpn_bbox])
class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
[target_class_ids, mrcnn_class_logits, active_class_ids])
bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
[target_bbox, target_class_ids, mrcnn_bbox])
mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
[target_mask, target_class_ids, mrcnn_mask])
# Model
inputs = [input_image, input_image_meta,
input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
if not config.USE_RPN_ROIS:
inputs.append(input_rois)
outputs = [rpn_class_logits, rpn_class, rpn_bbox,
mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
rpn_rois, output_rois,
rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
model = KM.Model(inputs, outputs, name='mask_rcnn')
else:
# Network Heads
# Proposal classifier and BBox regressor heads
mrcnn_class_logits, mrcnn_class, mrcnn_bbox = \
fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
# Detections
# output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
# normalized coordinates
detections = DetectionLayer(config, name="mrcnn_detection")(
[rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
# Create masks for detections
detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)
mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
model = KM.Model([input_image, input_image_meta, input_anchors],
[detections, mrcnn_class, mrcnn_bbox,
mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
name='mask_rcnn')
# Add multi-GPU support.
if config.GPU_COUNT > 1:
from mrcnn.parallel_model import ParallelModel
model = ParallelModel(model, config.GPU_COUNT)
return model
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
log_dir: The directory where events and weights are saved
checkpoint_path: the path to the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
return None, None
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
return dir_name, None
checkpoint = os.path.join(dir_name, checkpoints[-1])
return dir_name, checkpoint
def load_weights(self, filepath, by_name=False, exclude=None):
"""Modified version of the correspoding Keras function with
the addition of multi-GPU support and the ability to exclude
some layers from loading.
exclude: list of layer names to exclude
"""
import h5py
from keras.engine import topology
if exclude:
by_name = True
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
f = h5py.File(filepath, mode='r')
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
keras_model = self.keras_model
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model") \
else keras_model.layers
# Exclude some layers
if exclude:
layers = filter(lambda l: l.name not in exclude, layers)
if by_name:
topology.load_weights_from_hdf5_group_by_name(f, layers)
else:
topology.load_weights_from_hdf5_group(f, layers)
if hasattr(f, 'close'):
f.close()
# Update the log directory
self.set_log_dir(filepath)
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
from keras.utils.data_utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/' \
'releases/download/v0.2/' \
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
def compile(self, learning_rate, momentum):
"""Gets the model ready for training. Adds losses, regularization, and
metrics. Then calls the Keras compile() function.
"""
# Optimizer object
optimizer = keras.optimizers.SGD(
lr=learning_rate, momentum=momentum,
clipnorm=self.config.GRADIENT_CLIP_NORM)
# Add Losses
# First, clear previously set losses to avoid duplication
self.keras_model._losses = []
self.keras_model._per_input_losses = {}
loss_names = [
"rpn_class_loss", "rpn_bbox_loss",
"mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
for name in loss_names:
layer = self.keras_model.get_layer(name)
if layer.output in self.keras_model.losses:
continue
loss = (
tf.reduce_mean(layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
self.keras_model.add_loss(loss)
# Add L2 Regularization
# Skip gamma and beta weights of batch normalization layers.
reg_losses = [
keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
for w in self.keras_model.trainable_weights
if 'gamma' not in w.name and 'beta' not in w.name]
self.keras_model.add_loss(tf.add_n(reg_losses))
# Compile
self.keras_model.compile(
optimizer=optimizer,
loss=[None] * len(self.keras_model.outputs))
# Add metrics for losses
for name in loss_names:
if name in self.keras_model.metrics_names:
continue
layer = self.keras_model.get_layer(name)
self.keras_model.metrics_names.append(name)
loss = (
tf.reduce_mean(layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
self.keras_model.metrics_tensors.append(loss)
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model") \
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(
layer_regex, keras_model=layer, indent=indent + 4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
# Print trainable layer names
if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
# Continue from where we left off. Get epoch and date from the file name
# A sample model path might look like:
# /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5
regex = r".*/\w+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})/mask\_rcnn\_\w+(\d{4})\.h5"
m = re.match(regex, model_path)
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
# Epoch number in file is 1-based, and in Keras code it's 0-based.
# So, adjust for that then increment by one to start from the next epoch
self.epoch = int(m.group(6)) - 1 + 1
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,
augmentation=None):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
learning_rate: The learning rate to train with
epochs: Number of training epochs. Note that previous training epochs
are considered to be done already, so this actually determines
the epochs to train in total rather than in this particular
call.
layers: Allows selecting which layers to train. It can be:
- A regular expression to match layer names to train
- One of these predefined values:
heads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
flips images right/left 50% of the time. You can pass complex
augmentations as well. This augmentation applies 50% of the
time, and when it does it flips images right/left half the time
and adds a Gaussian blur with a random sigma in range 0 to 5.
augmentation = imgaug.augmenters.Sometimes(0.5, [
imgaug.augmenters.Fliplr(0.5),
imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))
])
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = data_generator(train_dataset, self.config, shuffle=True,
augmentation=augmentation,
batch_size=self.config.BATCH_SIZE)
val_generator = data_generator(val_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE)
# Callbacks
callbacks = [
keras.callbacks.TensorBoard(log_dir=self.log_dir,
histogram_freq=0, write_graph=True, write_images=False),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
# Work-around for Windows: Keras fails on Windows when using
# multiprocessing workers. See discussion here:
# https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
if os.name == 'nt':
workers = 0
else:
workers = multiprocessing.cpu_count()
self.keras_model.fit_generator(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=self.config.VALIDATION_STEPS,
max_queue_size=100,
workers=workers,
use_multiprocessing=True,
)
self.epoch = max(self.epoch, epochs)
def mold_inputs(self, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
images: List of image matrices [height,width,depth]. Images can have
different sizes.
Returns 3 Numpy matrices:
molded_images: [N, h, w, 3]. Images resized and normalized.
image_metas: [N, length of meta data]. Details about each image.
windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
original image (padding excluded).
"""
molded_images = []
image_metas = []
windows = []
for image in images:
# Resize image
# TODO: move resizing to mold_image()
molded_image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=self.config.IMAGE_MIN_DIM,
min_scale=self.config.IMAGE_MIN_SCALE,
max_dim=self.config.IMAGE_MAX_DIM,
mode=self.config.IMAGE_RESIZE_MODE)
molded_image = mold_image(molded_image, self.config)
# Build image_meta
image_meta = compose_image_meta(
0, image.shape, molded_image.shape, window, scale,
np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
# Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
# Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
def unmold_detections(self, detections, mrcnn_mask, original_image_shape,
image_shape, window):
"""Reformats the detections of one image from the format of the neural
network output to a format suitable for use in the rest of the
application.
detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates
mrcnn_mask: [N, height, width, num_classes]
original_image_shape: [H, W, C] Original image shape before resizing
image_shape: [H, W, C] Shape of the image after resizing and padding
window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real
image is excluding the padding.
Returns:
boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
class_ids: [N] Integer class IDs for each bounding box
scores: [N] Float probability scores of the class_id
masks: [height, width, num_instances] Instance masks
"""
# How many detections do we have?
# Detections array is padded with zeros. Find the first class_id == 0.
zero_ix = np.where(detections[:, 4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
# Extract boxes, class_ids, scores, and class-specific masks
boxes = detections[:N, :4]
class_ids = detections[:N, 4].astype(np.int32)
scores = detections[:N, 5]
masks = mrcnn_mask[np.arange(N), :, :, class_ids]
# Translate normalized coordinates in the resized image to pixel
# coordinates in the original image before resizing
window = utils.norm_boxes(window, image_shape[:2])
wy1, wx1, wy2, wx2 = window
shift = np.array([wy1, wx1, wy1, wx1])
wh = wy2 - wy1 # window height
ww = wx2 - wx1 # window width
scale = np.array([wh, ww, wh, ww])
# Convert boxes to normalized coordinates on the window
boxes = np.divide(boxes - shift, scale)
# Convert boxes to pixel coordinates on the original image
boxes = utils.denorm_boxes(boxes, original_image_shape[:2])
# Filter out detections with zero area. Happens in early training when
# network weights are still random
exclude_ix = np.where(
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
N = class_ids.shape[0]
# Resize masks to original image size and set boundary threshold.
full_masks = []
for i in range(N):
# Convert neural network mask to full size mask
full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)
full_masks.append(full_mask)
full_masks = np.stack(full_masks, axis=-1) \
if full_masks else np.empty(masks.shape[1:3] + (0,))
return boxes, class_ids, scores, full_masks
def detect(self, images, verbose=0):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(
images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(images)))
for image in images:
log("image", image)
# Mold inputs to format expected by the neural network
molded_images, image_metas, windows = self.mold_inputs(images)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape, \
"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ = \
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(images):
final_rois, final_class_ids, final_scores, final_masks = \
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
windows[i])
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def detect_molded(self, molded_images, image_metas, verbose=0):
"""Runs the detection pipeline, but expect inputs that are
molded already. Used mostly for debugging and inspecting
the model.
molded_images: List of images loaded using load_image_gt()
image_metas: image meta data, also returned by load_image_gt()
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(molded_images) == self.config.BATCH_SIZE, \
"Number of images must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(molded_images)))
for image in molded_images:
log("image", image)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape, "Images must have the same size"
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ = \
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(molded_images):
window = [0, 0, image.shape[0], image.shape[1]]
final_rois, final_class_ids, final_scores, final_masks = \
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
window)
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def get_anchors(self, image_shape):
"""Returns anchor pyramid for the given image size."""
backbone_shapes = compute_backbone_shapes(self.config, image_shape)
# Cache anchors and reuse if image shape is the same
if not hasattr(self, "_anchor_cache"):
self._anchor_cache = {}
if not tuple(image_shape) in self._anchor_cache:
# Generate Anchors
a = utils.generate_pyramid_anchors(
self.config.RPN_ANCHOR_SCALES,
self.config.RPN_ANCHOR_RATIOS,
backbone_shapes,
self.config.BACKBONE_STRIDES,
self.config.RPN_ANCHOR_STRIDE)
# Keep a copy of the latest anchors in pixel coordinates because
# it's used in inspect_model notebooks.
            # TODO: Remove this after the notebooks are refactored to not use it
self.anchors = a
# Normalize coordinates
self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])
return self._anchor_cache[tuple(image_shape)]
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
def run_graph(self, images, outputs, image_metas=None):
"""Runs a sub-set of the computation graph that computes the given
outputs.
image_metas: If provided, the images are assumed to be already
            molded (i.e. resized, padded, and normalized)
outputs: List of tuples (name, tensor) to compute. The tensors are
symbolic TensorFlow tensors and the names are for easy tracking.
Returns an ordered dict of results. Keys are the names received in the
input and values are Numpy arrays.
"""
model = self.keras_model
# Organize desired outputs into an ordered dict
outputs = OrderedDict(outputs)
for o in outputs.values():
assert o is not None
# Build a Keras function to run parts of the computation graph
inputs = model.inputs
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
        kf = K.function(inputs, list(outputs.values()))
# Prepare inputs
if image_metas is None:
molded_images, image_metas, _ = self.mold_inputs(images)
else:
molded_images = images
image_shape = molded_images[0].shape
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
model_in = [molded_images, image_metas, anchors]
# Run inference
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
model_in.append(0.)
outputs_np = kf(model_in)
        # Pack the generated Numpy arrays into a dict and log the results.
outputs_np = OrderedDict([(k, v)
for k, v in zip(outputs.keys(), outputs_np)])
for k, v in outputs_np.items():
log(k, v)
return outputs_np
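    # Hedged usage note (not part of the original file): run_graph() takes
    # (name, tensor) pairs; the layer names below are hypothetical examples of
    # how intermediate outputs could be inspected.
    #     outputs = model.run_graph([image], [
    #         ("proposals", model.keras_model.get_layer("ROI").output),
    #         ("detections", model.keras_model.get_layer("mrcnn_detection").output),
    #     ])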
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, original_image_shape, image_shape,
window, scale, active_class_ids):
"""Takes attributes of an image and puts them in one 1D array.
image_id: An int ID of the image. Useful for debugging.
original_image_shape: [H, W, C] before resizing or padding.
image_shape: [H, W, C] after resizing and padding
window: (y1, x1, y2, x2) in pixels. The area of the image where the real
image is (excluding the padding)
scale: The scaling factor applied to the original image (float32)
active_class_ids: List of class_ids available in the dataset from which
the image came. Useful if training on images from multiple datasets
where not all classes are present in all datasets.
"""
meta = np.array(
[image_id] + # size=1
list(original_image_shape) + # size=3
list(image_shape) + # size=3
        list(window) +                  # size=4 (y1, x1, y2, x2) in image coordinates
[scale] + # size=1
list(active_class_ids) # size=num_classes
)
return meta
def parse_image_meta(meta):
"""Parses an array that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
Returns a dict of the parsed values.
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
active_class_ids = meta[:, 12:]
return {
"image_id": image_id.astype(np.int32),
"original_image_shape": original_image_shape.astype(np.int32),
"image_shape": image_shape.astype(np.int32),
"window": window.astype(np.int32),
"scale": scale.astype(np.float32),
"active_class_ids": active_class_ids.astype(np.int32),
}
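# Hedged usage sketch (not part of the original file): a minimal round trip
# through compose_image_meta()/parse_image_meta(). The numbers are invented and
# assume a hypothetical dataset with 81 classes.
def _example_image_meta_round_trip():
    meta = compose_image_meta(
        image_id=0,
        original_image_shape=(480, 640, 3),
        image_shape=(1024, 1024, 3),
        window=(128, 0, 896, 1024),   # (y1, x1, y2, x2) of the real image
        scale=1.6,
        active_class_ids=np.zeros(81, dtype=np.int32))
    # parse_image_meta() expects a batch dimension, hence the added axis.
    parsed = parse_image_meta(meta[np.newaxis, :])
    return parsed["window"]           # -> [[128, 0, 896, 1024]]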
def parse_image_meta_graph(meta):
"""Parses a tensor that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
Returns a dict of the parsed tensors.
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
active_class_ids = meta[:, 12:]
return {
"image_id": image_id,
"original_image_shape": original_image_shape,
"image_shape": image_shape,
"window": window,
"scale": scale,
"active_class_ids": active_class_ids,
}
def mold_image(images, config):
"""Expects an RGB image (or array of images) and subtraces
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
"""Takes a image normalized with mold() and returns the original."""
return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)
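# Hedged round-trip sketch (not part of the original file): mold_image() and
# unmold_image() invert each other up to the uint8 cast. The MEAN_PIXEL values
# below are placeholders chosen purely for illustration.
def _example_mold_round_trip():
    class _Config:
        MEAN_PIXEL = np.array([124.0, 117.0, 104.0])
    image = np.full((2, 2, 3), 128, dtype=np.uint8)
    molded = mold_image(image, _Config)     # float32, mean-subtracted
    return unmold_image(molded, _Config)    # back to uint8, equals `image`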
############################################################
#  Miscellaneous Graph Functions
############################################################
def trim_zeros_graph(boxes, name=None):
"""Often boxes are represented with matricies of shape [N, 4] and
are padded with zeros. This removes zero boxes.
boxes: [N, 4] matrix of boxes.
non_zeros: [N] a 1D boolean mask identifying the rows to keep
"""
non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
boxes = tf.boolean_mask(boxes, non_zeros, name=name)
return boxes, non_zeros
def batch_pack_graph(x, counts, num_rows):
"""Picks different number of values from each row
in x depending on the values in counts.
"""
outputs = []
for i in range(num_rows):
outputs.append(x[i, :counts[i]])
return tf.concat(outputs, axis=0)
def norm_boxes_graph(boxes, shape):
"""Converts boxes from pixel coordinates to normalized coordinates.
boxes: [..., (y1, x1, y2, x2)] in pixel coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[..., (y1, x1, y2, x2)] in normalized coordinates
"""
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
shift = tf.constant([0., 0., 1., 1.])
return tf.divide(boxes - shift, scale)
def denorm_boxes_graph(boxes, shape):
"""Converts boxes from normalized coordinates to pixel coordinates.
boxes: [..., (y1, x1, y2, x2)] in normalized coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[..., (y1, x1, y2, x2)] in pixel coordinates
"""
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
shift = tf.constant([0., 0., 1., 1.])
return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)
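# Hedged companion sketch (not part of the original file): the same pixel <->
# normalized box convention as norm_boxes_graph()/denorm_boxes_graph(), written
# in plain NumPy so the round trip can be checked without building a TF graph.
def _example_box_normalization_round_trip():
    boxes = np.array([[0, 0, 128, 256]], dtype=np.float32)   # pixel coordinates
    h, w = 512.0, 512.0
    scale = np.array([h - 1, w - 1, h - 1, w - 1], dtype=np.float32)
    shift = np.array([0.0, 0.0, 1.0, 1.0], dtype=np.float32)
    normalized = (boxes - shift) / scale                      # like norm_boxes_graph
    restored = np.around(normalized * scale + shift).astype(np.int32)
    return normalized, restored                               # restored == boxes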
| 43.846426
| 115
| 0.610839
|
3d234db44ccc9e505ca50662dfbe06091e5327ff
| 2,788
|
py
|
Python
|
ml/equationGen.py
|
Shivams9/pythoncodecamp
|
e6cd27f4704a407ee360414a8c9236b254117a59
|
[
"MIT"
] | 6
|
2021-08-04T08:15:22.000Z
|
2022-02-02T11:15:56.000Z
|
ML/equationGen.py
|
Maurya232Abhishek/Python-repository-for-basics
|
3dcec5c529a0847df07c9dcc1424675754ce6376
|
[
"MIT"
] | 14
|
2021-08-02T06:28:00.000Z
|
2022-03-25T10:44:15.000Z
|
ML/equationGen.py
|
Maurya232Abhishek/Python-repository-for-basics
|
3dcec5c529a0847df07c9dcc1424675754ce6376
|
[
"MIT"
] | 6
|
2021-07-16T04:56:41.000Z
|
2022-02-16T04:40:06.000Z
|
#from sympy import symbols,diff
import cv2
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
import numpy as np
"""class PredictorImage:
def __init__(self,pic,label):
self.pic = pic
self.label = label"""
def readimg(path):
a= cv2.imread(path)
return a
def showimg(img,imgname):
cv2.imshow(imgname,img)
cv2.waitKey(0)
def f(a):  # pixel-pattern score: each black pixel at index i adds (i+1)**2 + 1
sum=0
for i in range(len(a)):
if a[i] == 1:
sum += (i+1)**2
sum +=1
return sum
def getThreshold(pic):
mr,mc,mz=pic.shape
sum = 0
for r in range(mr):
for c in range(mc):
avg = (int(pic[r][c][0])+int(pic[r][c][1])+int(pic[r][c][2]))//3
sum += avg
return int(sum//(mr*mc))
def blackwhite(img):
pic = img.copy()
t= getThreshold(pic)
mr,mc,mz=pic.shape
for r in range(mr):
for c in range(mc):
avg = (int(pic[r][c][0]) + int(pic[r][c][1]) + int(pic[r][c][2])) // 3
if avg <= t:
pic[r][c]=[0,0,0]
else:
pic[r][c]=[255,255,255]
return pic
def grayscale(img):
pic = img.copy()
mr,mc,mz=pic.shape
for r in range(mr):
for c in range(mc):
            avg = (int(pic[r][c][0]) + int(pic[r][c][1]) + int(pic[r][c][2])) // 3
pic[r][c] = [avg,avg,avg]
return pic
def onedarray(pic):
mr,mc,mz=pic.shape
l=[]
#count =1;
for r in range(mr):
for c in range(mc):
#print(count)
if pic[r][c][1] == 255:
l.append(0)
else:
l.append(1)
#count +=1
return l
def imgvalue(img):
bw = blackwhite(img)
oned = onedarray(bw)
return f(oned)
def classification(n,imgvalue1,imgvalue2,imgvalue3,imgvalue4,imgvalue5):
l=[]
for i in range(len(n)):
if n[i] <= imgvalue4:
l.append(4)
elif n[i] <= imgvalue2:
l.append(2)
elif n[i] <= imgvalue3:
l.append(3)
elif n[i] <= imgvalue5:
l.append(5)
elif n[i] <= imgvalue1:
l.append(1)
return l
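# Comment added for clarity (logic below unchanged): each reference picture
# one.PNG .. five.PNG is reduced to a single scalar via imgvalue() (threshold ->
# black/white -> f() over the flattened pixels), and classification() then
# buckets arbitrary values in `imgv` against those five reference scalars.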
#listofpics=[PredictorImage(readimg("one.png",1))]
pic1 = readimg("one.PNG")
showimg(pic1,"One")
pic2 = readimg("two.PNG")
pic3 = readimg("three.PNG")
pic4 = readimg("four.PNG")
pic5 = readimg("five.PNG")
showimg(pic5,"five")
print("1",imgvalue(pic1))
print("2",imgvalue(pic2))
print("3",imgvalue(pic3))
print("4",imgvalue(pic4))
print("5",imgvalue(pic5))
l = [1,2,3,4,5]
p = [imgvalue(pic1),imgvalue(pic2),imgvalue(pic3),imgvalue(pic4),imgvalue(pic5)]
imgv = np.linspace(4646160000,7994260792,200)
c=classification(imgv,p[0],p[1],p[2],p[3],p[4])
print(len(c))
print(len(imgv))
plt.plot(imgv,c,color="red",marker="o")
plt.show()
| 25.577982
| 86
| 0.539096
|
72c7a4e7bc9c0b19204a0f02913f8a083242441c
| 123
|
py
|
Python
|
leet/settings.py
|
Syhen/leet-code
|
55ab719de012693588da878cc97f20d9b9f32ab5
|
[
"MIT"
] | null | null | null |
leet/settings.py
|
Syhen/leet-code
|
55ab719de012693588da878cc97f20d9b9f32ab5
|
[
"MIT"
] | null | null | null |
leet/settings.py
|
Syhen/leet-code
|
55ab719de012693588da878cc97f20d9b9f32ab5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
create on 2020-04-16 12:26
author @66492
"""
import os
BASE_PATH = os.path.dirname(__file__)
| 12.3
| 37
| 0.642276
|
84ac51530b496793069539f88e30d15f0fee4c01
| 4,071
|
py
|
Python
|
influxdb_client/domain/run_log.py
|
kelseiv/influxdb-client-python
|
9a0d2d659157cca96f6a04818fdeb215d699bdd7
|
[
"MIT"
] | 1
|
2021-06-06T10:39:47.000Z
|
2021-06-06T10:39:47.000Z
|
influxdb_client/domain/run_log.py
|
kelseiv/influxdb-client-python
|
9a0d2d659157cca96f6a04818fdeb215d699bdd7
|
[
"MIT"
] | null | null | null |
influxdb_client/domain/run_log.py
|
kelseiv/influxdb-client-python
|
9a0d2d659157cca96f6a04818fdeb215d699bdd7
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Influx API Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class RunLog(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'run_id': 'str',
'time': 'str',
'message': 'str'
}
attribute_map = {
'run_id': 'runID',
'time': 'time',
'message': 'message'
}
def __init__(self, run_id=None, time=None, message=None): # noqa: E501
"""RunLog - a model defined in OpenAPI""" # noqa: E501
self._run_id = None
self._time = None
self._message = None
self.discriminator = None
if run_id is not None:
self.run_id = run_id
if time is not None:
self.time = time
if message is not None:
self.message = message
@property
def run_id(self):
"""Gets the run_id of this RunLog. # noqa: E501
:return: The run_id of this RunLog. # noqa: E501
:rtype: str
"""
return self._run_id
@run_id.setter
def run_id(self, run_id):
"""Sets the run_id of this RunLog.
:param run_id: The run_id of this RunLog. # noqa: E501
:type: str
"""
self._run_id = run_id
@property
def time(self):
"""Gets the time of this RunLog. # noqa: E501
:return: The time of this RunLog. # noqa: E501
:rtype: str
"""
return self._time
@time.setter
def time(self, time):
"""Sets the time of this RunLog.
:param time: The time of this RunLog. # noqa: E501
:type: str
"""
self._time = time
@property
def message(self):
"""Gets the message of this RunLog. # noqa: E501
:return: The message of this RunLog. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this RunLog.
:param message: The message of this RunLog. # noqa: E501
:type: str
"""
self._message = message
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RunLog):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 24.672727
| 124
| 0.53918
|
dc14657c3f3e415b728c5c54fd46b0f78a102754
| 3,344
|
py
|
Python
|
bitcoin-talk-crawler/bitcoin_talk_crawler/settings.py
|
daedalus/scraper
|
7052024d0113dc71896eafba3843307054cf4394
|
[
"MIT"
] | 11
|
2017-08-11T09:43:56.000Z
|
2021-03-27T13:47:48.000Z
|
bitcoin-talk-crawler/bitcoin_talk_crawler/settings.py
|
Georgehe4/scraper
|
7052024d0113dc71896eafba3843307054cf4394
|
[
"MIT"
] | 1
|
2021-11-13T12:22:54.000Z
|
2021-11-13T12:22:54.000Z
|
bitcoin_talk_crawler/settings.py
|
goldmar/bitcoin-talk-crawler
|
f5b5b229e61d8721165a0ee2a0add7101c316bbf
|
[
"Apache-2.0"
] | 7
|
2018-01-26T02:31:55.000Z
|
2021-11-13T12:14:20.000Z
|
# -*- coding: utf-8 -*-
# Scrapy settings for bitcoin_talk_crawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'bitcoin_talk_crawler'
SPIDER_MODULES = ['bitcoin_talk_crawler.spiders']
NEWSPIDER_MODULE = 'bitcoin_talk_crawler.spiders'
# breadth-first crawl
DEPTH_PRIORITY = 1
SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleFifoDiskQueue'
SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.FifoMemoryQueue'
# LOGGING
LOG_STDOUT = True
LOG_FILE = 'scrapy_log.txt'
LOG_LEVEL = 'INFO'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'bitcoin_talk_crawler (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS=64
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY=1
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'bitcoin_talk_crawler.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'bitcoin_talk_crawler.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'bitcoin_talk_crawler.pipelines.BitcoinTalkCrawlerPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
| 35.2
| 109
| 0.794557
|
34b58b03deb2c99fa3a1ca8d6a683116576c3af9
| 8,692
|
py
|
Python
|
python2/tests/DirectSMBConnectionTests/test_storefile.py
|
0x4a47/pysmb
|
79dbb2039b97589cae913769fcfe27035e302d40
|
[
"Zlib"
] | null | null | null |
python2/tests/DirectSMBConnectionTests/test_storefile.py
|
0x4a47/pysmb
|
79dbb2039b97589cae913769fcfe27035e302d40
|
[
"Zlib"
] | null | null | null |
python2/tests/DirectSMBConnectionTests/test_storefile.py
|
0x4a47/pysmb
|
79dbb2039b97589cae913769fcfe27035e302d40
|
[
"Zlib"
] | null | null | null |
# -*- coding: utf-8 -*-
import os, tempfile, random, time
from StringIO import StringIO
from smb.SMBConnection import SMBConnection
from smb.smb2_constants import SMB2_DIALECT_2
from util import getConnectionInfo
from nose.tools import with_setup
from smb import smb_structs
try:
import hashlib
def MD5(): return hashlib.md5()
except ImportError:
import md5
def MD5(): return md5.new()
conn = None
TEST_FILENAME = os.path.join(os.path.dirname(__file__), os.pardir, 'SupportFiles', 'binary.dat')
TEST_FILESIZE = 256000
TEST_DIGEST = 'bb6303f76e29f354b6fdf6ef58587e48'
def setup_func_SMB1():
global conn
smb_structs.SUPPORT_SMB2 = smb_structs.SUPPORT_SMB2x = False
info = getConnectionInfo()
conn = SMBConnection(info['user'], info['password'], info['client_name'], info['server_name'], use_ntlm_v2 = True, is_direct_tcp = True)
assert conn.connect(info['server_ip'], info['server_port'])
def setup_func_SMB2():
global conn
smb_structs.SUPPORT_SMB2 = True
smb_structs.SUPPORT_SMB2x = False
info = getConnectionInfo()
conn = SMBConnection(info['user'], info['password'], info['client_name'], info['server_name'], use_ntlm_v2 = True, is_direct_tcp = True)
assert conn.connect(info['server_ip'], info['server_port'])
def setup_func_SMB2x():
global conn
smb_structs.SUPPORT_SMB2 = smb_structs.SUPPORT_SMB2x = True
info = getConnectionInfo()
conn = SMBConnection(info['user'], info['password'], info['client_name'], info['server_name'], use_ntlm_v2 = True, is_direct_tcp = True)
assert conn.connect(info['server_ip'], info['server_port'])
def teardown_func():
global conn
conn.close()
@with_setup(setup_func_SMB1, teardown_func)
def test_store_long_filename_SMB1():
global conn
filename = os.sep + 'StoreTest %d-%d.dat' % ( time.time(), random.randint(0, 10000) )
filesize = conn.storeFile('smbtest', filename, open(TEST_FILENAME, 'rb'))
assert filesize == TEST_FILESIZE
entries = conn.listPath('smbtest', os.path.dirname(filename.replace('/', os.sep)))
filenames = map(lambda e: e.filename, entries)
assert os.path.basename(filename.replace('/', os.sep)) in filenames
buf = StringIO()
file_attributes, file_size = conn.retrieveFile('smbtest', filename, buf)
assert file_size == TEST_FILESIZE
md = MD5()
md.update(buf.getvalue())
assert md.hexdigest() == TEST_DIGEST
buf.close()
conn.deleteFiles('smbtest', filename)
@with_setup(setup_func_SMB1, teardown_func)
def test_store_from_offset_SMB1():
global conn
filename = os.sep + 'StoreTest %d-%d.dat' % ( time.time(), random.randint(0, 10000) )
buf = StringIO('0123456789')
filesize = conn.storeFile('smbtest', filename, buf)
assert filesize == 10
buf = StringIO('aa')
pos = conn.storeFileFromOffset('smbtest', filename, buf, 5)
assert pos == 7
buf = StringIO()
file_attributes, file_size = conn.retrieveFile('smbtest', filename, buf)
assert file_size == 10
assert buf.getvalue() == '01234aa789'
buf.close()
conn.deleteFiles('smbtest', filename)
@with_setup(setup_func_SMB2, teardown_func)
def test_store_long_filename_SMB2():
global conn
assert conn.smb2_dialect == SMB2_DIALECT_2
filename = os.sep + 'StoreTest %d-%d.dat' % ( time.time(), random.randint(0, 10000) )
filesize = conn.storeFile('smbtest', filename, open(TEST_FILENAME, 'rb'))
assert filesize == TEST_FILESIZE
entries = conn.listPath('smbtest', os.path.dirname(filename.replace('/', os.sep)))
filenames = map(lambda e: e.filename, entries)
assert os.path.basename(filename.replace('/', os.sep)) in filenames
buf = StringIO()
file_attributes, file_size = conn.retrieveFile('smbtest', filename, buf)
assert file_size == TEST_FILESIZE
md = MD5()
md.update(buf.getvalue())
assert md.hexdigest() == TEST_DIGEST
buf.close()
conn.deleteFiles('smbtest', filename)
@with_setup(setup_func_SMB2x, teardown_func)
def test_store_long_filename_SMB2x():
global conn
assert conn.smb2_dialect != SMB2_DIALECT_2
filename = os.sep + 'StoreTest %d-%d.dat' % ( time.time(), random.randint(0, 10000) )
filesize = conn.storeFile('smbtest', filename, open(TEST_FILENAME, 'rb'))
assert filesize == TEST_FILESIZE
entries = conn.listPath('smbtest', os.path.dirname(filename.replace('/', os.sep)))
filenames = map(lambda e: e.filename, entries)
assert os.path.basename(filename.replace('/', os.sep)) in filenames
buf = StringIO()
file_attributes, file_size = conn.retrieveFile('smbtest', filename, buf)
assert file_size == TEST_FILESIZE
md = MD5()
md.update(buf.getvalue())
assert md.hexdigest() == TEST_DIGEST
buf.close()
conn.deleteFiles('smbtest', filename)
@with_setup(setup_func_SMB1, teardown_func)
def test_store_unicode_filename_SMB1():
global conn
filename = os.sep + u'上载测试 %d-%d.dat' % ( time.time(), random.randint(0, 10000) )
filesize = conn.storeFile('smbtest', filename, open(TEST_FILENAME, 'rb'))
assert filesize == TEST_FILESIZE
entries = conn.listPath('smbtest', os.path.dirname(filename.replace('/', os.sep)))
filenames = map(lambda e: e.filename, entries)
assert os.path.basename(filename.replace('/', os.sep)) in filenames
buf = StringIO()
file_attributes, file_size = conn.retrieveFile('smbtest', filename, buf)
assert file_size == TEST_FILESIZE
md = MD5()
md.update(buf.getvalue())
assert md.hexdigest() == TEST_DIGEST
buf.close()
conn.deleteFiles('smbtest', filename)
@with_setup(setup_func_SMB2, teardown_func)
def test_store_unicode_filename_SMB2():
global conn
assert conn.smb2_dialect == SMB2_DIALECT_2
filename = os.sep + u'上载测试 %d-%d.dat' % ( time.time(), random.randint(0, 10000) )
filesize = conn.storeFile('smbtest', filename, open(TEST_FILENAME, 'rb'))
assert filesize == TEST_FILESIZE
entries = conn.listPath('smbtest', os.path.dirname(filename.replace('/', os.sep)))
filenames = map(lambda e: e.filename, entries)
assert os.path.basename(filename.replace('/', os.sep)) in filenames
buf = StringIO()
file_attributes, file_size = conn.retrieveFile('smbtest', filename, buf)
assert file_size == TEST_FILESIZE
md = MD5()
md.update(buf.getvalue())
assert md.hexdigest() == TEST_DIGEST
buf.close()
conn.deleteFiles('smbtest', filename)
@with_setup(setup_func_SMB2x, teardown_func)
def test_store_unicode_filename_SMB2x():
global conn
assert conn.smb2_dialect != SMB2_DIALECT_2
filename = os.sep + u'上载测试 %d-%d.dat' % ( time.time(), random.randint(0, 10000) )
filesize = conn.storeFile('smbtest', filename, open(TEST_FILENAME, 'rb'))
assert filesize == TEST_FILESIZE
entries = conn.listPath('smbtest', os.path.dirname(filename.replace('/', os.sep)))
filenames = map(lambda e: e.filename, entries)
assert os.path.basename(filename.replace('/', os.sep)) in filenames
buf = StringIO()
file_attributes, file_size = conn.retrieveFile('smbtest', filename, buf)
assert file_size == TEST_FILESIZE
md = MD5()
md.update(buf.getvalue())
assert md.hexdigest() == TEST_DIGEST
buf.close()
conn.deleteFiles('smbtest', filename)
@with_setup(setup_func_SMB2, teardown_func)
def test_store_from_offset_SMB2():
global conn
assert conn.smb2_dialect == SMB2_DIALECT_2
filename = os.sep + 'StoreTest %d-%d.dat' % ( time.time(), random.randint(0, 10000) )
buf = StringIO('0123456789')
filesize = conn.storeFile('smbtest', filename, buf)
assert filesize == 10
buf = StringIO('aa')
pos = conn.storeFileFromOffset('smbtest', filename, buf, 5)
assert pos == 7
buf = StringIO()
file_attributes, file_size = conn.retrieveFile('smbtest', filename, buf)
assert file_size == 10
assert buf.getvalue() == '01234aa789'
buf.close()
conn.deleteFiles('smbtest', filename)
@with_setup(setup_func_SMB2x, teardown_func)
def test_store_from_offset_SMB2x():
global conn
assert conn.smb2_dialect != SMB2_DIALECT_2
filename = os.sep + 'StoreTest %d-%d.dat' % ( time.time(), random.randint(0, 10000) )
buf = StringIO('0123456789')
filesize = conn.storeFile('smbtest', filename, buf)
assert filesize == 10
buf = StringIO('aa')
pos = conn.storeFileFromOffset('smbtest', filename, buf, 5)
print(pos)
assert pos == 7
buf = StringIO()
file_attributes, file_size = conn.retrieveFile('smbtest', filename, buf)
assert file_size == 10
assert buf.getvalue() == '01234aa789'
buf.close()
conn.deleteFiles('smbtest', filename)
| 32.192593
| 140
| 0.692706
|
ce1ef3532cc9075a0b49a3cc463a0801d71434f9
| 617
|
py
|
Python
|
Chapter16/example1.py
|
DeeMATT/AdvancedPythonProgramming
|
97091dae4f177fd2c06b20265be2aedf9d1c41e7
|
[
"MIT"
] | 66
|
2018-11-21T02:07:16.000Z
|
2021-11-08T13:13:31.000Z
|
Chapter16/example1.py
|
DeeMATT/AdvancedPythonProgramming
|
97091dae4f177fd2c06b20265be2aedf9d1c41e7
|
[
"MIT"
] | 2
|
2020-03-11T19:56:39.000Z
|
2021-11-15T14:07:05.000Z
|
Chapter16/example1.py
|
DeeMATT/AdvancedPythonProgramming
|
97091dae4f177fd2c06b20265be2aedf9d1c41e7
|
[
"MIT"
] | 58
|
2018-11-03T14:06:10.000Z
|
2022-03-17T14:06:55.000Z
|
# ch9/example1.py
from math import sqrt
def is_prime(x):
print('Processing %i...' % x)
if x < 2:
print('%i is not a prime number.' % x)
elif x == 2:
print('%i is a prime number.' % x)
elif x % 2 == 0:
print('%i is not a prime number.' % x)
else:
limit = int(sqrt(x)) + 1
for i in range(3, limit, 2):
if x % i == 0:
print('%i is not a prime number.' % x)
return
print('%i is a prime number.' % x)
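# The branches above implement plain trial division: 2 and even numbers are
# handled separately, so only odd candidate divisors from 3 up to sqrt(x)
# need to be tested.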
if __name__ == '__main__':
is_prime(9637529763296797)
is_prime(427920331)
is_prime(157)
| 19.903226
| 54
| 0.49919
|
4b68bc9e67f072791486b9c244996278742510b6
| 1,076
|
py
|
Python
|
utils.py
|
KuanHaoHuang/tbrain-tomofun-audio-classification
|
6040c8d58f6738795596c166eb008d9c21c05cd1
|
[
"MIT"
] | 2
|
2021-08-17T10:57:58.000Z
|
2021-09-01T01:32:13.000Z
|
utils.py
|
KuanHaoHuang/tbrain-tomofun-audio-classification
|
6040c8d58f6738795596c166eb008d9c21c05cd1
|
[
"MIT"
] | null | null | null |
utils.py
|
KuanHaoHuang/tbrain-tomofun-audio-classification
|
6040c8d58f6738795596c166eb008d9c21c05cd1
|
[
"MIT"
] | 3
|
2021-09-01T01:32:22.000Z
|
2021-12-13T01:44:52.000Z
|
import librosa
import numpy as np
import pickle as pkl
import re
from pathlib import Path
import torch
import torchvision
import torchaudio
from PIL import Image
SAMPLING_RATE = 8000
num_channels = 3
window_sizes = [25, 50, 100]
hop_sizes = [10, 25, 50]
eps = 1e-6
limits = ((-2, 2), (0.9, 1.2))
def extract_feature(file_path):
clip, sr = librosa.load(file_path, sr=SAMPLING_RATE)
specs = []
for i in range(num_channels):
window_length = int(round(window_sizes[i]*SAMPLING_RATE/1000))
hop_length = int(round(hop_sizes[i]*SAMPLING_RATE/1000))
clip = torch.Tensor(clip)
spec = torchaudio.transforms.MelSpectrogram(sample_rate=SAMPLING_RATE, n_fft=4410, win_length=window_length, hop_length=hop_length, n_mels=128)(clip)
spec = spec.numpy()
spec = np.log(spec+eps)
spec = np.asarray(torchvision.transforms.Resize((128, 250))(Image.fromarray(spec)))
specs.append(spec)
new_entry = {}
new_entry["audio"] = clip.numpy()
new_entry["values"] = np.array(specs)
return new_entry
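# Hedged usage sketch (not part of the original file); "sample.wav" is a
# placeholder path rather than an asset of this repository.
if __name__ == "__main__":
    entry = extract_feature("sample.wav")
    print(entry["audio"].shape, entry["values"].shape)   # values: (3, 128, 250)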
| 31.647059
| 157
| 0.685874
|
2f9e416e6df279574c37fb34c97013c6d94c59c8
| 2,841
|
py
|
Python
|
tests/database/test_psycopg2.py
|
uranusjr/sqlian
|
8f029e91af032e23ebb95cb599aa7267ebe75e05
|
[
"0BSD"
] | null | null | null |
tests/database/test_psycopg2.py
|
uranusjr/sqlian
|
8f029e91af032e23ebb95cb599aa7267ebe75e05
|
[
"0BSD"
] | null | null | null |
tests/database/test_psycopg2.py
|
uranusjr/sqlian
|
8f029e91af032e23ebb95cb599aa7267ebe75e05
|
[
"0BSD"
] | null | null | null |
import pytest
from sqlian import connect, star
from sqlian.postgresql import Psycopg2Database
psycopg2 = pytest.importorskip('psycopg2')
@pytest.fixture(scope='module')
def database_name(request):
try:
conn = psycopg2.connect(database='postgres')
except psycopg2.OperationalError:
pytest.skip('database unavailable')
return None
conn.autocommit = True # Required for CREATE DATABASE.
database_name = 'test_sqlian_psycopg2'
with conn.cursor() as cursor:
cursor.execute('CREATE DATABASE "{}"'.format(database_name))
def finalize():
with conn.cursor() as cursor:
cursor.execute('DROP DATABASE "{}"'.format(database_name))
conn.close()
request.addfinalizer(finalize)
return database_name
@pytest.fixture
def db(request, database_name):
db = Psycopg2Database(database=database_name)
with db.cursor() as cursor:
cursor.execute('''
DROP TABLE IF EXISTS "person"
''')
cursor.execute('''
CREATE TABLE "person" (
"name" VARCHAR(10),
"occupation" VARCHAR(10),
"main_language" VARCHAR(10))
''')
cursor.execute('''
INSERT INTO "person" ("name", "occupation", "main_language")
VALUES ('Mosky', 'Pinkoi', 'Python')
''')
def finalize():
db.close()
request.addfinalizer(finalize)
return db
def test_select(db):
rows = db.select(star, from_='person')
record, = list(rows)
assert record[0] == 'Mosky'
assert record['occupation'] == 'Pinkoi'
assert record.main_language == 'Python'
def test_insert(db):
rows = db.insert('person', values={
'name': 'Keith',
'occupation': 'iCHEF',
'main_language': 'Python',
})
with pytest.raises(db.ProgrammingError) as ctx:
len(rows)
assert str(ctx.value) == 'no results to fetch'
names = [r.name for r in db.select('name', from_='person')]
assert names == ['Mosky', 'Keith']
@pytest.mark.parametrize('scheme', ['postgresql', 'psycopg2+postgresql'])
def test_connect(database_name, scheme):
db = connect('{scheme}:///{db}?client_encoding=utf8'.format(
scheme=scheme, db=database_name,
))
assert db.is_open()
with db.cursor() as cursor:
cursor.execute('''CREATE TABLE "person" ("name" TEXT)''')
cursor.execute('''INSERT INTO "person" VALUES ('Mosky')''')
record, = db.select(star, from_='person')
assert record.name == 'Mosky'
@pytest.mark.parametrize('scheme', ['postgresql', 'psycopg2+postgresql'])
def test_connect_failure(database_name, scheme):
with pytest.raises(psycopg2.ProgrammingError):
connect('{scheme}:///{db}?invalid_option=1'.format(
scheme=scheme, db=database_name,
))
| 28.41
| 73
| 0.623372
|
b10e1fee45811ece6ed4d199a4b72e2998fcfce8
| 3,211
|
py
|
Python
|
examples/variational_autoencoder.py
|
codeheadshopon/keras
|
3a4c683d5c83b53d401f0eef6d930a23ad3db7d7
|
[
"MIT"
] | 1
|
2016-08-29T15:07:53.000Z
|
2016-08-29T15:07:53.000Z
|
examples/variational_autoencoder.py
|
sabirdvd/keras
|
3a4c683d5c83b53d401f0eef6d930a23ad3db7d7
|
[
"MIT"
] | null | null | null |
examples/variational_autoencoder.py
|
sabirdvd/keras
|
3a4c683d5c83b53d401f0eef6d930a23ad3db7d7
|
[
"MIT"
] | 1
|
2016-09-07T13:18:58.000Z
|
2016-09-07T13:18:58.000Z
|
'''This script demonstrates how to build a variational autoencoder with Keras.
Reference: "Auto-Encoding Variational Bayes" https://arxiv.org/abs/1312.6114
'''
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input, Dense, Lambda
from keras.models import Model
from keras import backend as K
from keras import objectives
from keras.datasets import mnist
batch_size = 100
original_dim = 784
latent_dim = 2
intermediate_dim = 256
nb_epoch = 50
x = Input(batch_shape=(batch_size, original_dim))
h = Dense(intermediate_dim, activation='relu')(x)
z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.)
return z_mean + K.exp(z_log_var / 2) * epsilon
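# The function above is the reparameterization trick: rather than sampling
# z ~ N(z_mean, exp(z_log_var)) directly, it draws epsilon ~ N(0, 1) and
# computes z = z_mean + exp(z_log_var / 2) * epsilon, which keeps the sampling
# step differentiable with respect to z_mean and z_log_var.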
# note that "output_shape" isn't necessary with the TensorFlow backend
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
# we instantiate these layers separately so as to reuse them later
decoder_h = Dense(intermediate_dim, activation='relu')
decoder_mean = Dense(original_dim, activation='sigmoid')
h_decoded = decoder_h(z)
x_decoded_mean = decoder_mean(h_decoded)
def vae_loss(x, x_decoded_mean):
xent_loss = original_dim * objectives.binary_crossentropy(x, x_decoded_mean)
kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return xent_loss + kl_loss
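# xent_loss is the per-image reconstruction term; kl_loss is the closed-form
# KL divergence between N(z_mean, exp(z_log_var)) and the standard normal prior.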
vae = Model(x, x_decoded_mean)
vae.compile(optimizer='rmsprop', loss=vae_loss)
# train the VAE on MNIST digits
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
vae.fit(x_train, x_train,
shuffle=True,
nb_epoch=nb_epoch,
batch_size=batch_size,
validation_data=(x_test, x_test))
# build a model to project inputs on the latent space
encoder = Model(x, z_mean)
# display a 2D plot of the digit classes in the latent space
x_test_encoded = encoder.predict(x_test, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
plt.colorbar()
plt.show()
# build a digit generator that can sample from the learned distribution
decoder_input = Input(shape=(latent_dim,))
_h_decoded = decoder_h(decoder_input)
_x_decoded_mean = decoder_mean(_h_decoded)
generator = Model(decoder_input, _x_decoded_mean)
# display a 2D manifold of the digits
n = 15 # figure with 15x15 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# we will sample n points within [-15, 15] standard deviations
grid_x = np.linspace(-15, 15, n)
grid_y = np.linspace(-15, 15, n)
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
z_sample = np.array([[xi, yi]])
x_decoded = generator.predict(z_sample)
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure)
plt.show()
| 32.765306
| 89
| 0.728745
|
bf9bc91b6bbcddd8d9a2ac17529fb4bb255063de
| 1,616
|
py
|
Python
|
Python/Caesar_Cipher/cipher.py
|
iamakkkhil/Rotten-Scripts
|
116ae502271d699db88add5fd1cf733d01134b7d
|
[
"MIT"
] | 1,127
|
2020-02-16T04:14:00.000Z
|
2022-03-31T21:37:24.000Z
|
Python/Caesar_Cipher/cipher.py
|
iamakkkhil/Rotten-Scripts
|
116ae502271d699db88add5fd1cf733d01134b7d
|
[
"MIT"
] | 1,123
|
2020-06-20T04:00:11.000Z
|
2022-03-31T13:23:45.000Z
|
Python/Caesar_Cipher/cipher.py
|
iamakkkhil/Rotten-Scripts
|
116ae502271d699db88add5fd1cf733d01134b7d
|
[
"MIT"
] | 669
|
2020-05-30T16:14:43.000Z
|
2022-03-31T14:36:11.000Z
|
"""
A Python Script to implement Caesar Cipher. The technique is really basic.
# It shifts every character by a certain number (Shift Key)
# This number is secret and only the sender, receiver knows it.
# Using Such a Key, the message can be easily decoded as well.
# This Script Focuses on Encryption Part
"""
def cipher(input_string, shift_key):
"""
Implementation of Crypto Technique.
Params: input_string (required), shift_key (required)
Returns: encrypted_string
    :type input_string: str
:type shift_key: int
"""
# Initialise str to store the encrypted message
encrypted_string = ""
    for text in input_string:
"""
There are 3 possibilities
- Lower Case
- Upper Case
- Blank Space
"""
if text == " ":
# For Blank Space, encrypted as it is
encrypted_string += text
elif text.isupper():
# For Upper Case
encrypted_string = encrypted_string + chr(
(ord(text) + shift_key - 65) % 26 + 65
)
else:
# For Lower Case
encrypted_string = encrypted_string + chr(
(ord(text) + shift_key - 97) % 26 + 97
)
return encrypted_string
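# Hedged companion sketch (not part of the original script, which focuses on
# encryption): decrypting with the same technique is just encrypting again with
# the complementary shift key.
def decipher(encrypted_string, shift_key):
    """
    Reverses cipher() by shifting each letter back.
    Params: encrypted_string (required), shift_key (required)
    Returns: decrypted_string
    """
    return cipher(encrypted_string, 26 - (shift_key % 26))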
if __name__ == "__main__":
"""
Function Calling
"""
    input_string = input("Enter the text to be encrypted: ")
    shift = int(input("Enter the shift key: "))
    print("Text before Encryption: ", input_string)
    print("Shift Key: ", shift)
    print("Encrypted text: ", cipher(input_string, shift))
| 30.490566
| 74
| 0.592203
|
b13cfd7d37030373fe922c99d4ca46f93ccbcf6a
| 53,580
|
py
|
Python
|
homeassistant/components/alexa/capabilities.py
|
lekobob/home-assistant
|
31996120dd19541499d868f8f97c1ecb0a7dd8aa
|
[
"Apache-2.0"
] | 2
|
2018-07-17T06:40:53.000Z
|
2020-08-11T09:44:09.000Z
|
homeassistant/components/alexa/capabilities.py
|
lekobob/home-assistant
|
31996120dd19541499d868f8f97c1ecb0a7dd8aa
|
[
"Apache-2.0"
] | 1
|
2020-07-29T22:08:40.000Z
|
2020-07-29T22:08:40.000Z
|
homeassistant/components/alexa/capabilities.py
|
lekobob/home-assistant
|
31996120dd19541499d868f8f97c1ecb0a7dd8aa
|
[
"Apache-2.0"
] | 6
|
2019-12-01T19:06:52.000Z
|
2020-09-17T00:57:06.000Z
|
"""Alexa capabilities."""
import logging
from homeassistant.components import (
cover,
fan,
image_processing,
input_number,
light,
vacuum,
)
from homeassistant.components.alarm_control_panel import ATTR_CODE_FORMAT, FORMAT_NUMBER
import homeassistant.components.climate.const as climate
import homeassistant.components.media_player.const as media_player
from homeassistant.const import (
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
ATTR_UNIT_OF_MEASUREMENT,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
STATE_UNLOCKED,
)
import homeassistant.util.color as color_util
import homeassistant.util.dt as dt_util
from .const import (
API_TEMP_UNITS,
API_THERMOSTAT_MODES,
API_THERMOSTAT_PRESETS,
DATE_FORMAT,
PERCENTAGE_FAN_MAP,
Inputs,
)
from .errors import UnsupportedProperty
from .resources import (
AlexaCapabilityResource,
AlexaGlobalCatalog,
AlexaModeResource,
AlexaPresetResource,
AlexaSemantics,
)
_LOGGER = logging.getLogger(__name__)
class AlexaCapability:
"""Base class for Alexa capability interfaces.
The Smart Home Skills API defines a number of "capability interfaces",
roughly analogous to domains in Home Assistant. The supported interfaces
describe what actions can be performed on a particular device.
https://developer.amazon.com/docs/device-apis/message-guide.html
"""
supported_locales = {"en-US"}
def __init__(self, entity, instance=None):
"""Initialize an Alexa capability."""
self.entity = entity
self.instance = instance
def name(self):
"""Return the Alexa API name of this interface."""
raise NotImplementedError
@staticmethod
def properties_supported():
"""Return what properties this entity supports."""
return []
@staticmethod
def properties_proactively_reported():
"""Return True if properties asynchronously reported."""
return False
@staticmethod
def properties_retrievable():
"""Return True if properties can be retrieved."""
return False
@staticmethod
def properties_non_controllable():
"""Return True if non controllable."""
return None
@staticmethod
def get_property(name):
"""Read and return a property.
Return value should be a dict, or raise UnsupportedProperty.
Properties can also have a timeOfSample and uncertaintyInMilliseconds,
but returning those metadata is not yet implemented.
"""
raise UnsupportedProperty(name)
@staticmethod
def supports_deactivation():
"""Applicable only to scenes."""
return None
@staticmethod
def capability_proactively_reported():
"""Return True if the capability is proactively reported.
Set properties_proactively_reported() for proactively reported properties.
Applicable to DoorbellEventSource.
"""
return None
@staticmethod
def capability_resources():
"""Return the capability object.
Applicable to ToggleController, RangeController, and ModeController interfaces.
"""
return []
@staticmethod
def configuration():
"""Return the configuration object.
Applicable to the ThermostatController, SecurityControlPanel, ModeController, RangeController,
and EventDetectionSensor.
"""
return []
@staticmethod
def configurations():
"""Return the configurations object.
        The plural configurations object is different from the singular configuration object.
Applicable to EqualizerController interface.
"""
return []
@staticmethod
def inputs():
"""Applicable only to media players."""
return []
@staticmethod
def semantics():
"""Return the semantics object.
Applicable to ToggleController, RangeController, and ModeController interfaces.
"""
return []
@staticmethod
def supported_operations():
"""Return the supportedOperations object."""
return []
def serialize_discovery(self):
"""Serialize according to the Discovery API."""
result = {"type": "AlexaInterface", "interface": self.name(), "version": "3"}
instance = self.instance
if instance is not None:
result["instance"] = instance
properties_supported = self.properties_supported()
if properties_supported:
result["properties"] = {
"supported": self.properties_supported(),
"proactivelyReported": self.properties_proactively_reported(),
"retrievable": self.properties_retrievable(),
}
proactively_reported = self.capability_proactively_reported()
if proactively_reported is not None:
result["proactivelyReported"] = proactively_reported
non_controllable = self.properties_non_controllable()
if non_controllable is not None:
result["properties"]["nonControllable"] = non_controllable
supports_deactivation = self.supports_deactivation()
if supports_deactivation is not None:
result["supportsDeactivation"] = supports_deactivation
capability_resources = self.capability_resources()
if capability_resources:
result["capabilityResources"] = capability_resources
configuration = self.configuration()
if configuration:
result["configuration"] = configuration
        # The plural configurations object is different from the singular configuration object above.
configurations = self.configurations()
if configurations:
result["configurations"] = configurations
semantics = self.semantics()
if semantics:
result["semantics"] = semantics
supported_operations = self.supported_operations()
if supported_operations:
result["supportedOperations"] = supported_operations
inputs = self.inputs()
if inputs:
result["inputs"] = inputs
return result
def serialize_properties(self):
"""Return properties serialized for an API response."""
for prop in self.properties_supported():
prop_name = prop["name"]
# pylint: disable=assignment-from-no-return
prop_value = self.get_property(prop_name)
if prop_value is not None:
result = {
"name": prop_name,
"namespace": self.name(),
"value": prop_value,
"timeOfSample": dt_util.utcnow().strftime(DATE_FORMAT),
"uncertaintyInMilliseconds": 0,
}
instance = self.instance
if instance is not None:
result["instance"] = instance
yield result
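    # Illustrative note (not part of the original file): for a simple on/off
    # entity, AlexaPowerController(entity).serialize_discovery() produces
    # roughly
    #     {"type": "AlexaInterface", "interface": "Alexa.PowerController",
    #      "version": "3",
    #      "properties": {"supported": [{"name": "powerState"}],
    #                     "proactivelyReported": True, "retrievable": True}}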
class Alexa(AlexaCapability):
"""Implements Alexa Interface.
    Although endpoints implement this interface implicitly,
    the API suggests you should explicitly include this interface.
https://developer.amazon.com/docs/device-apis/alexa-interface.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"es-MX",
"fr-CA",
"fr-FR",
"it-IT",
"ja-JP",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa"
class AlexaEndpointHealth(AlexaCapability):
"""Implements Alexa.EndpointHealth.
https://developer.amazon.com/docs/smarthome/state-reporting-for-a-smart-home-skill.html#report-state-when-alexa-requests-it
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"it-IT",
"ja-JP",
}
def __init__(self, hass, entity):
"""Initialize the entity."""
super().__init__(entity)
self.hass = hass
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.EndpointHealth"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "connectivity"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "connectivity":
raise UnsupportedProperty(name)
if self.entity.state == STATE_UNAVAILABLE:
return {"value": "UNREACHABLE"}
return {"value": "OK"}
class AlexaPowerController(AlexaCapability):
"""Implements Alexa.PowerController.
https://developer.amazon.com/docs/device-apis/alexa-powercontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"it-IT",
"ja-JP",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.PowerController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "powerState"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "powerState":
raise UnsupportedProperty(name)
if self.entity.domain == climate.DOMAIN:
is_on = self.entity.state != climate.HVAC_MODE_OFF
else:
is_on = self.entity.state != STATE_OFF
return "ON" if is_on else "OFF"
class AlexaLockController(AlexaCapability):
"""Implements Alexa.LockController.
https://developer.amazon.com/docs/device-apis/alexa-lockcontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-US",
"es-ES",
"it-IT",
"ja-JP",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.LockController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "lockState"}]
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "lockState":
raise UnsupportedProperty(name)
if self.entity.state == STATE_LOCKED:
return "LOCKED"
if self.entity.state == STATE_UNLOCKED:
return "UNLOCKED"
return "JAMMED"
class AlexaSceneController(AlexaCapability):
"""Implements Alexa.SceneController.
https://developer.amazon.com/docs/device-apis/alexa-scenecontroller.html
"""
supported_locales = {
"de-DE",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"it-IT",
}
def __init__(self, entity, supports_deactivation):
"""Initialize the entity."""
super().__init__(entity)
self.supports_deactivation = lambda: supports_deactivation
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.SceneController"
class AlexaBrightnessController(AlexaCapability):
"""Implements Alexa.BrightnessController.
https://developer.amazon.com/docs/device-apis/alexa-brightnesscontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"it-IT",
"ja-JP",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.BrightnessController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "brightness"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "brightness":
raise UnsupportedProperty(name)
if "brightness" in self.entity.attributes:
return round(self.entity.attributes["brightness"] / 255.0 * 100)
return 0
class AlexaColorController(AlexaCapability):
"""Implements Alexa.ColorController.
https://developer.amazon.com/docs/device-apis/alexa-colorcontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"it-IT",
"ja-JP",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.ColorController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "color"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "color":
raise UnsupportedProperty(name)
hue, saturation = self.entity.attributes.get(light.ATTR_HS_COLOR, (0, 0))
return {
"hue": hue,
"saturation": saturation / 100.0,
"brightness": self.entity.attributes.get(light.ATTR_BRIGHTNESS, 0) / 255.0,
}
class AlexaColorTemperatureController(AlexaCapability):
"""Implements Alexa.ColorTemperatureController.
https://developer.amazon.com/docs/device-apis/alexa-colortemperaturecontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"it-IT",
"ja-JP",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.ColorTemperatureController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "colorTemperatureInKelvin"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "colorTemperatureInKelvin":
raise UnsupportedProperty(name)
if "color_temp" in self.entity.attributes:
return color_util.color_temperature_mired_to_kelvin(
self.entity.attributes["color_temp"]
)
return None
class AlexaPercentageController(AlexaCapability):
"""Implements Alexa.PercentageController.
https://developer.amazon.com/docs/device-apis/alexa-percentagecontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"it-IT",
"ja-JP",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.PercentageController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "percentage"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "percentage":
raise UnsupportedProperty(name)
if self.entity.domain == fan.DOMAIN:
speed = self.entity.attributes.get(fan.ATTR_SPEED)
return PERCENTAGE_FAN_MAP.get(speed, 0)
if self.entity.domain == cover.DOMAIN:
return self.entity.attributes.get(cover.ATTR_CURRENT_POSITION, 0)
return 0
class AlexaSpeaker(AlexaCapability):
"""Implements Alexa.Speaker.
https://developer.amazon.com/docs/device-apis/alexa-speaker.html
"""
supported_locales = {"de-DE", "en-AU", "en-CA", "en-GB", "en-IN", "en-US"}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.Speaker"
class AlexaStepSpeaker(AlexaCapability):
"""Implements Alexa.StepSpeaker.
https://developer.amazon.com/docs/device-apis/alexa-stepspeaker.html
"""
supported_locales = {"de-DE", "en-AU", "en-CA", "en-GB", "en-IN", "en-US"}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.StepSpeaker"
class AlexaPlaybackController(AlexaCapability):
"""Implements Alexa.PlaybackController.
https://developer.amazon.com/docs/device-apis/alexa-playbackcontroller.html
"""
supported_locales = {"de-DE", "en-AU", "en-CA", "en-GB", "en-IN", "en-US", "fr-FR"}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.PlaybackController"
def supported_operations(self):
"""Return the supportedOperations object.
Supported Operations: FastForward, Next, Pause, Play, Previous, Rewind, StartOver, Stop
"""
supported_features = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
operations = {
media_player.SUPPORT_NEXT_TRACK: "Next",
media_player.SUPPORT_PAUSE: "Pause",
media_player.SUPPORT_PLAY: "Play",
media_player.SUPPORT_PREVIOUS_TRACK: "Previous",
media_player.SUPPORT_STOP: "Stop",
}
supported_operations = []
for operation in operations:
if operation & supported_features:
supported_operations.append(operations[operation])
return supported_operations
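    # Illustrative note (not in the upstream file): for a hypothetical media_player
    # whose ATTR_SUPPORTED_FEATURES bitmask contains SUPPORT_PLAY | SUPPORT_PAUSE,
    # the bitwise AND above keeps only the matching entries, so this method would
    # return ["Pause", "Play"] (in the mapping's order).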
class AlexaInputController(AlexaCapability):
"""Implements Alexa.InputController.
https://developer.amazon.com/docs/device-apis/alexa-inputcontroller.html
"""
supported_locales = {"de-DE", "en-AU", "en-CA", "en-GB", "en-IN", "en-US"}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.InputController"
def inputs(self):
"""Return the list of valid supported inputs."""
source_list = self.entity.attributes.get(
media_player.ATTR_INPUT_SOURCE_LIST, []
)
input_list = []
for source in source_list:
formatted_source = (
source.lower().replace("-", "").replace("_", "").replace(" ", "")
)
            if formatted_source in Inputs.VALID_SOURCE_NAME_MAP:
input_list.append(
{"name": Inputs.VALID_SOURCE_NAME_MAP[formatted_source]}
)
return input_list
class AlexaTemperatureSensor(AlexaCapability):
"""Implements Alexa.TemperatureSensor.
https://developer.amazon.com/docs/device-apis/alexa-temperaturesensor.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"it-IT",
"ja-JP",
}
def __init__(self, hass, entity):
"""Initialize the entity."""
super().__init__(entity)
self.hass = hass
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.TemperatureSensor"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "temperature"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "temperature":
raise UnsupportedProperty(name)
unit = self.entity.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
temp = self.entity.state
if self.entity.domain == climate.DOMAIN:
unit = self.hass.config.units.temperature_unit
temp = self.entity.attributes.get(climate.ATTR_CURRENT_TEMPERATURE)
if temp in (STATE_UNAVAILABLE, STATE_UNKNOWN, None):
return None
try:
temp = float(temp)
except ValueError:
_LOGGER.warning("Invalid temp value %s for %s", temp, self.entity.entity_id)
return None
return {"value": temp, "scale": API_TEMP_UNITS[unit]}
class AlexaContactSensor(AlexaCapability):
"""Implements Alexa.ContactSensor.
The Alexa.ContactSensor interface describes the properties and events used
to report the state of an endpoint that detects contact between two
surfaces. For example, a contact sensor can report whether a door or window
is open.
https://developer.amazon.com/docs/device-apis/alexa-contactsensor.html
"""
supported_locales = {"en-CA", "en-US"}
def __init__(self, hass, entity):
"""Initialize the entity."""
super().__init__(entity)
self.hass = hass
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.ContactSensor"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "detectionState"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "detectionState":
raise UnsupportedProperty(name)
if self.entity.state == STATE_ON:
return "DETECTED"
return "NOT_DETECTED"
class AlexaMotionSensor(AlexaCapability):
"""Implements Alexa.MotionSensor.
https://developer.amazon.com/docs/device-apis/alexa-motionsensor.html
"""
supported_locales = {"en-CA", "en-US"}
def __init__(self, hass, entity):
"""Initialize the entity."""
super().__init__(entity)
self.hass = hass
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.MotionSensor"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "detectionState"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "detectionState":
raise UnsupportedProperty(name)
if self.entity.state == STATE_ON:
return "DETECTED"
return "NOT_DETECTED"
class AlexaThermostatController(AlexaCapability):
"""Implements Alexa.ThermostatController.
https://developer.amazon.com/docs/device-apis/alexa-thermostatcontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"it-IT",
"ja-JP",
}
def __init__(self, hass, entity):
"""Initialize the entity."""
super().__init__(entity)
self.hass = hass
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.ThermostatController"
def properties_supported(self):
"""Return what properties this entity supports."""
properties = [{"name": "thermostatMode"}]
supported = self.entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if supported & climate.SUPPORT_TARGET_TEMPERATURE:
properties.append({"name": "targetSetpoint"})
if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
properties.append({"name": "lowerSetpoint"})
properties.append({"name": "upperSetpoint"})
return properties
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if self.entity.state == STATE_UNAVAILABLE:
return None
if name == "thermostatMode":
preset = self.entity.attributes.get(climate.ATTR_PRESET_MODE)
if preset in API_THERMOSTAT_PRESETS:
mode = API_THERMOSTAT_PRESETS[preset]
else:
mode = API_THERMOSTAT_MODES.get(self.entity.state)
if mode is None:
_LOGGER.error(
"%s (%s) has unsupported state value '%s'",
self.entity.entity_id,
type(self.entity),
self.entity.state,
)
raise UnsupportedProperty(name)
return mode
unit = self.hass.config.units.temperature_unit
if name == "targetSetpoint":
temp = self.entity.attributes.get(ATTR_TEMPERATURE)
elif name == "lowerSetpoint":
temp = self.entity.attributes.get(climate.ATTR_TARGET_TEMP_LOW)
elif name == "upperSetpoint":
temp = self.entity.attributes.get(climate.ATTR_TARGET_TEMP_HIGH)
else:
raise UnsupportedProperty(name)
if temp is None:
return None
try:
temp = float(temp)
except ValueError:
_LOGGER.warning(
"Invalid temp value %s for %s in %s", temp, name, self.entity.entity_id
)
return None
return {"value": temp, "scale": API_TEMP_UNITS[unit]}
def configuration(self):
"""Return configuration object.
Translates climate HVAC_MODES and PRESETS to supported Alexa ThermostatMode Values.
ThermostatMode Value must be AUTO, COOL, HEAT, ECO, OFF, or CUSTOM.
"""
supported_modes = []
hvac_modes = self.entity.attributes.get(climate.ATTR_HVAC_MODES)
for mode in hvac_modes:
thermostat_mode = API_THERMOSTAT_MODES.get(mode)
if thermostat_mode:
supported_modes.append(thermostat_mode)
preset_modes = self.entity.attributes.get(climate.ATTR_PRESET_MODES)
if preset_modes:
for mode in preset_modes:
thermostat_mode = API_THERMOSTAT_PRESETS.get(mode)
if thermostat_mode:
supported_modes.append(thermostat_mode)
# Return False for supportsScheduling until supported with event listener in handler.
configuration = {"supportsScheduling": False}
if supported_modes:
configuration["supportedModes"] = supported_modes
return configuration
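    # Hedged sketch: for a hypothetical climate entity exposing hvac_modes
    # ["heat", "cool", "off"] and preset_modes ["eco"], the translation above would
    # typically yield {"supportsScheduling": False,
    # "supportedModes": ["HEAT", "COOL", "OFF", "ECO"]}, subject to what
    # API_THERMOSTAT_MODES and API_THERMOSTAT_PRESETS actually define.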
class AlexaPowerLevelController(AlexaCapability):
"""Implements Alexa.PowerLevelController.
https://developer.amazon.com/docs/device-apis/alexa-powerlevelcontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"fr-FR",
"it-IT",
"ja-JP",
}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.PowerLevelController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "powerLevel"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "powerLevel":
raise UnsupportedProperty(name)
if self.entity.domain == fan.DOMAIN:
speed = self.entity.attributes.get(fan.ATTR_SPEED)
return PERCENTAGE_FAN_MAP.get(speed, None)
return None
class AlexaSecurityPanelController(AlexaCapability):
"""Implements Alexa.SecurityPanelController.
https://developer.amazon.com/docs/device-apis/alexa-securitypanelcontroller.html
"""
supported_locales = {"en-AU", "en-CA", "en-IN", "en-US"}
def __init__(self, hass, entity):
"""Initialize the entity."""
super().__init__(entity)
self.hass = hass
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.SecurityPanelController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "armState"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "armState":
raise UnsupportedProperty(name)
arm_state = self.entity.state
if arm_state == STATE_ALARM_ARMED_HOME:
return "ARMED_STAY"
if arm_state == STATE_ALARM_ARMED_AWAY:
return "ARMED_AWAY"
if arm_state == STATE_ALARM_ARMED_NIGHT:
return "ARMED_NIGHT"
if arm_state == STATE_ALARM_ARMED_CUSTOM_BYPASS:
return "ARMED_STAY"
return "DISARMED"
def configuration(self):
"""Return configuration object with supported authorization types."""
code_format = self.entity.attributes.get(ATTR_CODE_FORMAT)
if code_format == FORMAT_NUMBER:
return {"supportedAuthorizationTypes": [{"type": "FOUR_DIGIT_PIN"}]}
return None
class AlexaModeController(AlexaCapability):
"""Implements Alexa.ModeController.
https://developer.amazon.com/docs/device-apis/alexa-modecontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"es-MX",
"fr-CA",
"fr-FR",
"it-IT",
"ja-JP",
}
def __init__(self, entity, instance, non_controllable=False):
"""Initialize the entity."""
super().__init__(entity, instance)
self._resource = None
self._semantics = None
self.properties_non_controllable = lambda: non_controllable
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.ModeController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "mode"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "mode":
raise UnsupportedProperty(name)
# Fan Direction
if self.instance == f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}":
mode = self.entity.attributes.get(fan.ATTR_DIRECTION, None)
if mode in (fan.DIRECTION_FORWARD, fan.DIRECTION_REVERSE, STATE_UNKNOWN):
return f"{fan.ATTR_DIRECTION}.{mode}"
# Cover Position
if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
# Return state instead of position when using ModeController.
mode = self.entity.state
if mode in (
cover.STATE_OPEN,
cover.STATE_OPENING,
cover.STATE_CLOSED,
cover.STATE_CLOSING,
STATE_UNKNOWN,
):
return f"{cover.ATTR_POSITION}.{mode}"
return None
def configuration(self):
"""Return configuration with modeResources."""
if isinstance(self._resource, AlexaCapabilityResource):
return self._resource.serialize_configuration()
return None
def capability_resources(self):
"""Return capabilityResources object."""
# Fan Direction Resource
if self.instance == f"{fan.DOMAIN}.{fan.ATTR_DIRECTION}":
self._resource = AlexaModeResource(
[AlexaGlobalCatalog.SETTING_DIRECTION], False
)
self._resource.add_mode(
f"{fan.ATTR_DIRECTION}.{fan.DIRECTION_FORWARD}", [fan.DIRECTION_FORWARD]
)
self._resource.add_mode(
f"{fan.ATTR_DIRECTION}.{fan.DIRECTION_REVERSE}", [fan.DIRECTION_REVERSE]
)
return self._resource.serialize_capability_resources()
# Cover Position Resources
if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
self._resource = AlexaModeResource(
["Position", AlexaGlobalCatalog.SETTING_OPENING], False
)
self._resource.add_mode(
f"{cover.ATTR_POSITION}.{cover.STATE_OPEN}",
[AlexaGlobalCatalog.VALUE_OPEN],
)
self._resource.add_mode(
f"{cover.ATTR_POSITION}.{cover.STATE_CLOSED}",
[AlexaGlobalCatalog.VALUE_CLOSE],
)
self._resource.add_mode(f"{cover.ATTR_POSITION}.custom", ["Custom"])
return self._resource.serialize_capability_resources()
return None
def semantics(self):
"""Build and return semantics object."""
# Cover Position
if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
self._semantics = AlexaSemantics()
self._semantics.add_action_to_directive(
[AlexaSemantics.ACTION_CLOSE, AlexaSemantics.ACTION_LOWER],
"SetMode",
{"mode": f"{cover.ATTR_POSITION}.{cover.STATE_CLOSED}"},
)
self._semantics.add_action_to_directive(
[AlexaSemantics.ACTION_OPEN, AlexaSemantics.ACTION_RAISE],
"SetMode",
{"mode": f"{cover.ATTR_POSITION}.{cover.STATE_OPEN}"},
)
self._semantics.add_states_to_value(
[AlexaSemantics.STATES_CLOSED],
f"{cover.ATTR_POSITION}.{cover.STATE_CLOSED}",
)
self._semantics.add_states_to_value(
[AlexaSemantics.STATES_OPEN],
f"{cover.ATTR_POSITION}.{cover.STATE_OPEN}",
)
return self._semantics.serialize_semantics()
return None
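    # Hedged example: for the cover position instance, the semantics built above let
    # open/close and raise/lower utterances resolve to SetMode with
    # "position.open" / "position.closed", matching the modes registered in
    # capability_resources().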
class AlexaRangeController(AlexaCapability):
"""Implements Alexa.RangeController.
https://developer.amazon.com/docs/device-apis/alexa-rangecontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"es-MX",
"fr-CA",
"fr-FR",
"it-IT",
"ja-JP",
}
def __init__(self, entity, instance, non_controllable=False):
"""Initialize the entity."""
super().__init__(entity, instance)
self._resource = None
self._semantics = None
self.properties_non_controllable = lambda: non_controllable
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.RangeController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "rangeValue"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "rangeValue":
raise UnsupportedProperty(name)
# Fan Speed
if self.instance == f"{fan.DOMAIN}.{fan.ATTR_SPEED}":
speed_list = self.entity.attributes[fan.ATTR_SPEED_LIST]
speed = self.entity.attributes[fan.ATTR_SPEED]
speed_index = next(
(i for i, v in enumerate(speed_list) if v == speed), None
)
return speed_index
# Cover Position
if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
return self.entity.attributes.get(cover.ATTR_CURRENT_POSITION)
# Cover Tilt Position
if self.instance == f"{cover.DOMAIN}.{cover.ATTR_TILT_POSITION}":
return self.entity.attributes.get(cover.ATTR_CURRENT_TILT_POSITION)
# Input Number Value
if self.instance == f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}":
return float(self.entity.state)
# Vacuum Fan Speed
if self.instance == f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}":
speed_list = self.entity.attributes[vacuum.ATTR_FAN_SPEED_LIST]
speed = self.entity.attributes[vacuum.ATTR_FAN_SPEED]
speed_index = next(
(i for i, v in enumerate(speed_list) if v == speed), None
)
return speed_index
return None
def configuration(self):
"""Return configuration with presetResources."""
if isinstance(self._resource, AlexaCapabilityResource):
return self._resource.serialize_configuration()
return None
def capability_resources(self):
"""Return capabilityResources object."""
# Fan Speed Resources
if self.instance == f"{fan.DOMAIN}.{fan.ATTR_SPEED}":
speed_list = self.entity.attributes[fan.ATTR_SPEED_LIST]
max_value = len(speed_list) - 1
self._resource = AlexaPresetResource(
labels=[AlexaGlobalCatalog.SETTING_FAN_SPEED],
min_value=0,
max_value=max_value,
precision=1,
)
for index, speed in enumerate(speed_list):
labels = [speed.replace("_", " ")]
if index == 1:
labels.append(AlexaGlobalCatalog.VALUE_MINIMUM)
if index == max_value:
labels.append(AlexaGlobalCatalog.VALUE_MAXIMUM)
self._resource.add_preset(value=index, labels=labels)
return self._resource.serialize_capability_resources()
# Cover Position Resources
if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
self._resource = AlexaPresetResource(
["Position", AlexaGlobalCatalog.SETTING_OPENING],
min_value=0,
max_value=100,
precision=1,
unit=AlexaGlobalCatalog.UNIT_PERCENT,
)
return self._resource.serialize_capability_resources()
# Cover Tilt Position Resources
if self.instance == f"{cover.DOMAIN}.{cover.ATTR_TILT_POSITION}":
self._resource = AlexaPresetResource(
["Tilt Position", AlexaGlobalCatalog.SETTING_OPENING],
min_value=0,
max_value=100,
precision=1,
unit=AlexaGlobalCatalog.UNIT_PERCENT,
)
return self._resource.serialize_capability_resources()
# Input Number Value
if self.instance == f"{input_number.DOMAIN}.{input_number.ATTR_VALUE}":
min_value = float(self.entity.attributes[input_number.ATTR_MIN])
max_value = float(self.entity.attributes[input_number.ATTR_MAX])
precision = float(self.entity.attributes.get(input_number.ATTR_STEP, 1))
unit = self.entity.attributes.get(input_number.ATTR_UNIT_OF_MEASUREMENT)
self._resource = AlexaPresetResource(
["Value"],
min_value=min_value,
max_value=max_value,
precision=precision,
unit=unit,
)
self._resource.add_preset(
value=min_value, labels=[AlexaGlobalCatalog.VALUE_MINIMUM]
)
self._resource.add_preset(
value=max_value, labels=[AlexaGlobalCatalog.VALUE_MAXIMUM]
)
return self._resource.serialize_capability_resources()
# Vacuum Fan Speed Resources
if self.instance == f"{vacuum.DOMAIN}.{vacuum.ATTR_FAN_SPEED}":
speed_list = self.entity.attributes[vacuum.ATTR_FAN_SPEED_LIST]
max_value = len(speed_list) - 1
self._resource = AlexaPresetResource(
labels=[AlexaGlobalCatalog.SETTING_FAN_SPEED],
min_value=0,
max_value=max_value,
precision=1,
)
for index, speed in enumerate(speed_list):
labels = [speed.replace("_", " ")]
if index == 1:
labels.append(AlexaGlobalCatalog.VALUE_MINIMUM)
if index == max_value:
labels.append(AlexaGlobalCatalog.VALUE_MAXIMUM)
self._resource.add_preset(value=index, labels=labels)
return self._resource.serialize_capability_resources()
return None
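    # Hedged example: for a hypothetical fan with speed_list
    # ["off", "low", "medium", "high"], the loop above registers presets 0..3 and
    # labels index 1 ("low") as the minimum and index 3 ("high") as the maximum
    # before serializing the capability resources.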
def semantics(self):
"""Build and return semantics object."""
# Cover Position
if self.instance == f"{cover.DOMAIN}.{cover.ATTR_POSITION}":
self._semantics = AlexaSemantics()
self._semantics.add_action_to_directive(
[AlexaSemantics.ACTION_LOWER], "SetRangeValue", {"rangeValue": 0}
)
self._semantics.add_action_to_directive(
[AlexaSemantics.ACTION_RAISE], "SetRangeValue", {"rangeValue": 100}
)
self._semantics.add_states_to_value([AlexaSemantics.STATES_CLOSED], value=0)
self._semantics.add_states_to_range(
[AlexaSemantics.STATES_OPEN], min_value=1, max_value=100
)
return self._semantics.serialize_semantics()
# Cover Tilt Position
if self.instance == f"{cover.DOMAIN}.{cover.ATTR_TILT_POSITION}":
self._semantics = AlexaSemantics()
self._semantics.add_action_to_directive(
[AlexaSemantics.ACTION_CLOSE], "SetRangeValue", {"rangeValue": 0}
)
self._semantics.add_action_to_directive(
[AlexaSemantics.ACTION_OPEN], "SetRangeValue", {"rangeValue": 100}
)
self._semantics.add_states_to_value([AlexaSemantics.STATES_CLOSED], value=0)
self._semantics.add_states_to_range(
[AlexaSemantics.STATES_OPEN], min_value=1, max_value=100
)
return self._semantics.serialize_semantics()
return None
class AlexaToggleController(AlexaCapability):
"""Implements Alexa.ToggleController.
https://developer.amazon.com/docs/device-apis/alexa-togglecontroller.html
"""
supported_locales = {
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"es-MX",
"fr-CA",
"fr-FR",
"it-IT",
"ja-JP",
}
def __init__(self, entity, instance, non_controllable=False):
"""Initialize the entity."""
super().__init__(entity, instance)
self._resource = None
self._semantics = None
self.properties_non_controllable = lambda: non_controllable
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.ToggleController"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "toggleState"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "toggleState":
raise UnsupportedProperty(name)
# Fan Oscillating
if self.instance == f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}":
is_on = bool(self.entity.attributes.get(fan.ATTR_OSCILLATING))
return "ON" if is_on else "OFF"
return None
def capability_resources(self):
"""Return capabilityResources object."""
# Fan Oscillating Resource
if self.instance == f"{fan.DOMAIN}.{fan.ATTR_OSCILLATING}":
self._resource = AlexaCapabilityResource(
[AlexaGlobalCatalog.SETTING_OSCILLATE, "Rotate", "Rotation"]
)
return self._resource.serialize_capability_resources()
return None
class AlexaChannelController(AlexaCapability):
"""Implements Alexa.ChannelController.
https://developer.amazon.com/docs/device-apis/alexa-channelcontroller.html
"""
supported_locales = {"de-DE", "en-AU", "en-CA", "en-GB", "en-IN", "en-US"}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.ChannelController"
class AlexaDoorbellEventSource(AlexaCapability):
"""Implements Alexa.DoorbellEventSource.
https://developer.amazon.com/docs/device-apis/alexa-doorbelleventsource.html
"""
supported_locales = {"en-US"}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.DoorbellEventSource"
def capability_proactively_reported(self):
"""Return True for proactively reported capability."""
return True
class AlexaPlaybackStateReporter(AlexaCapability):
"""Implements Alexa.PlaybackStateReporter.
https://developer.amazon.com/docs/device-apis/alexa-playbackstatereporter.html
"""
supported_locales = {"de-DE", "en-GB", "en-US", "fr-FR"}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.PlaybackStateReporter"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "playbackState"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def properties_retrievable(self):
"""Return True if properties can be retrieved."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "playbackState":
raise UnsupportedProperty(name)
playback_state = self.entity.state
if playback_state == STATE_PLAYING:
return {"state": "PLAYING"}
if playback_state == STATE_PAUSED:
return {"state": "PAUSED"}
return {"state": "STOPPED"}
class AlexaSeekController(AlexaCapability):
"""Implements Alexa.SeekController.
https://developer.amazon.com/docs/device-apis/alexa-seekcontroller.html
"""
supported_locales = {"de-DE", "en-GB", "en-US"}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.SeekController"
class AlexaEventDetectionSensor(AlexaCapability):
"""Implements Alexa.EventDetectionSensor.
https://developer.amazon.com/docs/device-apis/alexa-eventdetectionsensor.html
"""
supported_locales = {"en-US"}
def __init__(self, hass, entity):
"""Initialize the entity."""
super().__init__(entity)
self.hass = hass
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.EventDetectionSensor"
def properties_supported(self):
"""Return what properties this entity supports."""
return [{"name": "humanPresenceDetectionState"}]
def properties_proactively_reported(self):
"""Return True if properties asynchronously reported."""
return True
def get_property(self, name):
"""Read and return a property."""
if name != "humanPresenceDetectionState":
raise UnsupportedProperty(name)
human_presence = "NOT_DETECTED"
state = self.entity.state
# Return None for unavailable and unknown states.
# Allows the Alexa.EndpointHealth Interface to handle the unavailable state in a stateReport.
if state in (STATE_UNAVAILABLE, STATE_UNKNOWN, None):
return None
if self.entity.domain == image_processing.DOMAIN:
if int(state):
human_presence = "DETECTED"
elif state == STATE_ON:
human_presence = "DETECTED"
return {"value": human_presence}
def configuration(self):
"""Return supported detection types."""
return {
"detectionMethods": ["AUDIO", "VIDEO"],
"detectionModes": {
"humanPresence": {
"featureAvailability": "ENABLED",
"supportsNotDetected": True,
}
},
}
class AlexaEqualizerController(AlexaCapability):
"""Implements Alexa.EqualizerController.
https://developer.amazon.com/en-US/docs/alexa/device-apis/alexa-equalizercontroller.html
"""
supported_locales = {"en-US"}
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.EqualizerController"
def properties_supported(self):
"""Return what properties this entity supports.
Either bands, mode or both can be specified. Only mode is supported at this time.
"""
return [{"name": "mode"}]
def get_property(self, name):
"""Read and return a property."""
if name != "mode":
raise UnsupportedProperty(name)
sound_mode = self.entity.attributes.get(media_player.ATTR_SOUND_MODE)
if sound_mode and sound_mode.upper() in (
"MOVIE",
"MUSIC",
"NIGHT",
"SPORT",
"TV",
):
return sound_mode.upper()
return None
def configurations(self):
"""Return the sound modes supported in the configurations object.
Valid Values for modes are: MOVIE, MUSIC, NIGHT, SPORT, TV.
"""
configurations = None
sound_mode_list = self.entity.attributes.get(media_player.ATTR_SOUND_MODE_LIST)
if sound_mode_list:
supported_sound_modes = []
for sound_mode in sound_mode_list:
if sound_mode.upper() in ("MOVIE", "MUSIC", "NIGHT", "SPORT", "TV"):
supported_sound_modes.append({"name": sound_mode.upper()})
configurations = {"modes": {"supported": supported_sound_modes}}
return configurations
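    # Hedged example: assuming a media_player whose sound_mode_list is
    # ["Music", "Stadium", "Night"], only the Alexa-recognised values survive the
    # filter above, giving {"modes": {"supported": [{"name": "MUSIC"},
    # {"name": "NIGHT"}]}}.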
class AlexaTimeHoldController(AlexaCapability):
"""Implements Alexa.TimeHoldController.
https://developer.amazon.com/docs/device-apis/alexa-timeholdcontroller.html
"""
supported_locales = {"en-US"}
def __init__(self, entity, allow_remote_resume=False):
"""Initialize the entity."""
super().__init__(entity)
self._allow_remote_resume = allow_remote_resume
def name(self):
"""Return the Alexa API name of this interface."""
return "Alexa.TimeHoldController"
def configuration(self):
"""Return configuration object.
Set allowRemoteResume to True if Alexa can restart the operation on the device.
When false, Alexa does not send the Resume directive.
"""
return {"allowRemoteResume": self._allow_remote_resume}
| 30.953206
| 127
| 0.610844
|
002230f3fb8240ca98fe43d8de8472283d055579
| 5,646
|
py
|
Python
|
contrib/seeds/makeseeds.py
|
spayse/hello_world
|
5b01834dfbfbd21e8d1bf12c418097576368bc10
|
[
"MIT"
] | 1
|
2018-08-07T06:53:41.000Z
|
2018-08-07T06:53:41.000Z
|
contrib/seeds/makeseeds.py
|
spayse/hello_world
|
5b01834dfbfbd21e8d1bf12c418097576368bc10
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
spayse/hello_world
|
5b01834dfbfbd21e8d1bf12c418097576368bc10
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
"144.202.86.90", "199.247.24.38", "45.77.192.71", "144.202.45.7", "45.32.175.21", "80.211.151.130", "80.211.13.159", "176.213.142.49"
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/helloCore:2.2.(0|1|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
    if int(sline[1]) == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
if len(sline) > 11:
agent = sline[11][1:] + sline[12][:-1]
else:
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
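# Hedged illustration (hypothetical seeder output; the exact column layout of the
# dump is assumed): a line whose first field is "1.2.3.4:8333" takes the IPv4
# branch above and yields net='ipv4', ip='1.2.3.4', port=8333, with the uptime,
# lastsuccess, version, agent, service and blocks fields filled from the remaining
# whitespace-separated columns.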
def filtermultiport(ips):
    '''Filter out hosts that appear with more than one node (port) per IP address.'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
        except Exception:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
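# Note on the ASN lookup above (relies on Team Cymru's DNS interface): for a
# hypothetical host 192.0.2.1 the reversed-octet TXT query becomes
# '1.2.0.192.origin.asn.cymru.com', and the first token inside the quoted answer
# is taken as the origin AS number.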
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries without a valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
    # Filter out hosts with multiple bitcoin ports; these are likely abusive.
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| 32.825581
| 186
| 0.567836
|
f126241da2a02cbab7f103ead77a2c7da945648f
| 5,131
|
py
|
Python
|
pulsar/apps/ds/utils.py
|
PyCN/pulsar
|
fee44e871954aa6ca36d00bb5a3739abfdb89b26
|
[
"BSD-3-Clause"
] | 1,410
|
2015-01-02T14:55:07.000Z
|
2022-03-28T17:22:06.000Z
|
pulsar/apps/ds/utils.py
|
PyCN/pulsar
|
fee44e871954aa6ca36d00bb5a3739abfdb89b26
|
[
"BSD-3-Clause"
] | 194
|
2015-01-22T06:18:24.000Z
|
2020-10-20T21:21:58.000Z
|
pulsar/apps/ds/utils.py
|
PyCN/pulsar
|
fee44e871954aa6ca36d00bb5a3739abfdb89b26
|
[
"BSD-3-Clause"
] | 168
|
2015-01-31T10:29:55.000Z
|
2022-03-14T10:22:24.000Z
|
import shutil
import pickle
def save_data(cfg, filename, data):
logger = cfg.configured_logger('pulsar.ds')
temp = 'temp_%s' % filename
with open(temp, 'wb') as file:
pickle.dump(data, file, protocol=2)
shutil.move(temp, filename)
logger.info('wrote data into "%s"', filename)
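# Usage sketch (hypothetical filename): save_data(cfg, 'pulsards.data', data)
# first pickles into 'temp_pulsards.data' and only then moves that file over the
# target, so an interrupted write is unlikely to leave a half-written data file.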
def sort_command(store, client, request, value):
sort_type = type(value)
right = 0
desc = False
alpha = None
start = None
end = None
storekey = None
sortby = None
dontsort = False
getops = []
N = len(request)
j = 2
while j < N:
val = request[j].lower()
right = N - j - 1
if val == b'asc':
desc = False
elif val == b'desc':
desc = True
elif val == b'alpha':
alpha = True
elif val == b'limit' and right >= 2:
try:
start = max(0, int(request[j+1]))
count = int(request[j+2])
except Exception:
return client.error_reply(store.SYNTAX_ERROR)
end = len(value) if count <= 0 else start + count
j += 2
elif val == b'store' and right >= 1:
storekey = request[j+1]
j += 1
elif val == b'by' and right >= 1:
sortby = request[j+1]
if b'*' not in sortby:
dontsort = True
j += 1
elif val == b'get' and right >= 1:
getops.append(request[j+1])
j += 1
else:
return client.error_reply(store.SYNTAX_ERROR)
j += 1
db = client.db
if sort_type is store.zset_type and dontsort:
dontsort = False
alpha = True
sortby = None
vector = []
sortable = SortableDesc if desc else Sortable
#
if not dontsort:
for val in value:
if sortby:
byval = lookup(store, db, sortby, val)
if byval is None:
vector.append((val, null))
continue
else:
byval = val
if not alpha:
try:
byval = sortable(float(byval))
except Exception:
byval = null
else:
byval = sortable(byval)
vector.append((val, byval))
vector = sorted(vector, key=lambda x: x[1])
if start is not None:
vector = vector[start:end]
vector = [val for val, _ in vector]
else:
vector = list(value)
if start is not None:
vector = vector[start:end]
if storekey is None:
if getops:
result = []
for val in vector:
for getv in getops:
gval = lookup(store, db, getv, val)
result.append(gval)
vector = result
client.reply_multi_bulk(vector)
else:
if getops:
vals = store.list_type()
empty = b''
for val in vector:
for getv in getops:
vals.append(lookup(store, db, getv, val) or empty)
else:
vals = store.list_type(vector)
if db.pop(storekey) is not None:
store._signal(store.NOTIFY_GENERIC, db, 'del', storekey)
result = len(vals)
if result:
db._data[storekey] = vals
store._signal(store.NOTIFY_LIST, db, 'sort', storekey, result)
client.reply_int(result)
def lookup(store, db, pattern, repl):
if pattern == b'#':
return repl
key = pattern.replace(b'*', repl)
bits = key.split(b'->', 1)
if len(bits) == 1:
string = db.get(key)
return bytes(string) if isinstance(string, bytearray) else None
else:
key, field = bits
hash = db.get(key)
return hash.get(field) if isinstance(hash, store.hash_type) else None
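# Hedged examples of the Redis SORT-style pattern syntax handled above (keys and
# values are hypothetical): lookup(store, db, b'#', b'foo') returns b'foo' itself;
# pattern b'weight_*' with repl b'foo' reads the string key b'weight_foo';
# pattern b'data_*->rank' reads field b'rank' of the hash stored at b'data_foo'.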
class Null:
__slots__ = ()
def __lt__(self, other):
return False
null = Null()
class Sortable:
__slots__ = ('value',)
def __init__(self, value):
self.value = value
def __lt__(self, other):
if other is null:
return True
else:
return self.value < other.value
class SortableDesc:
__slots__ = ('value',)
def __init__(self, value):
self.value = value
def __lt__(self, other):
if other is null:
return True
else:
return self.value > other.value
def count_bytes(array):
'''Count the number of bits in a byte ``array``.
It uses the Hamming weight popcount algorithm
'''
# this algorithm can be rewritten as
# for i in array:
# count += sum(b=='1' for b in bin(i)[2:])
# but this version is almost 2 times faster
count = 0
for i in array:
i = i - ((i >> 1) & 0x55555555)
i = (i & 0x33333333) + ((i >> 2) & 0x33333333)
count += (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24
return count
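# Quick sanity check for the popcount above (easy to verify by hand):
# count_bytes(bytearray(b'\x0f\xff')) == 4 + 8 == 12, the same result as the
# naive per-bit loop mentioned in the comment.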
def and_op(x, y):
return x & y
def or_op(x, y):
return x | y
def xor_op(x, y):
return x ^ y
| 25.78392
| 77
| 0.510817
|
c1f998ee333606c07e0a91bbdaa241d5b73d0add
| 1,999
|
py
|
Python
|
scraping/normask1.py
|
Asyikin98/SkinFerm
|
72fd1ad6339c96adf5ec154bde566de9eb1472c3
|
[
"MIT"
] | null | null | null |
scraping/normask1.py
|
Asyikin98/SkinFerm
|
72fd1ad6339c96adf5ec154bde566de9eb1472c3
|
[
"MIT"
] | 2
|
2021-02-03T01:55:13.000Z
|
2021-04-30T12:46:33.000Z
|
scraping/normask1.py
|
Asyikin98/SkinFerm
|
72fd1ad6339c96adf5ec154bde566de9eb1472c3
|
[
"MIT"
] | null | null | null |
import urllib.request
import random
from bs4 import BeautifulSoup
from requests import get
import mysql.connector
conn = mysql.connector.connect(user="root", passwd="",host="localhost", database="product")
cursor = conn.cursor()
sql = """INSERT INTO normask (image, name, price, rating) VALUES (%s, %s, %s, %s)"""
def crawl_url(pageUrl, masknor_arr):
url = 'https://www.skinstore.com/skin-care/skincare-concern/normal-combination.list?pageNumber=1&facetFilters=averageReviewScore_auto_content:%5B4+TO+5%5D|en_brand_content:Alchimie+Forever|en_brand_content:ESPA|en_brand_content:Jurlique|en_brand_content:Manuka+Doctor|en_brand_content:Murad|en_brand_content:Peter+Thomas+Roth|en_brand_content:REN+Clean+Skincare|en_skincareproducttype_content:Mask|en_brand_content:SkinCeuticals'
page = get(url)
soup = BeautifulSoup(page.text, 'html.parser')
type(soup)
    # Scrape every product card in the listing.
mask = soup.find_all('li', class_='productListProducts_product')
try:
for masks in mask :
first_product_image = masks.find('img')['src']
img_name = random.randrange(1,500)
full_name = str(img_name) + ".jpg"
urllib.request.urlretrieve(first_product_image, full_name)
first_product_name = masks.find("h3",{"class":"productBlock_productName"}).get_text().strip()
first_product_price = masks.find("div",{"class":"productBlock_price"}).get_text().strip()
first_product_rating = masks.find("span",{"class":"visually-hidden productBlock_rating_hiddenLabel"}).get_text().strip()
masknor_arr.append((first_product_image, first_product_name, first_product_price, first_product_rating))
finally:
return masknor_arr
masknor_arr = crawl_url("", [])
print(len(masknor_arr))
cursor.executemany(sql, masknor_arr)
conn.commit()
cursor.close()
conn.close()
| 43.456522
| 433
| 0.67984
|
9a0029cec94b74efbc5c3a03460845186a2652e2
| 57,809
|
py
|
Python
|
_utils/vault.py
|
jbirdkerr/vault-formula
|
745324690c9e14b636e836d6ff780a5ff41c7415
|
[
"BSD-3-Clause"
] | null | null | null |
_utils/vault.py
|
jbirdkerr/vault-formula
|
745324690c9e14b636e836d6ff780a5ff41c7415
|
[
"BSD-3-Clause"
] | null | null | null |
_utils/vault.py
|
jbirdkerr/vault-formula
|
745324690c9e14b636e836d6ff780a5ff41c7415
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
:maintainer: SaltStack
:maturity: new
:platform: all
Utilities supporting modules for Hashicorp Vault. Configuration instructions are
documented in the execution module docs.
'''
from __future__ import absolute_import, print_function, unicode_literals
import base64
import logging
import os
import requests
import json
import time
from functools import wraps
import six
import salt.crypt
import salt.exceptions
import salt.utils.versions
try:
import hcl
HAS_HCL_PARSER = True
except ImportError:
HAS_HCL_PARSER = False
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
log = logging.getLogger(__name__)
logging.getLogger("requests").setLevel(logging.WARNING)
# Load the __salt__ dunder if not already loaded (when called from utils-module)
__salt__ = None
def __virtual__(): # pylint: disable=expected-2-blank-lines-found-0
try:
global __salt__ # pylint: disable=global-statement
if not __salt__:
__salt__ = salt.loader.minion_mods(__opts__)
return True
except Exception as e:
log.error("Could not load __salt__: %s", e)
return False
def _get_token_and_url_from_master():
'''
Get a token with correct policies for the minion, and the url to the Vault
service
'''
minion_id = __grains__['id']
pki_dir = __opts__['pki_dir']
# When rendering pillars, the module executes on the master, but the token
# should be issued for the minion, so that the correct policies are applied
if __opts__.get('__role', 'minion') == 'minion':
private_key = '{0}/minion.pem'.format(pki_dir)
log.debug('Running on minion, signing token request with key %s',
private_key)
signature = base64.b64encode(
salt.crypt.sign_message(private_key, minion_id))
result = __salt__['publish.runner'](
'vault.generate_token', arg=[minion_id, signature])
else:
private_key = '{0}/master.pem'.format(pki_dir)
log.debug(
'Running on master, signing token request for %s with key %s',
minion_id, private_key)
signature = base64.b64encode(
salt.crypt.sign_message(private_key, minion_id))
result = __salt__['saltutil.runner'](
'vault.generate_token',
minion_id=minion_id,
signature=signature,
impersonated_by_master=True)
if not result:
log.error('Failed to get token from master! No result returned - '
'is the peer publish configuration correct?')
raise salt.exceptions.CommandExecutionError(result)
if not isinstance(result, dict):
log.error('Failed to get token from master! '
'Response is not a dict: %s', result)
raise salt.exceptions.CommandExecutionError(result)
if 'error' in result:
log.error('Failed to get token from master! '
'An error was returned: %s', result['error'])
raise salt.exceptions.CommandExecutionError(result)
return {
'url': result['url'],
'token': result['token'],
'verify': result['verify'],
}
def _get_vault_connection():
'''
Get the connection details for calling Vault, from local configuration if
it exists, or from the master otherwise
'''
def _use_local_config():
log.debug('Using Vault connection details from local config')
try:
if __opts__['vault']['auth']['method'] == 'approle':
verify = __opts__['vault'].get('verify', None)
if _selftoken_expired():
log.debug('Vault token expired. Recreating one')
# Requesting a short ttl token
url = '{0}/v1/auth/approle/login'.format(
__opts__['vault']['url'])
payload = {'role_id': __opts__['vault']['auth']['role_id']}
if 'secret_id' in __opts__['vault']['auth']:
payload['secret_id'] = __opts__['vault']['auth'][
'secret_id']
response = requests.post(url, json=payload, verify=verify)
if response.status_code != 200:
                        errmsg = 'An error occurred while getting a token from approle'
raise salt.exceptions.CommandExecutionError(errmsg)
__opts__['vault']['auth']['token'] = response.json()[
'auth']['client_token']
return {
'url': __opts__['vault']['url'],
'token': __opts__['vault']['auth']['token'],
'verify': __opts__['vault'].get('verify', None)
}
except KeyError as err:
            errmsg = 'Minion has "vault" config section, but could not find key "{0}" within'.format(
                err)
raise salt.exceptions.CommandExecutionError(errmsg)
if 'vault' in __opts__ and __opts__.get('__role', 'minion') == 'master':
return _use_local_config()
elif any((__opts__['local'], __opts__['file_client'] == 'local',
__opts__['master_type'] == 'disable')):
return _use_local_config()
else:
log.debug('Contacting master for Vault connection details')
return _get_token_and_url_from_master()
def make_request(method, resource, profile=None, **args):
'''
Make a request to Vault
'''
if profile is not None and profile.keys().remove('driver') is not None:
# Deprecated code path
return make_request_with_profile(method, resource, profile, **args)
connection = _get_vault_connection()
token, vault_url = connection['token'], connection['url']
if 'verify' not in args:
args['verify'] = connection['verify']
url = "{0}/{1}".format(vault_url, resource)
headers = {'X-Vault-Token': token, 'Content-Type': 'application/json'}
response = requests.request(method, url, headers=headers, **args)
return response
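# Usage sketch (hypothetical secret path; assumes a valid token can be obtained):
# make_request('GET', 'v1/secret/my-secret').json() performs an authenticated read,
# with the X-Vault-Token header filled in from the connection details resolved above.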
def make_request_with_profile(method, resource, profile, **args):
'''
DEPRECATED! Make a request to Vault, with a profile including connection
details.
'''
salt.utils.versions.warn_until(
'Fluorine',
'Specifying Vault connection data within a \'profile\' has been '
'deprecated. Please see the documentation for details on the new '
'configuration schema. Support for this function will be removed '
'in Salt Fluorine.')
url = '{0}://{1}:{2}/v1/{3}'.format(
profile.get('vault.scheme', 'https'),
profile.get('vault.host'),
profile.get('vault.port'),
resource,
)
token = os.environ.get('VAULT_TOKEN', profile.get('vault.token'))
if token is None:
raise salt.exceptions.CommandExecutionError(
'A token was not configured')
headers = {'X-Vault-Token': token, 'Content-Type': 'application/json'}
response = requests.request(method, url, headers=headers, **args)
return response
def _selftoken_expired():
'''
Validate the current token exists and is still valid
'''
try:
verify = __opts__['vault'].get('verify', None)
url = '{0}/v1/auth/token/lookup-self'.format(__opts__['vault']['url'])
if 'token' not in __opts__['vault']['auth']:
return True
headers = {'X-Vault-Token': __opts__['vault']['auth']['token']}
response = requests.get(url, headers=headers, verify=verify)
if response.status_code != 200:
return True
return False
except Exception as e:
raise salt.exceptions.CommandExecutionError(
'Error while looking up self token : {0}'.format(e))
class VaultError(Exception):
def __init__(self, message=None, errors=None):
if errors:
message = ', '.join(errors)
self.errors = errors
super(VaultError, self).__init__(message)
class InvalidRequest(VaultError):
pass
class Unauthorized(VaultError):
pass
class Forbidden(VaultError):
pass
class InvalidPath(VaultError):
pass
class RateLimitExceeded(VaultError):
pass
class InternalServerError(VaultError):
pass
class VaultNotInitialized(VaultError):
pass
class VaultDown(VaultError):
pass
class UnexpectedError(VaultError):
pass
class VaultClient(object):
def __init__(self,
url='http://localhost:8200',
token=None,
cert=None,
verify=True,
timeout=30,
proxies=None,
allow_redirects=True,
session=None):
if not session:
session = requests.Session()
self.allow_redirects = allow_redirects
self.session = session
self.token = token
self._url = url
self._kwargs = {
'cert': cert,
'verify': verify,
'timeout': timeout,
'proxies': proxies,
}
def read(self, path, wrap_ttl=None):
"""
GET /<path>
"""
try:
log.trace('Reading vault data from %s', path)
return self._get('/v1/{0}'.format(path), wrap_ttl=wrap_ttl).json()
except InvalidPath:
return None
def list(self, path):
"""
GET /<path>?list=true
"""
try:
payload = {'list': True}
return self._get('/v1/{}'.format(path), params=payload).json()
except InvalidPath:
return None
def write(self, path, translate_newlines=False, wrap_ttl=None, **kwargs):
"""
PUT /<path>
"""
if translate_newlines:
for k, v in kwargs.items():
if isinstance(v, six.string_types):
kwargs[k] = v.replace(r'\n', '\n')
response = self._put(
'/v1/{0}'.format(path), json=kwargs, wrap_ttl=wrap_ttl)
if response.status_code == 200:
return response.json()
def delete(self, path):
"""
DELETE /<path>
"""
self._delete('/v1/{0}'.format(path))
def unwrap(self, token):
"""
GET /cubbyhole/response
X-Vault-Token: <token>
"""
path = "cubbyhole/response"
_token = self.token
try:
self.token = token
return json.loads(self.read(path)['data']['response'])
finally:
self.token = _token
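    # Hedged note: unwrap() temporarily swaps in the single-use wrapping token, e.g.
    # client.unwrap('hypothetical-wrap-token'), reads the cubbyhole response with it,
    # and the finally block restores the client's own token even if the read fails.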
def is_initialized(self):
"""
GET /sys/init
"""
return self._get('/v1/sys/init').json()['initialized']
# def initialize(self, secret_shares=5, secret_threshold=3, pgp_keys=None):
# """
# PUT /sys/init
# """
# params = {
# 'secret_shares': secret_shares,
# 'secret_threshold': secret_threshold,
# }
# if pgp_keys:
# if len(pgp_keys) != secret_shares:
# raise ValueError('Length of pgp_keys must equal secret shares')
# params['pgp_keys'] = pgp_keys
# return self._put('/v1/sys/init', json=params).json()
@property
def seal_status(self):
"""
GET /sys/seal-status
"""
return self._get('/v1/sys/seal-status').json()
def is_sealed(self):
return self.seal_status['sealed']
def seal(self):
"""
PUT /sys/seal
"""
self._put('/v1/sys/seal')
def unseal(self, key, reset=False):
"""
PUT /sys/unseal
"""
params = {'key': key, 'reset': reset}
return self._put('/v1/sys/unseal', json=params).json()
def unseal_multi(self, keys):
result = None
for key in keys:
result = self.unseal(key)
if not result['sealed']:
break
return result
@property
def key_status(self):
"""
GET /sys/key-status
"""
return self._get('/v1/sys/key-status').json()
def rotate(self):
"""
PUT /sys/rotate
"""
self._put('/v1/sys/rotate')
@property
def rekey_status(self):
"""
GET /sys/rekey/init
"""
return self._get('/v1/sys/rekey/init').json()
def start_rekey(self,
secret_shares=5,
secret_threshold=3,
pgp_keys=None,
backup=False):
"""
PUT /sys/rekey/init
"""
params = {
'secret_shares': secret_shares,
'secret_threshold': secret_threshold,
}
if pgp_keys:
if len(pgp_keys) != secret_shares:
raise ValueError('Length of pgp_keys must equal secret shares')
params['pgp_keys'] = pgp_keys
params['backup'] = backup
resp = self._put('/v1/sys/rekey/init', json=params)
if resp.text:
return resp.json()
def cancel_rekey(self):
"""
DELETE /sys/rekey/init
"""
self._delete('/v1/sys/rekey/init')
def rekey(self, key, nonce=None):
"""
PUT /sys/rekey/update
"""
params = {
'key': key,
}
if nonce:
params['nonce'] = nonce
return self._put('/v1/sys/rekey/update', json=params).json()
def rekey_multi(self, keys, nonce=None):
result = None
for key in keys:
result = self.rekey(key, nonce=nonce)
if 'complete' in result and result['complete']:
break
return result
def get_backed_up_keys(self):
"""
GET /sys/rekey/backup
"""
return self._get('/v1/sys/rekey/backup').json()
@property
def ha_status(self):
"""
GET /sys/leader
"""
return self._get('/v1/sys/leader').json()
def get_lease(self, lease_id):
try:
lease = self.write('sys/leases/lookup', lease_id=lease_id)
except InvalidRequest:
log.exception('The specified lease is not valid')
lease = None
return lease
def renew_secret(self, lease_id, increment=None):
"""
PUT /sys/leases/renew
"""
params = {
'lease_id': lease_id,
'increment': increment,
}
return self._put('/v1/sys/leases/renew', json=params).json()
def revoke_secret(self, lease_id):
"""
PUT /sys/revoke/<lease id>
"""
self._put('/v1/sys/revoke/{0}'.format(lease_id))
def revoke_secret_prefix(self, path_prefix):
"""
PUT /sys/revoke-prefix/<path prefix>
"""
self._put('/v1/sys/revoke-prefix/{0}'.format(path_prefix))
def revoke_self_token(self):
"""
PUT /auth/token/revoke-self
"""
self._put('/v1/auth/token/revoke-self')
def list_secret_backends(self):
"""
GET /sys/mounts
"""
return self._get('/v1/sys/mounts').json()
def enable_secret_backend(self,
backend_type,
description=None,
mount_point=None,
config=None):
"""
POST /sys/auth/<mount point>
"""
if not mount_point:
mount_point = backend_type
params = {
'type': backend_type,
'description': description,
'config': config,
}
self._post('/v1/sys/mounts/{0}'.format(mount_point), json=params)
def tune_secret_backend(self,
backend_type,
mount_point=None,
default_lease_ttl=None,
max_lease_ttl=None):
"""
POST /sys/mounts/<mount point>/tune
"""
if not mount_point:
mount_point = backend_type
params = {
'default_lease_ttl': default_lease_ttl,
'max_lease_ttl': max_lease_ttl
}
self._post('/v1/sys/mounts/{0}/tune'.format(mount_point), json=params)
def get_secret_backend_tuning(self, backend_type, mount_point=None):
"""
GET /sys/mounts/<mount point>/tune
"""
if not mount_point:
mount_point = backend_type
return self._get('/v1/sys/mounts/{0}/tune'.format(mount_point)).json()
def disable_secret_backend(self, mount_point):
"""
DELETE /sys/mounts/<mount point>
"""
self._delete('/v1/sys/mounts/{0}'.format(mount_point))
def remount_secret_backend(self, from_mount_point, to_mount_point):
"""
POST /sys/remount
"""
params = {
'from': from_mount_point,
'to': to_mount_point,
}
self._post('/v1/sys/remount', json=params)
def list_policies(self):
"""
GET /sys/policy
"""
return self._get('/v1/sys/policy').json()['policies']
def get_policy(self, name, parse=False):
"""
GET /sys/policy/<name>
"""
try:
policy = self._get(
'/v1/sys/policy/{0}'.format(name)).json()['rules']
if parse:
if not HAS_HCL_PARSER:
raise ImportError('pyhcl is required for policy parsing')
policy = hcl.loads(policy)
return policy
except InvalidPath:
return None
def set_policy(self, name, rules):
"""
PUT /sys/policy/<name>
"""
if isinstance(rules, dict):
rules = json.dumps(rules)
params = {
'rules': rules,
}
self._put('/v1/sys/policy/{0}'.format(name), json=params)
def delete_policy(self, name):
"""
DELETE /sys/policy/<name>
"""
self._delete('/v1/sys/policy/{0}'.format(name))
def list_audit_backends(self):
"""
GET /sys/audit
"""
return self._get('/v1/sys/audit').json()
def enable_audit_backend(self,
backend_type,
description=None,
options=None,
name=None):
"""
POST /sys/audit/<name>
"""
if not name:
name = backend_type
params = {
'type': backend_type,
'description': description,
'options': options,
}
self._post('/v1/sys/audit/{0}'.format(name), json=params)
def disable_audit_backend(self, name):
"""
DELETE /sys/audit/<name>
"""
self._delete('/v1/sys/audit/{0}'.format(name))
def audit_hash(self, name, input):
"""
POST /sys/audit-hash
"""
params = {
'input': input,
}
return self._post(
'/v1/sys/audit-hash/{0}'.format(name), json=params).json()
def create_token(self,
role=None,
token_id=None,
policies=None,
meta=None,
no_parent=False,
lease=None,
display_name=None,
num_uses=None,
no_default_policy=False,
ttl=None,
orphan=False,
wrap_ttl=None,
renewable=None,
explicit_max_ttl=None,
period=None):
"""
POST /auth/token/create
POST /auth/token/create/<role>
POST /auth/token/create-orphan
"""
params = {
'id': token_id,
'policies': policies,
'meta': meta,
'no_parent': no_parent,
'display_name': display_name,
'num_uses': num_uses,
'no_default_policy': no_default_policy,
'renewable': renewable
}
if lease:
params['lease'] = lease
else:
params['ttl'] = ttl
params['explicit_max_ttl'] = explicit_max_ttl
if explicit_max_ttl:
params['explicit_max_ttl'] = explicit_max_ttl
if period:
params['period'] = period
if orphan:
return self._post(
'/v1/auth/token/create-orphan', json=params,
wrap_ttl=wrap_ttl).json()
elif role:
return self._post(
'/v1/auth/token/create/{0}'.format(role),
json=params,
wrap_ttl=wrap_ttl).json()
else:
return self._post(
'/v1/auth/token/create', json=params,
wrap_ttl=wrap_ttl).json()
def lookup_token(self, token=None, accessor=False, wrap_ttl=None):
"""
GET /auth/token/lookup/<token>
GET /auth/token/lookup-accessor/<token-accessor>
GET /auth/token/lookup-self
"""
if token:
if accessor:
path = '/v1/auth/token/lookup-accessor/{0}'.format(token)
return self._post(path, wrap_ttl=wrap_ttl).json()
else:
return self._get(
'/v1/auth/token/lookup/{0}'.format(token)).json()
else:
return self._get(
'/v1/auth/token/lookup-self', wrap_ttl=wrap_ttl).json()
def revoke_token(self, token, orphan=False, accessor=False):
"""
POST /auth/token/revoke/<token>
POST /auth/token/revoke-orphan/<token>
POST /auth/token/revoke-accessor/<token-accessor>
"""
if accessor and orphan:
msg = ("revoke_token does not support 'orphan' and 'accessor' "
"flags together")
raise InvalidRequest(msg)
elif accessor:
self._post('/v1/auth/token/revoke-accessor/{0}'.format(token))
elif orphan:
self._post('/v1/auth/token/revoke-orphan/{0}'.format(token))
else:
self._post('/v1/auth/token/revoke/{0}'.format(token))
def revoke_token_prefix(self, prefix):
"""
POST /auth/token/revoke-prefix/<prefix>
"""
self._post('/v1/auth/token/revoke-prefix/{0}'.format(prefix))
def renew_token(self, token=None, increment=None, wrap_ttl=None):
"""
POST /auth/token/renew/<token>
POST /auth/token/renew-self
"""
params = {
'increment': increment,
}
if token:
path = '/v1/auth/token/renew/{0}'.format(token)
return self._post(path, json=params, wrap_ttl=wrap_ttl).json()
else:
return self._post(
'/v1/auth/token/renew-self', json=params,
wrap_ttl=wrap_ttl).json()
def create_token_role(self,
role,
allowed_policies=None,
disallowed_policies=None,
orphan=None,
period=None,
renewable=None,
path_suffix=None,
explicit_max_ttl=None):
"""
POST /auth/token/roles/<role>
"""
params = {
'allowed_policies': allowed_policies,
'disallowed_policies': disallowed_policies,
'orphan': orphan,
'period': period,
'renewable': renewable,
'path_suffix': path_suffix,
'explicit_max_ttl': explicit_max_ttl
}
return self._post('/v1/auth/token/roles/{0}'.format(role), json=params)
def token_role(self, role):
"""
Returns the named token role.
"""
return self.read('auth/token/roles/{0}'.format(role))
def delete_token_role(self, role):
"""
Deletes the named token role.
"""
return self.delete('auth/token/roles/{0}'.format(role))
def list_token_roles(self):
"""
GET /auth/token/roles?list=true
"""
return self.list('auth/token/roles')
def logout(self, revoke_token=False):
"""
Clears the token used for authentication, optionally revoking it
before doing so
"""
if revoke_token:
self.revoke_self_token()
self.token = None
def is_authenticated(self):
"""
Helper method which returns the authentication status of the client
"""
if not self.token:
return False
try:
self.lookup_token()
return True
except Forbidden:
return False
except InvalidPath:
return False
except InvalidRequest:
return False
def auth_app_id(self,
app_id,
user_id,
mount_point='app-id',
use_token=True):
"""
POST /auth/<mount point>/login
"""
params = {
'app_id': app_id,
'user_id': user_id,
}
return self.auth(
'/v1/auth/{0}/login'.format(mount_point),
json=params,
use_token=use_token)
def auth_tls(self, mount_point='cert', use_token=True):
"""
POST /auth/<mount point>/login
"""
return self.auth(
'/v1/auth/{0}/login'.format(mount_point), use_token=use_token)
def auth_userpass(self,
username,
password,
mount_point='userpass',
use_token=True,
**kwargs):
"""
POST /auth/<mount point>/login/<username>
"""
params = {
'password': password,
}
params.update(kwargs)
return self.auth(
'/v1/auth/{0}/login/{1}'.format(mount_point, username),
json=params,
use_token=use_token)
def auth_ec2(self, pkcs7, nonce=None, role=None, use_token=True):
"""
POST /auth/aws/login
"""
params = {'pkcs7': pkcs7}
if nonce:
params['nonce'] = nonce
if role:
params['role'] = role
return self.auth(
'/v1/auth/aws/login', json=params, use_token=use_token)
def create_userpass(self,
username,
password,
policies,
mount_point='userpass',
**kwargs):
"""
POST /auth/<mount point>/users/<username>
"""
# Users can have more than 1 policy. It is easier for the user to pass
# in the policies as a list so if they do, we need to convert
        # to a comma-delimited string.
if isinstance(policies, (list, set, tuple)):
policies = ','.join(policies)
params = {'password': password, 'policies': policies}
params.update(kwargs)
return self._post(
'/v1/auth/{}/users/{}'.format(mount_point, username), json=params)
def delete_userpass(self, username, mount_point='userpass'):
"""
DELETE /auth/<mount point>/users/<username>
"""
return self._delete('/v1/auth/{}/users/{}'.format(
mount_point, username))
def create_app_id(self,
app_id,
policies,
display_name=None,
mount_point='app-id',
**kwargs):
"""
POST /auth/<mount point>/map/app-id/<app_id>
"""
# app-id can have more than 1 policy. It is easier for the user to
# pass in the policies as a list so if they do, we need to convert
        # to a comma-delimited string.
if isinstance(policies, (list, set, tuple)):
policies = ','.join(policies)
params = {'value': policies}
# Only use the display_name if it has a value. Made it a named param
        # for user convenience instead of leaving it as part of the kwargs
if display_name:
params['display_name'] = display_name
params.update(kwargs)
return self._post(
'/v1/auth/{}/map/app-id/{}'.format(mount_point, app_id),
json=params)
def get_app_id(self, app_id, mount_point='app-id', wrap_ttl=None):
"""
GET /auth/<mount_point>/map/app-id/<app_id>
"""
path = '/v1/auth/{0}/map/app-id/{1}'.format(mount_point, app_id)
return self._get(path, wrap_ttl=wrap_ttl).json()
def delete_app_id(self, app_id, mount_point='app-id'):
"""
DELETE /auth/<mount_point>/map/app-id/<app_id>
"""
return self._delete('/v1/auth/{0}/map/app-id/{1}'.format(
mount_point, app_id))
def create_user_id(self,
user_id,
app_id,
cidr_block=None,
mount_point='app-id',
**kwargs):
"""
POST /auth/<mount point>/map/user-id/<user_id>
"""
# user-id can be associated to more than 1 app-id (aka policy).
# It is easier for the user to pass in the policies as a list so if
        # they do, we need to convert to a comma-delimited string.
if isinstance(app_id, (list, set, tuple)):
app_id = ','.join(app_id)
params = {'value': app_id}
# Only use the cidr_block if it has a value. Made it a named param for
        # user convenience instead of leaving it as part of the kwargs
if cidr_block:
params['cidr_block'] = cidr_block
params.update(kwargs)
return self._post(
'/v1/auth/{}/map/user-id/{}'.format(mount_point, user_id),
json=params)
def get_user_id(self, user_id, mount_point='app-id', wrap_ttl=None):
"""
GET /auth/<mount_point>/map/user-id/<user_id>
"""
path = '/v1/auth/{0}/map/user-id/{1}'.format(mount_point, user_id)
return self._get(path, wrap_ttl=wrap_ttl).json()
def delete_user_id(self, user_id, mount_point='app-id'):
"""
DELETE /auth/<mount_point>/map/user-id/<user_id>
"""
return self._delete('/v1/auth/{0}/map/user-id/{1}'.format(
mount_point, user_id))
def create_vault_ec2_client_configuration(self,
access_key=None,
secret_key=None,
endpoint=None):
"""
POST /auth/aws/config/client
"""
params = {}
if access_key:
params['access_key'] = access_key
if secret_key:
params['secret_key'] = secret_key
if endpoint is not None:
params['endpoint'] = endpoint
return self._post('/v1/auth/aws/config/client', json=params)
def get_vault_ec2_client_configuration(self):
"""
GET /auth/aws/config/client
"""
return self._get('/v1/auth/aws/config/client').json()
def delete_vault_ec2_client_configuration(self):
"""
DELETE /auth/aws/config/client
"""
return self._delete('/v1/auth/aws/config/client')
def create_vault_ec2_certificate_configuration(self, cert_name,
aws_public_cert):
"""
POST /auth/aws/config/certificate/<cert_name>
"""
params = {'cert_name': cert_name, 'aws_public_cert': aws_public_cert}
return self._post(
'/v1/auth/aws/config/certificate/{0}'.format(cert_name),
json=params)
def get_vault_ec2_certificate_configuration(self, cert_name):
"""
GET /auth/aws/config/certificate/<cert_name>
"""
return self._get('/v1/auth/aws/config/certificate/{0}'.format(
cert_name)).json()
def list_vault_ec2_certificate_configurations(self):
"""
GET /auth/aws/config/certificates?list=true
"""
params = {'list': True}
return self._get(
'/v1/auth/aws/config/certificates', params=params).json()
def create_ec2_role(self,
role,
bound_ami_id=None,
bound_account_id=None,
bound_iam_role_arn=None,
bound_iam_instance_profile_arn=None,
role_tag=None,
max_ttl=None,
policies=None,
allow_instance_migration=False,
disallow_reauthentication=False,
period="",
**kwargs):
"""
POST /auth/aws/role/<role>
"""
params = {
'role': role,
'disallow_reauthentication': disallow_reauthentication,
'allow_instance_migration': allow_instance_migration,
'period': period
}
if bound_ami_id is not None:
params['bound_ami_id'] = bound_ami_id
if bound_account_id is not None:
params['bound_account_id'] = bound_account_id
if bound_iam_role_arn is not None:
params['bound_iam_role_arn'] = bound_iam_role_arn
if bound_iam_instance_profile_arn is not None:
params[
'bound_iam_instance_profile_arn'] = bound_iam_instance_profile_arn
if role_tag is not None:
params['role_tag'] = role_tag
if max_ttl is not None:
params['max_ttl'] = max_ttl
if policies is not None:
params['policies'] = policies
params.update(**kwargs)
return self._post(
'/v1/auth/aws/role/{0}'.format(role), json=params)
def get_ec2_role(self, role):
"""
GET /auth/aws/role/<role>
"""
return self._get('/v1/auth/aws/role/{0}'.format(role)).json()
def delete_ec2_role(self, role):
"""
DELETE /auth/aws/role/<role>
"""
return self._delete('/v1/auth/aws/role/{0}'.format(role))
def list_ec2_roles(self):
"""
GET /auth/aws/roles?list=true
"""
try:
return self._get(
'/v1/auth/aws/roles', params={
'list': True
}).json()
except InvalidPath:
return None
def create_ec2_role_tag(self,
role,
policies=None,
max_ttl=None,
instance_id=None,
disallow_reauthentication=False,
allow_instance_migration=False):
"""
POST /auth/aws/role/<role>/tag
"""
params = {
'role': role,
'disallow_reauthentication': disallow_reauthentication,
'allow_instance_migration': allow_instance_migration
}
if max_ttl is not None:
params['max_ttl'] = max_ttl
if policies is not None:
params['policies'] = policies
if instance_id is not None:
params['instance_id'] = instance_id
return self._post(
'/v1/auth/aws/role/{0}/tag'.format(role), json=params).json()
def auth_ldap(self,
username,
password,
mount_point='ldap',
use_token=True,
**kwargs):
"""
POST /auth/<mount point>/login/<username>
"""
params = {
'password': password,
}
params.update(kwargs)
return self.auth(
'/v1/auth/{0}/login/{1}'.format(mount_point, username),
json=params,
use_token=use_token)
def auth_github(self, token, mount_point='github', use_token=True):
"""
POST /auth/<mount point>/login
"""
params = {
'token': token,
}
return self.auth(
'/v1/auth/{0}/login'.format(mount_point),
json=params,
use_token=use_token)
def auth(self, url, use_token=True, **kwargs):
response = self._post(url, **kwargs).json()
if use_token:
self.token = response['auth']['client_token']
return response
def list_auth_backends(self):
"""
GET /sys/auth
"""
return self._get('/v1/sys/auth').json()
def enable_auth_backend(self,
backend_type,
description=None,
mount_point=None):
"""
POST /sys/auth/<mount point>
"""
if not mount_point:
mount_point = backend_type
params = {
'type': backend_type,
'description': description,
}
self._post('/v1/sys/auth/{0}'.format(mount_point), json=params)
def disable_auth_backend(self, mount_point):
"""
DELETE /sys/auth/<mount point>
"""
self._delete('/v1/sys/auth/{0}'.format(mount_point))
def create_role(self, role_name, **kwargs):
"""
POST /auth/approle/role/<role name>
"""
self._post('/v1/auth/approle/role/{0}'.format(role_name), json=kwargs)
def list_roles(self):
"""
GET /auth/approle/role
"""
return self._get('/v1/auth/approle/role?list=true').json()
def get_role_id(self, role_name):
"""
GET /auth/approle/role/<role name>/role-id
"""
url = '/v1/auth/approle/role/{0}/role-id'.format(role_name)
return self._get(url).json()['data']['role_id']
def set_role_id(self, role_name, role_id):
"""
POST /auth/approle/role/<role name>/role-id
"""
url = '/v1/auth/approle/role/{0}/role-id'.format(role_name)
params = {'role_id': role_id}
self._post(url, json=params)
def get_role(self, role_name):
"""
GET /auth/approle/role/<role name>
"""
return self._get('/v1/auth/approle/role/{0}'.format(role_name)).json()
def create_role_secret_id(self, role_name, meta=None, cidr_list=None):
"""
POST /auth/approle/role/<role name>/secret-id
"""
url = '/v1/auth/approle/role/{0}/secret-id'.format(role_name)
params = {}
if meta is not None:
params['metadata'] = json.dumps(meta)
if cidr_list is not None:
params['cidr_list'] = cidr_list
return self._post(url, json=params).json()
def get_role_secret_id(self, role_name, secret_id):
"""
POST /auth/approle/role/<role name>/secret-id/lookup
"""
url = '/v1/auth/approle/role/{0}/secret-id/lookup'.format(role_name)
params = {'secret_id': secret_id}
return self._post(url, json=params).json()
def list_role_secrets(self, role_name):
"""
GET /auth/approle/role/<role name>/secret-id?list=true
"""
url = '/v1/auth/approle/role/{0}/secret-id?list=true'.format(role_name)
return self._get(url).json()
def get_role_secret_id_accessor(self, role_name, secret_id_accessor):
"""
GET /auth/approle/role/<role name>/secret-id-accessor/<secret_id_accessor>
"""
url = '/v1/auth/approle/role/{0}/secret-id-accessor/{1}'.format(
role_name, secret_id_accessor)
return self._get(url).json()
def delete_role_secret_id(self, role_name, secret_id):
"""
POST /auth/approle/role/<role name>/secret-id/destroy
"""
url = '/v1/auth/approle/role/{0}/secret-id/destroy'.format(role_name)
params = {'secret_id': secret_id}
self._post(url, json=params)
def delete_role_secret_id_accessor(self, role_name, secret_id_accessor):
"""
DELETE /auth/approle/role/<role name>/secret-id/<secret_id_accessor>
"""
url = '/v1/auth/approle/role/{0}/secret-id-accessor/{1}'.format(
role_name, secret_id_accessor)
self._delete(url)
def create_role_custom_secret_id(self, role_name, secret_id, meta=None):
"""
POST /auth/approle/role/<role name>/custom-secret-id
"""
url = '/v1/auth/approle/role/{0}/custom-secret-id'.format(role_name)
params = {'secret_id': secret_id}
if meta is not None:
params['meta'] = meta
return self._post(url, json=params).json()
def auth_approle(self,
role_id,
secret_id=None,
mount_point='approle',
use_token=True):
"""
POST /auth/approle/login
"""
params = {'role_id': role_id}
if secret_id is not None:
params['secret_id'] = secret_id
return self.auth(
'/v1/auth/{0}/login'.format(mount_point),
json=params,
use_token=use_token)
def transit_create_key(self,
name,
convergent_encryption=None,
derived=None,
exportable=None,
key_type=None,
mount_point='transit'):
"""
POST /<mount_point>/keys/<name>
"""
url = '/v1/{0}/keys/{1}'.format(mount_point, name)
params = {}
if convergent_encryption is not None:
params['convergent_encryption'] = convergent_encryption
if derived is not None:
params['derived'] = derived
if exportable is not None:
params['exportable'] = exportable
if key_type is not None:
params['type'] = key_type
return self._post(url, json=params)
def transit_read_key(self, name, mount_point='transit'):
"""
GET /<mount_point>/keys/<name>
"""
url = '/v1/{0}/keys/{1}'.format(mount_point, name)
return self._get(url).json()
def transit_list_keys(self, mount_point='transit'):
"""
GET /<mount_point>/keys?list=true
"""
url = '/v1/{0}/keys?list=true'.format(mount_point)
return self._get(url).json()
def transit_delete_key(self, name, mount_point='transit'):
"""
DELETE /<mount_point>/keys/<name>
"""
url = '/v1/{0}/keys/{1}'.format(mount_point, name)
return self._delete(url)
def transit_update_key(self,
name,
min_decryption_version=None,
min_encryption_version=None,
deletion_allowed=None,
mount_point='transit'):
"""
POST /<mount_point>/keys/<name>/config
"""
url = '/v1/{0}/keys/{1}/config'.format(mount_point, name)
params = {}
if min_decryption_version is not None:
params['min_decryption_version'] = min_decryption_version
if min_encryption_version is not None:
params['min_encryption_version'] = min_encryption_version
if deletion_allowed is not None:
params['deletion_allowed'] = deletion_allowed
return self._post(url, json=params)
def transit_rotate_key(self, name, mount_point='transit'):
"""
POST /<mount_point>/keys/<name>/rotate
"""
url = '/v1/{0}/keys/{1}/rotate'.format(mount_point, name)
return self._post(url)
def transit_export_key(self,
name,
key_type,
version=None,
mount_point='transit'):
"""
GET /<mount_point>/export/<key_type>/<name>(/<version>)
"""
if version is not None:
url = '/v1/{0}/export/{1}/{2}/{3}'.format(mount_point, key_type,
name, version)
else:
url = '/v1/{0}/export/{1}/{2}'.format(mount_point, key_type, name)
return self._get(url).json()
def transit_encrypt_data(self,
name,
plaintext,
context=None,
key_version=None,
nonce=None,
batch_input=None,
key_type=None,
convergent_encryption=None,
mount_point='transit'):
"""
POST /<mount_point>/encrypt/<name>
"""
url = '/v1/{0}/encrypt/{1}'.format(mount_point, name)
params = {'plaintext': plaintext}
if context is not None:
params['context'] = context
if key_version is not None:
params['key_version'] = key_version
if nonce is not None:
params['nonce'] = nonce
if batch_input is not None:
params['batch_input'] = batch_input
if key_type is not None:
params['type'] = key_type
if convergent_encryption is not None:
params['convergent_encryption'] = convergent_encryption
return self._post(url, json=params).json()
def transit_decrypt_data(self,
name,
ciphertext,
context=None,
nonce=None,
batch_input=None,
mount_point='transit'):
"""
POST /<mount_point>/decrypt/<name>
"""
url = '/v1/{0}/decrypt/{1}'.format(mount_point, name)
params = {'ciphertext': ciphertext}
if context is not None:
params['context'] = context
if nonce is not None:
params['nonce'] = nonce
if batch_input is not None:
params['batch_input'] = batch_input
return self._post(url, json=params).json()
def transit_rewrap_data(self,
name,
ciphertext,
context=None,
key_version=None,
nonce=None,
batch_input=None,
mount_point='transit'):
"""
POST /<mount_point>/rewrap/<name>
"""
url = '/v1/{0}/rewrap/{1}'.format(mount_point, name)
params = {'ciphertext': ciphertext}
if context is not None:
params['context'] = context
if key_version is not None:
params['key_version'] = key_version
if nonce is not None:
params['nonce'] = nonce
if batch_input is not None:
params['batch_input'] = batch_input
return self._post(url, json=params).json()
def transit_generate_data_key(self,
name,
key_type,
context=None,
nonce=None,
bits=None,
mount_point='transit'):
"""
POST /<mount_point>/datakey/<type>/<name>
"""
url = '/v1/{0}/datakey/{1}/{2}'.format(mount_point, key_type, name)
params = {}
if context is not None:
params['context'] = context
if nonce is not None:
params['nonce'] = nonce
if bits is not None:
params['bits'] = bits
return self._post(url, json=params).json()
def transit_generate_rand_bytes(self,
data_bytes=None,
output_format=None,
mount_point='transit'):
"""
POST /<mount_point>/random(/<data_bytes>)
"""
if data_bytes is not None:
url = '/v1/{0}/random/{1}'.format(mount_point, data_bytes)
else:
url = '/v1/{0}/random'.format(mount_point)
params = {}
if output_format is not None:
params["format"] = output_format
return self._post(url, json=params).json()
def transit_hash_data(self,
hash_input,
algorithm=None,
output_format=None,
mount_point='transit'):
"""
POST /<mount_point>/hash(/<algorithm>)
"""
if algorithm is not None:
url = '/v1/{0}/hash/{1}'.format(mount_point, algorithm)
else:
url = '/v1/{0}/hash'.format(mount_point)
params = {'input': hash_input}
if output_format is not None:
params['format'] = output_format
return self._post(url, json=params).json()
def transit_generate_hmac(self,
name,
hmac_input,
key_version=None,
algorithm=None,
mount_point='transit'):
"""
POST /<mount_point>/hmac/<name>(/<algorithm>)
"""
if algorithm is not None:
url = '/v1/{0}/hmac/{1}/{2}'.format(mount_point, name, algorithm)
else:
url = '/v1/{0}/hmac/{1}'.format(mount_point, name)
params = {'input': hmac_input}
if key_version is not None:
params['key_version'] = key_version
return self._post(url, json=params).json()
def transit_sign_data(self,
name,
input_data,
key_version=None,
algorithm=None,
context=None,
prehashed=None,
mount_point='transit'):
"""
POST /<mount_point>/sign/<name>(/<algorithm>)
"""
if algorithm is not None:
url = '/v1/{0}/sign/{1}/{2}'.format(mount_point, name, algorithm)
else:
url = '/v1/{0}/sign/{1}'.format(mount_point, name)
params = {'input': input_data}
if key_version is not None:
params['key_version'] = key_version
if context is not None:
params['context'] = context
if prehashed is not None:
params['prehashed'] = prehashed
return self._post(url, json=params).json()
def transit_verify_signed_data(self,
name,
input_data,
algorithm=None,
signature=None,
hmac=None,
context=None,
prehashed=None,
mount_point='transit'):
"""
POST /<mount_point>/verify/<name>(/<algorithm>)
"""
if algorithm is not None:
url = '/v1/{0}/verify/{1}/{2}'.format(mount_point, name, algorithm)
else:
url = '/v1/{0}/verify/{1}'.format(mount_point, name)
params = {'input': input_data}
if signature is not None:
params['signature'] = signature
if hmac is not None:
params['hmac'] = hmac
if context is not None:
params['context'] = context
if prehashed is not None:
params['prehashed'] = prehashed
return self._post(url, json=params).json()
def close(self):
"""
Close the underlying Requests session
"""
self.session.close()
def _get(self, url, **kwargs):
return self.__request('get', url, **kwargs)
def _post(self, url, **kwargs):
return self.__request('post', url, **kwargs)
def _put(self, url, **kwargs):
return self.__request('put', url, **kwargs)
def _delete(self, url, **kwargs):
return self.__request('delete', url, **kwargs)
def __request(self, method, url, headers=None, **kwargs):
url = urljoin(self._url, url)
if not headers:
headers = {}
if self.token:
headers['X-Vault-Token'] = self.token
wrap_ttl = kwargs.pop('wrap_ttl', None)
if wrap_ttl:
headers['X-Vault-Wrap-TTL'] = str(wrap_ttl)
_kwargs = self._kwargs.copy()
_kwargs.update(kwargs)
response = self.session.request(
method, url, headers=headers, allow_redirects=False, **_kwargs)
# NOTE(ianunruh): workaround for https://github.com/ianunruh/hvac/issues/51
while response.is_redirect and self.allow_redirects:
url = urljoin(self._url, response.headers['Location'])
response = self.session.request(
method, url, headers=headers, allow_redirects=False, **_kwargs)
if response.status_code >= 400 and response.status_code < 600:
text = errors = None
if response.headers.get('Content-Type') == 'application/json':
errors = response.json().get('errors')
if errors is None:
text = response.text
self.__raise_error(response.status_code, text, errors=errors)
return response
def __raise_error(self, status_code, message=None, errors=None):
if status_code == 400:
raise InvalidRequest(message, errors=errors)
elif status_code == 401:
raise Unauthorized(message, errors=errors)
elif status_code == 403:
raise Forbidden(message, errors=errors)
elif status_code == 404:
raise InvalidPath(message, errors=errors)
elif status_code == 429:
raise RateLimitExceeded(message, errors=errors)
elif status_code == 500:
raise InternalServerError(message, errors=errors)
elif status_code == 501:
raise VaultNotInitialized(message, errors=errors)
elif status_code == 503:
raise VaultDown(message, errors=errors)
else:
raise UnexpectedError(message)
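# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It assumes `client`
# is an already-authenticated instance of the client class defined above,
# that the Vault server is reachable, and that 'my-app', 'my-app-role' and
# 'example-key' are placeholder names chosen by the operator. Vault's transit
# engine expects base64-encoded plaintext, hence the encoding below. Note that
# auth_approle() swaps the active token, so it is done last here.
# ---------------------------------------------------------------------------
def _example_vault_usage(client):
    import base64

    # Issue a short-lived child token restricted to a hypothetical policy.
    created = client.create_token(policies=['my-app'], ttl='1h', renewable=True)
    child_token = created['auth']['client_token']

    # Transit round trip: encrypt, then decrypt, a base64-encoded payload.
    client.transit_create_key('example-key')
    ciphertext = client.transit_encrypt_data(
        'example-key',
        base64.b64encode(b'hello world').decode('ascii'))['data']['ciphertext']
    plaintext_b64 = client.transit_decrypt_data(
        'example-key', ciphertext)['data']['plaintext']

    # AppRole login flow using the helpers defined above.
    client.create_role('my-app-role', policies='my-app')
    role_id = client.get_role_id('my-app-role')
    secret_id = client.create_role_secret_id('my-app-role')['data']['secret_id']
    client.auth_approle(role_id, secret_id)

    return child_token, base64.b64decode(plaintext_b64)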
def cache_client(client_builder):
_client = []
@wraps(client_builder)
def get_client(*args, **kwargs):
if not _client:
_client.append(client_builder(*args, **kwargs))
return _client[0]
return get_client
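# Hedged illustration (not part of the original module): cache_client memoizes
# whatever the wrapped builder returns on its first call, so later calls get
# the same object back regardless of their arguments. A tiny stand-in builder
# makes the behaviour easy to see.
def _example_cache_client_demo():
    @cache_client
    def _build(value):
        return {'value': value}

    first = _build(1)
    second = _build(2)   # arguments ignored; the cached instance is returned
    assert first is second and first['value'] == 1
    return first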
@cache_client
def build_client(url='https://localhost:8200',
token=None,
cert=None,
verify=True,
timeout=30,
proxies=None,
allow_redirects=True,
session=None):
client_kwargs = locals()
for k, v in client_kwargs.items():
if k.startswith('_'):
continue
arg_val = __salt__['config.get']('vault.{key}'.format(key=k), v)
log.debug('Setting {0} parameter for HVAC client to {1}.'
.format(k, arg_val))
client_kwargs[k] = arg_val
return VaultClient(**client_kwargs)
def vault_client():
return VaultClient
def vault_error():
return VaultError
def bind_client(unbound_function):
@wraps(unbound_function)
def bound_function(*args, **kwargs):
filtered_kwargs = {k: v for k, v in kwargs.items()
if not k.startswith('_')}
ignore_invalid = filtered_kwargs.pop('ignore_invalid', None)
client = build_client()
try:
return unbound_function(client, *args, **filtered_kwargs)
except InvalidRequest:
if ignore_invalid:
return None
else:
raise
return bound_function
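# Hedged usage sketch (not part of the original module): bind_client turns a
# function that expects a client as its first argument into one that builds
# the (cached) client itself via build_client(). '_example_read_secret' and
# the 'secret/example' path are hypothetical; client.read() is the generic
# read helper used by the client class above (see token_role).
@bind_client
def _example_read_secret(client, path):
    return client.read(path)

# Calling _example_read_secret('secret/example', ignore_invalid=True) would
# return None instead of raising if Vault answers with an InvalidRequest.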
def get_keybase_pubkey(username):
"""
Return the base64 encoded public PGP key for a keybase user.
"""
# Retrieve the text of the public key stored in Keybase
user = requests.get('https://keybase.io/{username}/key.asc'.format(
username=username))
# Explicitly raise an exception if there is an HTTP error. No-op on success
user.raise_for_status()
# Process the key to only include the contents and not the wrapping
# contents (e.g. ----BEGIN PGP KEY---)
key_lines = user.text.strip('\n').split('\n')
key_lines = key_lines[key_lines.index(''):-2]
return ''.join(key_lines)
def unseal(sealing_keys):
client = build_client()
client.unseal_multi(sealing_keys)
def rekey(secret_shares, secret_threshold, sealing_keys, pgp_keys, root_token):
client = build_client(token=root_token)
rekey = client.start_rekey(secret_shares, secret_threshold, pgp_keys,
backup=True)
client.rekey_multi(sealing_keys, nonce=rekey['nonce'])
def wait_after_init(client, retries=5):
'''This function will allow for a configurable delay before attempting
to issue requests after an initialization. This is necessary because when
running on an HA backend there is a short period where the Vault instance
will be on standby while it acquires the lock.'''
ready = False
while retries > 0 and not ready:
try:
status = client.read('sys/health')
ready = (status.get('initialized') and not status.get('sealed')
and not status.get('standby'))
except VaultError:
pass
if ready:
break
retries -= 1
time.sleep(1)
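# Hedged sketch (not part of the original module) of how the helpers above can
# be combined right after initialising a new Vault cluster: submit the unseal
# key shares, then give an HA standby a moment to acquire the lock before
# issuing further requests. build_client() assumes the Salt __salt__ dunder is
# available for configuration lookups; `sealing_keys` is operator-supplied.
def _example_unseal_flow(sealing_keys):
    unseal(sealing_keys)
    client = build_client()   # cached: same client instance unseal() used
    wait_after_init(client)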

# === src/thelist/migrations/0001_initial.py (terean-dspd/data-tables-plus-django-rest-famework-related-object-sotring, MIT) ===

# Generated by Django 2.0.6 on 2018-06-08 11:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.IntegerField(verbose_name='Amount')),
('item', models.CharField(max_length=200, verbose_name='Item')),
                ('client', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='thelist.Client', verbose_name='Client')),
],
),
]

# === kinopoisk/movie/__init__.py (GitBib/kinopoiskpy, BSD-3-Clause) ===

# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from future.utils import python_2_unicode_compatible
from bs4 import BeautifulSoup
from .sources import (
MovieLink, MoviePremierLink, MovieMainPage, MoviePostersPage, MovieTrailersPage, MovieSeries, MovieCareerLink,
MovieCastPage, MovieRoleLink, MovieStillsPage)
from ..utils import KinopoiskObject, Manager, HEADERS
@python_2_unicode_compatible
class Movie(KinopoiskObject):
"""
Movie Class
"""
def set_defaults(self):
self.title = ''
self.title_en = ''
self.plot = ''
self.year = None
self.countries = []
self.tagline = ''
self.actors = []
self.directors = []
self.screenwriters = []
self.producers = []
self.operators = []
self.composers = []
self.art_direction_by = []
self.editing_by = []
self.genres = []
self.cast = {}
self.budget = None
self.marketing = None
self.profit_usa = None
self.profit_russia = None
self.profit_world = None
self.audience = []
self.rating = None
self.votes = None
self.imdb_rating = None
self.imdb_votes = None
self.runtime = None
self.release = None
self.posters = []
self.trailers = []
self.youtube_ids = []
self.series = None
self.series_years = tuple()
self.seasons = []
def __init__(self, *args, **kwargs):
super(Movie, self).__init__(*args, **kwargs)
self.register_source('link', MovieLink)
self.register_source('premier_link', MoviePremierLink)
self.register_source('career_link', MovieCareerLink)
self.register_source('main_page', MovieMainPage)
self.register_source('cast', MovieCastPage)
self.register_source('posters', MoviePostersPage)
self.register_source('stills', MovieStillsPage)
self.register_source('trailers', MovieTrailersPage)
self.register_source('series', MovieSeries)
def __repr__(self):
return '{} ({}), {}'.format(self.title, self.title_en, self.year or '-')
def add_trailer(self, trailer_id):
trailer = Trailer(trailer_id)
if trailer.is_valid and trailer.id not in [tr.id for tr in self.trailers]:
self.trailers.append(trailer)
def add_series_season(self, year, episodes):
self.seasons.append(SeriesSeason(year, [SeriesEpisode(title, date) for title, date in episodes]))
@python_2_unicode_compatible
class Role(KinopoiskObject):
"""
Movie Role Class
"""
def set_defaults(self):
self.name = ''
self.person = None
def __init__(self, *args, **kwargs):
super(Role, self).__init__(*args, **kwargs)
self.register_source('role_link', MovieRoleLink)
@python_2_unicode_compatible
class Trailer(object):
"""
Movie Trailer Class
"""
def set_defaults(self):
self.id = None
def __init__(self, id):
self.set_defaults()
if id:
self.id = id
@property
def is_valid(self):
"""
Check if filename is correct
"""
# not youtube video '521689/' (http://www.kinopoisk.ru/film/521689/video/)
return self.file[-1] != '/'
@property
def file(self):
trailer_file = 'gettrailer.php?quality=hd&trailer_id={}'.format(self.id)
return trailer_file
@python_2_unicode_compatible
class SeriesEpisode(object):
def set_defaults(self):
self.title = ''
self.release_date = None
def __init__(self, title=None, release_date=None):
self.set_defaults()
self.title = title
self.release_date = release_date
def __repr__(self):
return '{}, {}'.format(self.title if self.title else '???', self.release_date or '-')
@python_2_unicode_compatible
class SeriesSeason(object):
def set_defaults(self):
self.year = None
self.episodes = []
def __init__(self, year, episodes=None):
self.set_defaults()
self.year = year
if episodes:
self.episodes = episodes
def __repr__(self):
return '{}: {}'.format(self.year, len(self.episodes))
class MovieManager(Manager):
"""
Movie manager
"""
kinopoisk_object = Movie
def get_url_with_params(self, query):
# http://www.kinopoisk.ru/index.php?level=7&from=forma&result=adv&m_act[from]=forma&m_act[what]=content&m_act[find]=pulp+fiction
return ('http://www.kinopoisk.ru/index.php', {
'level': 7,
'from': 'forma',
'result': 'adv',
'm_act[from]': 'forma',
'm_act[what]': 'content',
'm_act[find]': query,
})
    # Note: results are returned not by relevance but in an unclear order.
# http://www.kinopoisk.ru/index.php?level=7&ser=a:3:{s:4:"find";s:3:"day";s:4:"what";s:7:"content";s:5:"count";a:1:{s:7:"content";s:3:"113";}}&show=all
# return ('http://www.kinopoisk.ru/index.php', {
# 'level': 7,
# 'ser': 'a:3:{s:4:"find";s:%d:"%s";s:4:"what";s:7:"content";s:5:"count";a:1:{s:7:"content";s:3:"113";}}' % (
# len(query), query),
# 'show': 'all',
# })
class MoviePremiersManager(Manager):
kinopoisk_object = Movie
def get_url_with_params(self, query=None):
return 'http://www.kinopoisk.ru/level/8/view/prem/', {}
def all(self):
url, params = self.get_url_with_params()
response = self.request.get(url, params=params, headers=HEADERS)
content = response.content.decode('windows-1251', 'ignore')
content_soup = BeautifulSoup(content, 'html.parser')
instances = []
for premier in content_soup.findAll('div', {'class': 'premier_item'}):
instance = self.kinopoisk_object.get_parsed('premier_link', premier)
instances += [instance]
return instances
Movie.objects = MovieManager()
Movie.premiers = MoviePremiersManager()
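# Hedged usage sketch (not part of the original module): the premiers manager
# attached above fetches the kinopoisk.ru premiers page and yields Movie
# instances parsed from each 'premier_item' block. Network access is assumed,
# and title/year are only filled in as far as the premier parser provides them.
def _example_list_premiers():
    return [(movie.title, movie.year) for movie in Movie.premiers.all()]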

# === job_app/transformers.py (ahmedezzeldin93/heyjobs, MIT) ===

import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
def __init__(self, salary_ix, hours_ix):
self.salary_ix = salary_ix
self.hours_ix = hours_ix
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
salary_per_hour = X[:, self.salary_ix] / X[:, self.hours_ix]
return np.c_[X, salary_per_hour]
class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names].values
class NumpySelector(BaseEstimator, TransformerMixin):
def __init__(self, indx):
self.indx = indx
def fit(self, X, y=None):
return self
def transform(self, X):
return X[:, self.indx]
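# Hedged usage sketch (not part of the original module): the classes above are
# ordinary scikit-learn transformers, so they compose inside a Pipeline. The
# column layout (salary in column 0, weekly hours in column 1) is an
# illustrative assumption.
def _example_pipeline():
    from sklearn.pipeline import Pipeline

    X = np.array([[52000.0, 40.0],
                  [39000.0, 30.0]])
    pipe = Pipeline([
        ('add_salary_per_hour', CombinedAttributesAdder(salary_ix=0, hours_ix=1)),
        ('keep_salary_and_ratio', NumpySelector(indx=[0, 2])),
    ])
    return pipe.fit_transform(X)   # columns: salary, derived salary-per-hour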

# === torch_geometric/datasets/molecule_net.py (lsj2408/pytorch_geometric, MIT) ===

import os
import os.path as osp
import re
import torch
from torch_geometric.data import (InMemoryDataset, Data, download_url,
extract_gz)
x_map = {
'atomic_num':
list(range(0, 119)),
'chirality': [
'CHI_UNSPECIFIED',
'CHI_TETRAHEDRAL_CW',
'CHI_TETRAHEDRAL_CCW',
'CHI_OTHER',
],
'degree':
list(range(0, 11)),
'formal_charge':
list(range(-5, 7)),
'num_hs':
list(range(0, 9)),
'num_radical_electrons':
list(range(0, 5)),
'hybridization': [
'UNSPECIFIED',
'S',
'SP',
'SP2',
'SP3',
'SP3D',
'SP3D2',
'OTHER',
],
'is_aromatic': [False, True],
'is_in_ring': [False, True],
}
e_map = {
'bond_type': [
'misc',
'SINGLE',
'DOUBLE',
'TRIPLE',
'AROMATIC',
],
'stereo': [
'STEREONONE',
'STEREOZ',
'STEREOE',
'STEREOCIS',
'STEREOTRANS',
'STEREOANY',
],
'is_conjugated': [False, True],
}
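# Hedged illustration (not part of the original file): the lookup tables above
# map RDKit atom/bond properties to integer indices; process() builds a
# 9-entry vector per atom and a 3-entry vector per bond this way. The values
# below (an aromatic ring carbon and one of its aromatic bonds) are just an
# example.
def _example_feature_indices():
    atom_prefix = [
        x_map['atomic_num'].index(6),                 # carbon -> 6
        x_map['chirality'].index('CHI_UNSPECIFIED'),  # -> 0
        x_map['degree'].index(3),                     # -> 3
        x_map['formal_charge'].index(0),              # -> 5 (range starts at -5)
        x_map['num_hs'].index(1),                     # -> 1
    ]
    bond_features = [
        e_map['bond_type'].index('AROMATIC'),         # -> 4
        e_map['stereo'].index('STEREONONE'),          # -> 0
        e_map['is_conjugated'].index(True),           # -> 1
    ]
    return atom_prefix, bond_features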
class MoleculeNet(InMemoryDataset):
r"""The `MoleculeNet <http://moleculenet.ai/datasets-1>`_ benchmark
collection from the `"MoleculeNet: A Benchmark for Molecular Machine
Learning" <https://arxiv.org/abs/1703.00564>`_ paper, containing datasets
from physical chemistry, biophysics and physiology.
All datasets come with the additional node and edge features introduced by
the `Open Graph Benchmark <https://ogb.stanford.edu/docs/graphprop/>`_.
Args:
root (string): Root directory where the dataset should be saved.
name (string): The name of the dataset (:obj:`"ESOL"`,
:obj:`"FreeSolv"`, :obj:`"Lipo"`, :obj:`"PCBA"`, :obj:`"MUV"`,
:obj:`"HIV"`, :obj:`"BACE"`, :obj:`"BBPB"`, :obj:`"Tox21"`,
:obj:`"ToxCast"`, :obj:`"SIDER"`, :obj:`"ClinTox"`).
transform (callable, optional): A function/transform that takes in an
:obj:`torch_geometric.data.Data` object and returns a transformed
version. The data object will be transformed before every access.
(default: :obj:`None`)
pre_transform (callable, optional): A function/transform that takes in
an :obj:`torch_geometric.data.Data` object and returns a
transformed version. The data object will be transformed before
being saved to disk. (default: :obj:`None`)
pre_filter (callable, optional): A function that takes in an
:obj:`torch_geometric.data.Data` object and returns a boolean
value, indicating whether the data object should be included in the
final dataset. (default: :obj:`None`)
"""
url = 'https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/{}'
# Format: name: [display_name, url_name, csv_name, smiles_idx, y_idx]
names = {
'esol': ['ESOL', 'delaney-processed.csv', 'delaney-processed', -1, -2],
'freesolv': ['FreeSolv', 'SAMPL.csv', 'SAMPL', 1, 2],
'lipo': ['Lipophilicity', 'Lipophilicity.csv', 'Lipophilicity', 2, 1],
'pcba': ['PCBA', 'pcba.csv.gz', 'pcba', -1,
slice(0, 128)],
'muv': ['MUV', 'muv.csv.gz', 'muv', -1,
slice(0, 17)],
'hiv': ['HIV', 'HIV.csv', 'HIV', 0, -1],
'bace': ['BACE', 'bace.csv', 'bace', 0, 2],
'bbbp': ['BBPB', 'BBBP.csv', 'BBBP', -1, -2],
'tox21': ['Tox21', 'tox21.csv.gz', 'tox21', -1,
slice(0, 12)],
'toxcast':
['ToxCast', 'toxcast_data.csv.gz', 'toxcast_data', 0,
slice(1, 618)],
'sider': ['SIDER', 'sider.csv.gz', 'sider', 0,
slice(1, 28)],
'clintox': ['ClinTox', 'clintox.csv.gz', 'clintox', 0,
slice(1, 3)],
}
def __init__(self, root, name, transform=None, pre_transform=None,
pre_filter=None):
self.name = name.lower()
assert self.name in self.names.keys()
super().__init__(root, transform, pre_transform, pre_filter)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_dir(self):
return osp.join(self.root, self.name, 'raw')
@property
def processed_dir(self):
return osp.join(self.root, self.name, 'processed')
@property
def raw_file_names(self):
return f'{self.names[self.name][2]}.csv'
@property
def processed_file_names(self):
return 'data.pt'
def download(self):
url = self.url.format(self.names[self.name][1])
path = download_url(url, self.raw_dir)
if self.names[self.name][1][-2:] == 'gz':
extract_gz(path, self.raw_dir)
os.unlink(path)
def process(self):
from rdkit import Chem
with open(self.raw_paths[0], 'r') as f:
dataset = f.read().split('\n')[1:-1]
dataset = [x for x in dataset if len(x) > 0] # Filter empty lines.
data_list = []
for line in dataset:
line = re.sub(r'\".*\"', '', line) # Replace ".*" strings.
line = line.split(',')
smiles = line[self.names[self.name][3]]
ys = line[self.names[self.name][4]]
ys = ys if isinstance(ys, list) else [ys]
ys = [float(y) if len(y) > 0 else float('NaN') for y in ys]
y = torch.tensor(ys, dtype=torch.float).view(1, -1)
mol = Chem.MolFromSmiles(smiles)
if mol is None:
continue
xs = []
for atom in mol.GetAtoms():
x = []
x.append(x_map['atomic_num'].index(atom.GetAtomicNum()))
x.append(x_map['chirality'].index(str(atom.GetChiralTag())))
x.append(x_map['degree'].index(atom.GetTotalDegree()))
x.append(x_map['formal_charge'].index(atom.GetFormalCharge()))
x.append(x_map['num_hs'].index(atom.GetTotalNumHs()))
x.append(x_map['num_radical_electrons'].index(
atom.GetNumRadicalElectrons()))
x.append(x_map['hybridization'].index(
str(atom.GetHybridization())))
x.append(x_map['is_aromatic'].index(atom.GetIsAromatic()))
x.append(x_map['is_in_ring'].index(atom.IsInRing()))
xs.append(x)
x = torch.tensor(xs, dtype=torch.long).view(-1, 9)
edge_indices, edge_attrs = [], []
for bond in mol.GetBonds():
i = bond.GetBeginAtomIdx()
j = bond.GetEndAtomIdx()
e = []
e.append(e_map['bond_type'].index(str(bond.GetBondType())))
e.append(e_map['stereo'].index(str(bond.GetStereo())))
e.append(e_map['is_conjugated'].index(bond.GetIsConjugated()))
edge_indices += [[i, j], [j, i]]
edge_attrs += [e, e]
edge_index = torch.tensor(edge_indices)
edge_index = edge_index.t().to(torch.long).view(2, -1)
edge_attr = torch.tensor(edge_attrs, dtype=torch.long).view(-1, 3)
# Sort indices.
if edge_index.numel() > 0:
perm = (edge_index[0] * x.size(0) + edge_index[1]).argsort()
edge_index, edge_attr = edge_index[:, perm], edge_attr[perm]
data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y,
smiles=smiles)
if self.pre_filter is not None and not self.pre_filter(data):
continue
if self.pre_transform is not None:
data = self.pre_transform(data)
data_list.append(data)
torch.save(self.collate(data_list), self.processed_paths[0])
def __repr__(self):
return '{}({})'.format(self.names[self.name][0], len(self))
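# Hedged usage sketch (not part of the original file): instantiating the class
# downloads and processes the named CSV on first use; RDKit must be installed,
# and 'data/MoleculeNet' is an arbitrary local root directory.
def _example_load_esol():
    dataset = MoleculeNet(root='data/MoleculeNet', name='ESOL')
    sample = dataset[0]
    return sample.x.shape, sample.edge_index.shape, sample.y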

# === iconconsole/transaction.py (eunsoo-icon/icon-console, Apache-2.0) ===

# -*- coding: utf-8 -*-
from iconconsole.network import Network
class TransactionResult:
def __init__(self, net: Network, tx_hash: str):
self._net = net
self._hash = tx_hash
def net(self) -> Network:
return self._net
def hash(self) -> str:
return self._hash
def transaction(self) -> dict:
return self._net.sdk.get_transaction(self._hash, True)
def result(self) -> dict:
return self._net.sdk.get_transaction_result(self._hash, True)
def trace(self) -> dict:
return self._net.sdk.get_trace(self._hash)
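# Hedged usage sketch (not part of the original file): TransactionResult only
# needs a configured Network wrapper and a transaction hash; the hash below is
# a placeholder.
def _example_inspect_tx(net: Network):
    tx = TransactionResult(net, '0x' + '0' * 64)
    return tx.transaction(), tx.result()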

# === toscaparser/elements/attribute_definition.py (mikidep/tosca-parser, Apache-2.0) ===

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class AttributeDef(object):
'''TOSCA built-in Attribute type.'''
def __init__(self, name, value=None, schema=None):
self.name = name
self.value = value
self.schema = schema

# === {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/__init__.py (robertopreste/cc-pypackage, BSD-3-Clause) ===

#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Created by {{ cookiecutter.full_name }}
__author__ = """{{ cookiecutter.full_name }}"""
__email__ = "{{ cookiecutter.email }}"
__version__ = '{{ cookiecutter.version }}'

# === LINETCR/Api/channel.py (neo251214/odah, MIT) ===

# -*- coding: utf-8 -*-
import os, sys, json
path = os.path.join(os.path.dirname(__file__), '../lib/')
sys.path.insert(0, path)
import requests
from thrift.transport import THttpClient
from thrift.protocol import TCompactProtocol
from curve import LineService
from curve.ttypes import *
import tempfile
class Channel:
client = None
host = "gd2.line.naver.jp"
http_query_path = "/S4"
channel_query_path = "/CH4"
UA = "Line/1.4.17"
LA = "CHROMEOS\t1.4.17\tChrome_OS\t1"
authToken = None
mid = None
channel_access_token = None
token = None
obs_token = None
refresh_token = None
def __init__(self, authToken):
self.authToken = authToken
self.transport = THttpClient.THttpClient('https://gd2.line.naver.jp:443'+self.http_query_path)
self.transport.setCustomHeaders({ "User-Agent" : self.UA,
"X-Line-Application" : self.LA,
"X-Line-Access": self.authToken
})
self.transport.open()
self.protocol = TCompactProtocol.TCompactProtocol(self.transport)
self.client = LineService.Client(self.protocol)
self.mid = self.client.getProfile().mid
self.transport.path = self.channel_query_path
def login(self):
result = self.client.issueChannelToken("1341209850")
self.channel_access_token = result.channelAccessToken
self.token = result.token
self.obs_token = result.obsToken
self.refresh_token = result.refreshToken
print "channelAccessToken:" + result.channelAccessToken
print "token:" + result.token
print "obs_token:" + result.obsToken
print "refreshToken:" + result.refreshToken
def new_post(self, text):
header = {
"Content-Type": "application/json",
"User-Agent" : self.UA,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"postInfo" : { "readPermission" : { "type" : "ALL" } },
"sourceType" : "TIMELINE",
"contents" : { "text" : text }
}
r = requests.post(
"http://" + self.host + "/mh/api/v24/post/create.json",
headers = header,
data = json.dumps(payload)
)
return r.json()
def postPhoto(self,text,path):
header = {
"Content-Type": "application/json",
"User-Agent" : self.UA,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"postInfo" : { "readPermission" : { "type" : "ALL" } },
"sourceType" : "TIMELINE",
"contents" : { "text" : text ,"media" : [{u'objectId': u'U78558f381d1b52647646eb4db4d83397'}]}
}
r = requests.post(
"http://" + self.host + "/mh/api/v24/post/create.json",
headers = header,
data = json.dumps(payload)
)
return r.json()
def like(self, mid, postid, likeType=1001):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"likeType" : likeType,
"activityExternalId" : postid,
"actorId" : mid
}
r = requests.post(
"http://" + self.host + "/mh/api/v23/like/create.json?homeId=" + mid,
headers = header,
data = json.dumps(payload)
)
return r.json()
def comment(self, mid, postid, text):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"commentText" : text,
"activityExternalId" : postid,
"actorId" : mid
}
r = requests.post(
"http://" + self.host + "/mh/api/v23/comment/create.json?homeId=" + mid,
headers = header,
data = json.dumps(payload)
)
return r.json()
def activity(self, limit=20):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
r = requests.get(
"http://" + self.host + "/tl/mapi/v21/activities?postLimit=" + str(limit),
headers = header
)
return r.json()
def getAlbum(self, gid):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct": self.channel_access_token,
}
r = requests.get(
"http://" + self.host + "/mh/album/v3/albums?type=g&sourceType=TALKROOM&homeId=" + gid,
headers = header
)
return r.json()
def changeAlbumName(self,gid,name,albumId):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct": self.channel_access_token,
}
payload = {
"title": name
}
r = requests.put(
"http://" + self.host + "/mh/album/v3/album/" + albumId + "?homeId=" + gid,
headers = header,
data = json.dumps(payload),
)
return r.json()
def deleteAlbum(self,gid,albumId):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct": self.channel_access_token,
}
r = requests.delete(
"http://" + self.host + "/mh/album/v3/album/" + albumId + "?homeId=" + gid,
headers = header,
)
return r.json()
def getNote(self,gid, commentLimit, likeLimit):
header = {
"Content-Type" : "application/json",
"X-Line-Mid" : self.mid,
"x-lct": self.channel_access_token,
}
r = requests.get(
"http://" + self.host + "/mh/api/v27/post/list.json?homeId=" + gid + "&commentLimit=" + commentLimit + "&sourceType=TALKROOM&likeLimit=" + likeLimit,
headers = header
)
return r.json()
def postNote(self, gid, text):
header = {
"Content-Type": "application/json",
"User-Agent" : self.UA,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {"postInfo":{"readPermission":{"homeId":gid}},
"sourceType":"GROUPHOME",
"contents":{"text":text}
}
r = requests.post(
"http://" + self.host + "/mh/api/v27/post/create.json",
headers = header,
data = json.dumps(payload)
)
return r.json()
def getDetail(self, mid):
header = {
"Content-Type": "application/json",
"User-Agent" : self.UA,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
r = requests.get(
"http://" + self.host + "/ma/api/v1/userpopup/getDetail.json?userMid=" + mid,
headers = header
)
return r.json()
def getHome(self,mid):
header = {
"Content-Type": "application/json",
"User-Agent" : self.UA,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
r = requests.get(
"http://" + self.host + "/mh/api/v27/post/list.json?homeId=" + mid + "&commentLimit=2&sourceType=LINE_PROFILE_COVER&likeLimit=6",
headers = header
)
return r.json()
def getCover(self,mid):
h = self.getHome(mid)
objId = h["result"]["homeInfo"]["objectId"]
return "http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=" + mid + "&oid=" + objId
def createAlbum(self,gid,name):
header = {
"Content-Type": "application/json",
"User-Agent" : self.UA,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"type" : "image",
"title" : name
}
r = requests.post(
"http://" + self.host + "/mh/album/v3/album?count=1&auto=0&homeId=" + gid,
headers = header,
data = json.dumps(payload)
)
return r.json()
def createAlbum2(self,gid,name,path,oid):
header = {
"Content-Type": "application/json",
"User-Agent" : self.UA,
"X-Line-Mid" : self.mid,
"x-lct" : self.channel_access_token,
}
payload = {
"type" : "image",
"title" : name
}
r = requests.post(
"http://" + self.host + "/mh/album/v3/album?count=1&auto=0&homeId=" + gid,
headers = header,
data = json.dumps(payload)
)
#albumId = r.json()["result"]["items"][0]["id"]
#h = {
# "Content-Type": "application/x-www-form-urlencoded",
# "User-Agent" : self.UA,
# "X-Line-Mid" : gid,
# "X-Line-Album" : albumId,
# "x-lct" : self.channel_access_token,
#"x-obs-host" : "obs-jp.line-apps.com:443",
#}
#print r.json()
#files = {
# 'file': open(path, 'rb'),
#}
#p = {
# "userid" : gid,
# "type" : "image",
# "oid" : oid,
# "ver" : "1.0"
#}
#data = {
# 'params': json.dumps(p)
#}
#r = requests.post(
#"http://obs-jp.line-apps.com/oa/album/a/object_info.nhn:443",
#headers = h,
#data = data,
#files = files
#)
return r.json()
#cl.createAlbum("cea9d61ba824e937aaf91637991ac934b","ss3ai","kawamuki.png")

# === notebooks/eval.py (Baukebrenninkmeijer/On-the-Generation-and-Evaluation-of-Synthetic-Tabular-Data-using-GANs, MIT) ===

import copy
import warnings
import logging
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy import stats
from scipy.spatial.distance import cdist
from dython.nominal import *
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.decomposition import PCA
from sklearn.metrics import f1_score, mean_squared_error
from sklearn.exceptions import ConvergenceWarning
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.linear_model import Lasso, Ridge, ElasticNet, LogisticRegression
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def plot_var_cor(x, ax=None, ret=False, *args, **kwargs):
if isinstance(x, pd.DataFrame):
corr = x.corr().values
elif isinstance(x, np.ndarray):
corr = np.corrcoef(x, rowvar=False)
else:
raise Exception('Unknown datatype given. Make sure a Pandas DataFrame or Numpy Array is passed.')
sns.set(style="white")
    mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
    if ax is None:
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, ax=ax, mask=mask, cmap=cmap, vmax=1, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5}, *args, **kwargs)
if ret:
return corr
def plot_correlation_difference(real: pd.DataFrame, fake: pd.DataFrame, plot_diff=True, cat_cols=None, **kwargs):
if cat_cols is None:
cat_cols = real.select_dtypes(['object', 'category'])
if plot_diff:
fig, ax = plt.subplots(1, 3, figsize=(24, 7))
else:
fig, ax = plt.subplots(1, 2, figsize=(20, 8))
real_corr = associations(real, nominal_columns=cat_cols, return_results=True, plot=True, theil_u=True,
mark_columns=True, ax=ax[0], **kwargs)
fake_corr = associations(fake, nominal_columns=cat_cols, return_results=True, plot=True, theil_u=True,
mark_columns=True, ax=ax[1], **kwargs)
if plot_diff:
diff = abs(real_corr - fake_corr)
sns.set(style="white")
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(diff, ax=ax[2], cmap=cmap, vmax=.3, square=True, annot=kwargs.get('annot', True), center=0,
linewidths=.5, cbar_kws={"shrink": .5}, fmt='.2f')
titles = ['Real', 'Fake', 'Difference'] if plot_diff else ['Real', 'Fake']
for i, label in enumerate(titles):
title_font = {'size': '18'}
ax[i].set_title(label, **title_font)
plt.tight_layout()
plt.show()
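# Hedged usage sketch (not part of the original file): comparing the
# association matrices of a real and a synthetic frame that share one
# categorical column. The column names and distributions are illustrative.
def _example_correlation_difference():
    real = pd.DataFrame({'age': np.random.randint(18, 80, 200),
                         'gender': np.random.choice(['m', 'f'], 200)})
    fake = pd.DataFrame({'age': np.random.randint(18, 80, 200),
                         'gender': np.random.choice(['m', 'f'], 200)})
    plot_correlation_difference(real, fake, cat_cols=['gender'])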
def plot_correlation_comparison(evaluators, **kwargs):
nr_plots = len(evaluators) + 1
fig, ax = plt.subplots(2, nr_plots, figsize=(4 * nr_plots, 7))
flat_ax = ax.flatten()
fake_corr = []
real_corr = associations(evaluators[0].real, nominal_columns=evaluators[0].categorical_columns, return_results=True, plot=True, theil_u=True,
mark_columns=True, ax=flat_ax[0], cbar=False, linewidths=0, **kwargs)
for i in range(1, nr_plots):
cbar = True if i % (nr_plots - 1) == 0 else False
fake_corr.append(associations(evaluators[i - 1].fake, nominal_columns=evaluators[0].categorical_columns, return_results=True, plot=True, theil_u=True,
mark_columns=True, ax=flat_ax[i], cbar=cbar, linewidths=0, **kwargs))
if i % (nr_plots - 1) == 0:
cbar = flat_ax[i].collections[0].colorbar
cbar.ax.tick_params(labelsize=20)
for i in range(1, nr_plots):
cbar = True if i % (nr_plots - 1) == 0 else False
diff = abs(real_corr - fake_corr[i - 1])
sns.set(style="white")
cmap = sns.diverging_palette(220, 10, as_cmap=True)
az = sns.heatmap(diff, ax=flat_ax[i + nr_plots], cmap=cmap, vmax=.3, square=True, annot=kwargs.get('annot', True), center=0,
linewidths=0, cbar_kws={"shrink": .8}, cbar=cbar, fmt='.2f')
if i % (nr_plots - 1) == 0:
cbar = az.collections[0].colorbar
cbar.ax.tick_params(labelsize=20)
titles = ['Real', 'TGAN', 'TGAN-WGAN-GP', 'TGAN-skip', 'MedGAN', 'TableGAN']
for i, label in enumerate(titles):
flat_ax[i].set_yticklabels([])
flat_ax[i].set_xticklabels([])
flat_ax[i + nr_plots].set_yticklabels([])
flat_ax[i + nr_plots].set_xticklabels([])
title_font = {'size': '28'}
flat_ax[i].set_title(label, **title_font)
plt.tight_layout()
def matrix_distance_abs(ma, mb):
return np.sum(np.abs(np.subtract(ma, mb)))
def matrix_distance_euclidian(ma, mb):
return np.sqrt(np.sum(np.power(np.subtract(ma, mb), 2)))
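# Hedged worked example (not part of the original file): on these 2x2 matrices
# every element differs by 0.1, so the absolute distance is about 0.4 and the
# Euclidean distance about 0.2 (the square root of 4 * 0.01).
def _example_matrix_distances():
    ma = np.array([[1.0, 0.0], [0.0, 1.0]])
    mb = np.array([[0.9, 0.1], [0.1, 0.9]])
    return matrix_distance_abs(ma, mb), matrix_distance_euclidian(ma, mb)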
def cdf(data_r, data_f, xlabel, ylabel, ax=None):
"""
    Plot cumulative distribution functions of real and fake data on an optionally given ax; if no ax is given, a new figure is created.
:param data_r: Series with real data
:param data_f: Series with fake data
:param xlabel: x-axis label
:param ylabel: y-axis label
:param ax: axis to plot on
"""
x1 = np.sort(data_r)
x2 = np.sort(data_f)
y = np.arange(1, len(data_r) + 1) / len(data_r)
ax = ax if ax else plt.subplots()[1]
axis_font = {'size': '14'}
ax.set_xlabel(xlabel, **axis_font)
ax.set_ylabel(ylabel, **axis_font)
ax.grid()
ax.plot(x1, y, marker='o', linestyle='none', label='Real', ms=8)
ax.plot(x2, y, marker='o', linestyle='none', label='Fake', alpha=0.5)
ax.tick_params(axis='both', which='major', labelsize=8)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1), ncol=3)
# ax.set_xticks(ind)
if isinstance(data_r, pd.Series) and data_r.dtypes == 'object':
ax.set_xticklabels(data_r.value_counts().sort_index().index, rotation='vertical')
if ax is None:
plt.show()
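# Hedged usage sketch (not part of the original file): plotting the empirical
# CDFs of a real and a synthetic numeric column. cdf() expects both series to
# have the same length; the normal samples below are placeholders.
def _example_cdf_plot():
    rng = np.random.default_rng(0)
    real_col = pd.Series(rng.normal(50, 10, 500))
    fake_col = pd.Series(rng.normal(52, 12, 500))
    fig, ax = plt.subplots(figsize=(6, 4))
    cdf(real_col, fake_col, xlabel='age', ylabel='cumulative share', ax=ax)
    plt.show()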
def categorical_distribution(real, fake, xlabel, ylabel, col=None, ax=None):
ax = ax if ax else plt.subplots()[1]
if col is not None:
real = real[col]
fake = fake[col]
y_r = real.value_counts().sort_index() / len(real)
y_f = fake.value_counts().sort_index() / len(fake)
# width = 0.35 # the width of the bars
ind = np.arange(len(y_r.index))
ax.grid()
yr_cumsum = y_r.cumsum()
yf_cumsum = y_f.cumsum()
values = yr_cumsum.values.tolist() + yf_cumsum.values.tolist()
real = [1 for _ in range(len(yr_cumsum))] + [0 for _ in range(len(yf_cumsum))]
classes = yr_cumsum.index.tolist() + yf_cumsum.index.tolist()
data = pd.DataFrame({'values': values,
'real': real,
'class': classes})
paper_rc = {'lines.linewidth': 8}
sns.set_context("paper", rc=paper_rc)
# ax.plot(x=yr_cumsum.index.tolist(), y=yr_cumsum.values.tolist(), ms=8)
sns.lineplot(y='values', x='class', data=data, ax=ax, hue='real')
# ax.bar(ind - width / 2, y_r.values, width, label='Real')
# ax.bar(ind + width / 2, y_f.values, width, label='Fake')
ax.set_ylabel('Distributions per variable')
axis_font = {'size': '18'}
ax.set_xlabel(xlabel, **axis_font)
ax.set_ylabel(ylabel, **axis_font)
ax.set_xticks(ind)
ax.set_xticklabels(y_r.index, rotation='vertical')
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1), ncol=3)
def mean_absolute_error(y_true, y_pred):
return np.mean(np.abs(np.subtract(y_true, y_pred)))
def euclidean_distance(y_true, y_pred):
return np.sqrt(np.sum(np.power(np.subtract(y_true, y_pred), 2)))
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true))
def rmse(y, y_hat):
return np.sqrt(mean_squared_error(y, y_hat))
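# Editor's note: small worked example, not part of the original source, for the error
# metrics above (made-up numbers):
#
#   y_true, y_pred = [100.0, 200.0], [110.0, 180.0]
#   mean_absolute_error(y_true, y_pred)             # (10 + 20) / 2 = 15.0
#   euclidean_distance(y_true, y_pred)              # sqrt(100 + 400) ~= 22.36
#   mean_absolute_percentage_error(y_true, y_pred)  # (0.10 + 0.10) / 2 = 0.10
#   rmse(y_true, y_pred)                            # sqrt((100 + 400) / 2) ~= 15.81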
def column_correlations(dataset_a, dataset_b, categorical_columns, theil_u=True):
if categorical_columns is None:
categorical_columns = list()
elif categorical_columns == 'all':
categorical_columns = dataset_a.columns
assert dataset_a.columns.tolist() == dataset_b.columns.tolist()
corr = pd.DataFrame(columns=dataset_a.columns, index=['correlation'])
for column in dataset_a.columns.tolist():
if column in categorical_columns:
if theil_u:
corr[column] = theils_u(dataset_a[column].sort_values(), dataset_b[column].sort_values())
else:
corr[column] = cramers_v(dataset_a[column].sort_values(), dataset_b[column].sort_values())
else:
corr[column], _ = ss.pearsonr(dataset_a[column].sort_values(), dataset_b[column].sort_values())
corr.fillna(value=np.nan, inplace=True)
correlation = np.mean(corr.values.flatten())
return correlation
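# Editor's note: hedged usage sketch, not part of the original source. `column_correlations`
# compares the matching columns of two identically structured frames and returns the mean
# per-column association; the frames below are hypothetical.
#
#   real_df = pd.DataFrame({'age': [23, 35, 41, 29], 'job': ['a', 'b', 'a', 'b']})
#   fake_df = pd.DataFrame({'age': [25, 33, 44, 30], 'job': ['a', 'a', 'b', 'b']})
#   score = column_correlations(real_df, fake_df, categorical_columns=['job'])
#   print(score)  # single float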
def associations(dataset, nominal_columns=None, mark_columns=False, theil_u=False, plot=True,
return_results=False, **kwargs):
"""
Adapted from: https://github.com/shakedzy/dython
Calculate the correlation/strength-of-association of features in the data-set with both categorical (nominal) and
continuous features using:
- Pearson's R for continuous-continuous cases
- Correlation Ratio for categorical-continuous cases
- Cramer's V or Theil's U for categorical-categorical cases
:param dataset: NumPy ndarray / Pandas DataFrame
The data-set for which the features' correlation is computed
:param nominal_columns: string / list / NumPy ndarray
Names of columns of the data-set which hold categorical values. Can also be the string 'all' to state that all
columns are categorical, or None (default) to state none are categorical
:param mark_columns: Boolean (default: False)
if True, the output's column names will have a suffix of '(nom)' or '(con)' based on their type (nominal or
continuous), as provided by nominal_columns
:param theil_u: Boolean (default: False)
In the case of categorical-categorical features, use Theil's U instead of Cramer's V
:param plot: Boolean (default: True)
If True, plot a heat-map of the correlation matrix
:param return_results: Boolean (default: False)
If True, the function will return a Pandas DataFrame of the computed associations
:param kwargs:
Arguments to be passed to used function and methods
:return: Pandas DataFrame
A DataFrame of the correlation/strength-of-association between all features
"""
dataset = convert(dataset, 'dataframe')
columns = dataset.columns
if nominal_columns is None:
nominal_columns = list()
elif nominal_columns == 'all':
nominal_columns = columns
corr = pd.DataFrame(index=columns, columns=columns)
for i in range(0, len(columns)):
for j in range(i, len(columns)):
if i == j:
corr[columns[i]][columns[j]] = 1.0
else:
if columns[i] in nominal_columns:
if columns[j] in nominal_columns:
if theil_u:
corr[columns[j]][columns[i]] = theils_u(dataset[columns[i]], dataset[columns[j]])
corr[columns[i]][columns[j]] = theils_u(dataset[columns[j]], dataset[columns[i]])
else:
cell = cramers_v(dataset[columns[i]], dataset[columns[j]])
corr[columns[i]][columns[j]] = cell
corr[columns[j]][columns[i]] = cell
else:
cell = correlation_ratio(dataset[columns[i]], dataset[columns[j]])
corr[columns[i]][columns[j]] = cell
corr[columns[j]][columns[i]] = cell
else:
if columns[j] in nominal_columns:
cell = correlation_ratio(dataset[columns[j]], dataset[columns[i]])
corr[columns[i]][columns[j]] = cell
corr[columns[j]][columns[i]] = cell
else:
cell, _ = ss.pearsonr(dataset[columns[i]], dataset[columns[j]])
corr[columns[i]][columns[j]] = cell
corr[columns[j]][columns[i]] = cell
corr.fillna(value=np.nan, inplace=True)
if mark_columns:
marked_columns = ['{} (nom)'.format(col) if col in nominal_columns else '{} (con)'.format(col) for col in
columns]
corr.columns = marked_columns
corr.index = marked_columns
if plot:
if kwargs.get('ax') is None:
plt.figure(figsize=kwargs.get('figsize', None))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.set(style="white")
sns.heatmap(corr, annot=kwargs.get('annot', True), fmt=kwargs.get('fmt', '.2f'), cmap=cmap, vmax=1, center=0,
square=True, linewidths=kwargs.get('linewidths', 0.5), cbar_kws={"shrink": .8}, cbar=kwargs.get('cbar', True), ax=kwargs.get('ax', None))
if kwargs.get('ax') is None:
plt.show()
if return_results:
return corr
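# Editor's note: hedged usage sketch, not part of the original source. `associations`
# computes (and optionally plots) the pairwise association matrix of a mixed-type frame;
# the frame below is hypothetical.
#
#   df = pd.DataFrame({'age': [23, 35, 41, 29], 'job': ['a', 'b', 'a', 'b']})
#   corr = associations(df, nominal_columns=['job'], theil_u=True,
#                       plot=False, return_results=True)
#   print(corr)  # square DataFrame: Pearson, correlation ratio and Theil's U entries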
def numerical_encoding(dataset, nominal_columns='all', drop_single_label=False, drop_fact_dict=True):
"""
Adapted from: https://github.com/shakedzy/dython
Encoding a data-set with mixed data (numerical and categorical) to a numerical-only data-set,
using the following logic:
* categorical with only a single value will be marked as zero (or dropped, if requested)
* categorical with two values will be replaced with the result of Pandas `factorize`
* categorical with more than two values will be replaced with the result of Pandas `get_dummies`
* numerical columns will not be modified
**Returns:** DataFrame or (DataFrame, dict). If `drop_fact_dict` is True, returns the encoded DataFrame.
else, returns a tuple of the encoded DataFrame and dictionary, where each key is a two-value column, and the
value is the original labels, as supplied by Pandas `factorize`. Will be empty if no two-value columns are
present in the data-set
Parameters
----------
dataset : NumPy ndarray / Pandas DataFrame
The data-set to encode
nominal_columns : sequence / string
A sequence of the nominal (categorical) columns in the dataset. If string, must be 'all' to state that
all columns are nominal. If None, nothing happens. Default: 'all'
drop_single_label : Boolean, default = False
If True, nominal columns with a only a single value will be dropped.
drop_fact_dict : Boolean, default = True
If True, the return value will be the encoded DataFrame alone. If False, it will be a tuple of
the DataFrame and the dictionary of the binary factorization (originating from pd.factorize)
"""
dataset = convert(dataset, 'dataframe')
if nominal_columns is None:
return dataset
elif nominal_columns == 'all':
nominal_columns = dataset.columns
converted_dataset = pd.DataFrame()
binary_columns_dict = dict()
for col in dataset.columns:
if col not in nominal_columns:
converted_dataset.loc[:, col] = dataset[col]
else:
unique_values = pd.unique(dataset[col])
if len(unique_values) == 1 and not drop_single_label:
converted_dataset.loc[:, col] = 0
elif len(unique_values) == 2:
converted_dataset.loc[:, col], binary_columns_dict[col] = pd.factorize(dataset[col])
else:
dummies = pd.get_dummies(dataset[col], prefix=col)
converted_dataset = pd.concat([converted_dataset, dummies], axis=1)
if drop_fact_dict:
return converted_dataset
else:
return converted_dataset, binary_columns_dict
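# Editor's note: hedged usage sketch, not part of the original source. It shows how the
# encoding rules above play out on a tiny hypothetical frame.
#
#   df = pd.DataFrame({'color': ['red', 'blue', 'red'],   # two values -> pd.factorize
#                      'city': ['NY', 'LA', 'SF'],        # three values -> pd.get_dummies
#                      'age': [23, 35, 41]})              # numerical -> unchanged
#   encoded = numerical_encoding(df, nominal_columns=['color', 'city'])
#   print(encoded.columns.tolist())
#   # ['color', 'city_LA', 'city_NY', 'city_SF', 'age']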
def skip_diag_strided(A):
m = A.shape[0]
strided = np.lib.stride_tricks.as_strided
s0, s1 = A.strides
return strided(A.ravel()[1:], shape=(m - 1, m), strides=(s0 + s1, s1)).reshape(m, -1)
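# Editor's note: brief clarification, not part of the original source. skip_diag_strided
# removes the main diagonal of a square array with a stride trick, e.g.:
#
#   A = np.arange(9).reshape(3, 3)   # [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
#   skip_diag_strided(A)             # [[1, 2], [3, 5], [6, 7]]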
def plot_mean_std_comparison(evaluators):
nr_plots = len(evaluators)
fig, ax = plt.subplots(2, nr_plots, figsize=(4 * nr_plots, 7))
flat_ax = ax.flatten()
for i in range(nr_plots):
plot_mean_std(evaluators[i].real, evaluators[i].fake, ax=ax[:, i])
titles = ['TGAN', 'TGAN-WGAN-GP', 'TGAN-skip', 'MedGAN', 'TableGAN']
for i, label in enumerate(titles):
title_font = {'size': '24'}
flat_ax[i].set_title(label, **title_font)
plt.tight_layout()
def plot_mean_std(real, fake, ax=None):
if ax is None:
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
fig.suptitle('Absolute Log Mean and STDs of numeric data\n', fontsize=16)
real = real._get_numeric_data()
fake = fake._get_numeric_data()
real_mean = np.log(np.add(abs(real.mean()).values, 1e-5))
fake_mean = np.log(np.add(abs(fake.mean()).values, 1e-5))
min_mean = min(real_mean) - 1
max_mean = max(real_mean) + 1
line = np.arange(min_mean, max_mean)
sns.lineplot(x=line, y=line, ax=ax[0])
sns.scatterplot(x=real_mean,
y=fake_mean,
ax=ax[0])
ax[0].set_title('Means of real and fake data')
ax[0].set_xlabel('real data mean (log)')
ax[0].set_ylabel('fake data mean (log)')
real_std = np.log(np.add(real.std().values, 1e-5))
fake_std = np.log(np.add(fake.std().values, 1e-5))
min_std = min(real_std) - 1
max_std = max(real_std) + 1
line = np.arange(min_std, max_std)
sns.lineplot(x=line, y=line, ax=ax[1])
sns.scatterplot(x=real_std,
y=fake_std,
ax=ax[1])
ax[1].set_title('Stds of real and fake data')
ax[1].set_xlabel('real data std (log)')
ax[1].set_ylabel('fake data std (log)')
ax[0].grid(True)
ax[1].grid(True)
if ax is None:
plt.show()
class DataEvaluator:
def __init__(self, real, fake, unique_thresh=55, metric='pearsonr', verbose=False, n_samples=None):
if isinstance(real, np.ndarray):
real = pd.DataFrame(real)
fake = pd.DataFrame(fake)
assert isinstance(real, pd.DataFrame), f'Make sure you either pass a Pandas DataFrame or Numpy Array'
self.unique_thresh = unique_thresh
self.numerical_columns = [column for column in real._get_numeric_data().columns if
len(real[column].unique()) > unique_thresh]
self.categorical_columns = [column for column in real.columns if column not in self.numerical_columns]
self.real = real
self.fake = fake
self.comparison_metric = getattr(stats, metric)
self.verbose = verbose
if n_samples is None:
self.n_samples = min(len(self.real), len(self.fake))
elif len(fake) >= n_samples and len(real) >= n_samples:
self.n_samples = n_samples
else:
raise Exception(f'Make sure n_samples < len(fake/real). len(real): {len(real)}, len(fake): {len(fake)}')
self.real = self.real.sample(self.n_samples)
self.fake = self.fake.sample(self.n_samples)
assert len(self.real) == len(self.fake), f'len(real) != len(fake)'
def plot_mean_std(self):
plot_mean_std(self.real, self.fake)
def plot_cumsums(self):
nr_charts = len(self.real.columns)
nr_cols = 4
nr_rows = max(1, nr_charts // nr_cols)
nr_rows = nr_rows + 1 if nr_charts % nr_cols != 0 else nr_rows
max_len = 0
# Increase the length of plots if the labels are long
if not self.real.select_dtypes(include=['object']).empty:
lengths = []
for d in self.real.select_dtypes(include=['object']):
lengths.append(max([len(x.strip()) for x in self.real[d].unique().tolist()]))
max_len = max(lengths)
row_height = 6 + (max_len // 30)
fig, ax = plt.subplots(nr_rows, nr_cols, figsize=(16, row_height * nr_rows))
fig.suptitle('Cumulative Sums per feature', fontsize=16)
axes = ax.flatten()
for i, col in enumerate(self.real.columns):
r = self.real[col]
f = self.fake.iloc[:, self.real.columns.tolist().index(col)]
cdf(r, f, col, 'Cumsum', ax=axes[i])
plt.tight_layout(rect=[0, 0.02, 1, 0.98])
plt.show()
def plot_correlation_difference(self, plot_diff=True, *args, **kwargs):
plot_correlation_difference(self.real, self.fake, cat_cols=self.categorical_columns, plot_diff=plot_diff, *args,
**kwargs)
def correlation_distance(self, how='euclidean'):
"""
Calculate distance between correlation matrices with certain metric.
Metric options are: euclidean, mae (mean absolute error) and rmse (root mean squared error)
:param how: metric to measure distance
:return: distance
"""
distance_func = None
if how == 'euclidean':
distance_func = euclidean_distance
elif how == 'mae':
distance_func = mean_absolute_error
elif how == 'rmse':
distance_func = rmse
assert distance_func is not None, 'Distance measure was None. Please select a measure from [euclidean, mae, rmse]'
real_corr = associations(self.real, nominal_columns=self.categorical_columns, return_results=True, theil_u=True, plot=False)
fake_corr = associations(self.fake, nominal_columns=self.categorical_columns, return_results=True, theil_u=True, plot=False)
return distance_func(
real_corr.values,
fake_corr.values
)
def plot_2d(self):
"""
Plot the first two components of a PCA of the numeric columns of real and fake.
"""
real = numerical_encoding(self.real, nominal_columns=self.categorical_columns)
fake = numerical_encoding(self.fake, nominal_columns=self.categorical_columns)
pca_r = PCA(n_components=2)
pca_f = PCA(n_components=2)
real_t = pca_r.fit_transform(real)
fake_t = pca_f.fit_transform(fake)
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
fig.suptitle('First two components of PCA', fontsize=16)
sns.scatterplot(ax=ax[0], x=real_t[:, 0], y=real_t[:, 1])
sns.scatterplot(ax=ax[1], x=fake_t[:, 0], y=fake_t[:, 1])
ax[0].set_title('Real data')
ax[1].set_title('Fake data')
plt.show()
def get_copies(self):
"""
Check whether any real values occur in the fake data
:return: Dataframe containing the duplicates
"""
# df = pd.concat([self.real, self.fake])
# duplicates = df[df.duplicated(keep=False)]
# return duplicates
real_hashes = self.real.apply(lambda x: hash(tuple(x)), axis=1)
fake_hashes = self.fake.apply(lambda x: hash(tuple(x)), axis=1)
dup_idxs = fake_hashes.isin(real_hashes.values)
dup_idxs = dup_idxs[dup_idxs == True].sort_index().index.tolist()
print(f'Number of copied rows: {len(dup_idxs)}')
return self.fake.loc[dup_idxs, :]
def get_duplicates(self, return_values=False):
real_duplicates = self.real[self.real.duplicated(keep=False)]
fake_duplicates = self.fake[self.fake.duplicated(keep=False)]
if return_values:
return real_duplicates, fake_duplicates
return len(real_duplicates), len(fake_duplicates)
def get_duplicates2(self, return_values=False):
df = pd.concat([self.real, self.fake])
duplicates = df[df.duplicated(keep=False)]
return duplicates
def pca_correlation(self, return_values=False):
self.pca_r = PCA(n_components=5)
self.pca_f = PCA(n_components=5)
real = self.real
fake = self.fake
real = numerical_encoding(real, nominal_columns=self.categorical_columns)
fake = numerical_encoding(fake, nominal_columns=self.categorical_columns)
self.pca_r.fit(real)
self.pca_f.fit(fake)
results = pd.DataFrame({'real': self.pca_r.explained_variance_, 'fake': self.pca_f.explained_variance_})
if self.verbose:
print(f'\nTop 5 PCA components:')
print(results.to_string())
# slope, intersect, corr, p, _ = stats.linregress(self.pca_r.explained_variance_, self.pca_f.explained_variance_)
# corr, p = stats.pearsonr(self.pca_r.explained_variance_, self.pca_f.explained_variance_)
# return corr
if return_values:
return results
pca_error = mean_absolute_percentage_error(np.log(self.pca_r.explained_variance_), np.log(self.pca_f.explained_variance_))
return 1 - pca_error
def fit_estimators(self):
"""
Fit self.r_estimators and self.f_estimators to real and fake data, respectively.
"""
if self.verbose:
print(f'\nFitting real')
for i, c in enumerate(self.r_estimators):
if self.verbose:
print(f'{i + 1}: {type(c).__name__}')
c.fit(self.real_x_train, self.real_y_train)
if self.verbose:
print(f'\nFitting fake')
for i, c in enumerate(self.f_estimators):
if self.verbose:
print(f'{i + 1}: {type(c).__name__}')
c.fit(self.fake_x_train, self.fake_y_train)
def score_estimators(self):
"""
Get F1 scores of self.r_estimators and self.f_estimators on the fake and real data, respectively.
:return:
"""
from sklearn.metrics import mean_squared_error
if self.target_type == 'class':
r2r = [f1_score(self.real_y_test, clf.predict(self.real_x_test), average='micro') for clf in self.r_estimators]
f2f = [f1_score(self.fake_y_test, clf.predict(self.fake_x_test), average='micro') for clf in self.f_estimators]
# Calculate test set accuracies on the other dataset
r2f = [f1_score(self.fake_y_test, clf.predict(self.fake_x_test), average='micro') for clf in self.r_estimators]
f2r = [f1_score(self.real_y_test, clf.predict(self.real_x_test), average='micro') for clf in self.f_estimators]
index = [f'real_data_{classifier}_F1' for classifier in self.estimator_names] + \
[f'fake_data_{classifier}_F1' for classifier in self.estimator_names]
results = pd.DataFrame({'real': r2r + r2f, 'fake': f2r + f2f}, index=index)
elif self.target_type == 'regr':
r2r = [rmse(self.real_y_test, clf.predict(self.real_x_test)) for clf in self.r_estimators]
f2f = [rmse(self.fake_y_test, clf.predict(self.fake_x_test)) for clf in self.f_estimators]
# Calculate test set accuracies on the other dataset
r2f = [rmse(self.fake_y_test, clf.predict(self.fake_x_test)) for clf in self.r_estimators]
f2r = [rmse(self.real_y_test, clf.predict(self.real_x_test)) for clf in self.f_estimators]
index = [f'real_data_{classifier}' for classifier in self.estimator_names] + \
[f'fake_data_{classifier}' for classifier in self.estimator_names]
results = pd.DataFrame({'real': r2r + r2f, 'fake': f2r + f2f}, index=index)
else:
raise Exception(f'self.target_type should be either \'class\' or \'regr\', but is {self.target_type}.')
return results
def visual_evaluation(self, plot=True, **kwargs):
if plot:
self.plot_mean_std()
self.plot_cumsums()
self.plot_correlation_difference(**kwargs)
self.plot_2d()
def statistical_evaluation(self):
total_metrics = pd.DataFrame()
for ds_name in ['real', 'fake']:
ds = getattr(self, ds_name)
metrics = {}
num_ds = ds[self.numerical_columns]
# Basic statistical properties
for idx, value in num_ds.mean().items():
metrics[f'mean_{idx}'] = value
for idx, value in num_ds.median().items():
metrics[f'median_{idx}'] = value
for idx, value in num_ds.std().items():
metrics[f'std_{idx}'] = value
for idx, value in num_ds.var().items():
metrics[f'variance_{idx}'] = value
total_metrics[ds_name] = metrics.values()
total_metrics.index = metrics.keys()
self.statistical_results = total_metrics
if self.verbose:
print('\nBasic statistical attributes:')
print(total_metrics.to_string())
corr, p = stats.spearmanr(total_metrics['real'], total_metrics['fake'])
return corr
def correlation_correlation(self):
total_metrics = pd.DataFrame()
for ds_name in ['real', 'fake']:
ds = getattr(self, ds_name)
corr_df = associations(ds, nominal_columns=self.categorical_columns, return_results=True, theil_u=True, plot=False)
values = corr_df.values
values = values[~np.eye(values.shape[0], dtype=bool)].reshape(values.shape[0], -1)
total_metrics[ds_name] = values.flatten()
self.correlation_correlations = total_metrics
corr, p = self.comparison_metric(total_metrics['real'], total_metrics['fake'])
if self.verbose:
print('\nColumn correlation between datasets:')
print(total_metrics.to_string())
return corr
def convert_numerical(self):
real = numerical_encoding(self.real, nominal_columns=self.categorical_columns)
columns = sorted(real.columns.tolist())
real = real[columns]
fake = numerical_encoding(self.fake, nominal_columns=self.categorical_columns)
for col in columns:
if col not in fake.columns.tolist():
fake[col] = 0
fake = fake[columns]
return real, fake
def estimator_evaluation(self, target_col, target_type='class'):
self.target_col = target_col
self.target_type = target_type
# Convert both datasets to numerical representations and split x and y
real_x = numerical_encoding(self.real.drop([target_col], axis=1), nominal_columns=self.categorical_columns)
columns = sorted(real_x.columns.tolist())
real_x = real_x[columns]
fake_x = numerical_encoding(self.fake.drop([target_col], axis=1), nominal_columns=self.categorical_columns)
for col in columns:
if col not in fake_x.columns.tolist():
fake_x[col] = 0
fake_x = fake_x[columns]
assert real_x.columns.tolist() == fake_x.columns.tolist(), f'real and fake columns are different: \n{real_x.columns}\n{fake_x.columns}'
if self.target_type == 'class':
# Encode real and fake target the same
real_y, uniques = pd.factorize(self.real[target_col])
mapping = {key: value for value, key in enumerate(uniques)}
fake_y = [mapping.get(key) for key in self.fake[target_col].tolist()]
elif self.target_type == 'regr':
real_y = self.real[target_col]
fake_y = self.fake[target_col]
else:
raise Exception(f'Target Type must be regr or class')
# split real and fake into train and test sets
self.real_x_train, self.real_x_test, self.real_y_train, self.real_y_test = train_test_split(real_x, real_y, test_size=0.2)
self.fake_x_train, self.fake_x_test, self.fake_y_train, self.fake_y_test = train_test_split(fake_x, fake_y, test_size=0.2)
if target_type == 'regr':
self.estimators = [
RandomForestRegressor(n_estimators=20, max_depth=5),
Lasso(),
Ridge(alpha=1.0),
ElasticNet(),
]
elif target_type == 'class':
self.estimators = [
# SGDClassifier(max_iter=100, tol=1e-3),
LogisticRegression(multi_class='auto', solver='lbfgs', max_iter=500),
RandomForestClassifier(n_estimators=10),
DecisionTreeClassifier(),
MLPClassifier([50, 50], solver='adam', activation='relu', learning_rate='adaptive'),
]
else:
raise Exception(f'target_type must be \'regr\' or \'class\'')
self.r_estimators = copy.deepcopy(self.estimators)
self.f_estimators = copy.deepcopy(self.estimators)
self.estimator_names = [type(clf).__name__ for clf in self.estimators]
for estimator in self.estimators:
assert hasattr(estimator, 'fit')
assert hasattr(estimator, 'score')
self.fit_estimators()
self.estimators_scores = self.score_estimators()
print('\nClassifier F1-scores:') if self.target_type == 'class' else print('\nRegressor RMSE-scores:')
print(self.estimators_scores.to_string())
if self.target_type == 'regr':
corr, p = self.comparison_metric(self.estimators_scores['real'], self.estimators_scores['fake'])
return corr
elif self.target_type == 'class':
mean = mean_absolute_percentage_error(self.estimators_scores['real'], self.estimators_scores['fake'])
return 1 - mean
def row_distance(self, n=None):
if n is None:
n = len(self.real)
real = numerical_encoding(self.real, nominal_columns=self.categorical_columns)
fake = numerical_encoding(self.fake, nominal_columns=self.categorical_columns)
columns = sorted(real.columns.tolist())
real = real[columns]
for col in columns:
if col not in fake.columns.tolist():
fake[col] = 0
fake = fake[columns]
for column in real.columns.tolist():
if len(real[column].unique()) > 2:
real[column] = (real[column] - real[column].mean()) / real[column].std()
fake[column] = (fake[column] - fake[column].mean()) / fake[column].std()
assert real.columns.tolist() == fake.columns.tolist()
distances = cdist(real[:n], fake[:n])
min_distances = np.min(distances, axis=1)
min_mean = np.mean(min_distances)
min_std = np.std(min_distances)
return min_mean, min_std
def evaluate(self, target_col, target_type='class', metric=None, verbose=None):
"""
Determine correlation between attributes from the real and fake dataset using a given metric.
All metrics from scipy.stats are available.
:param target_col: column to use for predictions with the estimators
:param target_type: type of the prediction task: 'class' for classification or 'regr' for regression
:param metric: scoring metric for comparing the real and fake attributes. By default the metric passed to
the constructor is used; alternatives include Spearman rho (scipy.stats.spearmanr) ranking.
:param verbose: whether to print intermediate results; overrides the instance setting if not None.
"""
if verbose is not None:
self.verbose = verbose
if metric is not None:
self.comparison_metric = metric
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
pd.options.display.float_format = '{:,.4f}'.format
print(f'\nCorrelation metric: {self.comparison_metric.__name__}')
basic_statistical = self.statistical_evaluation() # 2 columns -> Corr -> correlation coefficient
correlation_correlation = self.correlation_correlation() # 2 columns -> Kendall Tau -> Correlation coefficient
column_correlation = column_correlations(self.real, self.fake, self.categorical_columns) # 1 column -> Mean
estimators = self.estimator_evaluation(target_col=target_col, target_type=target_type)  # 2 columns -> correlation coefficient (regr) or 1 - MAPE (class)
pca_variance = self.pca_correlation() # 1 number
nearest_neighbor = self.row_distance(n=20000)
miscellaneous = {}
miscellaneous['Column Correlation Distance RMSE'] = self.correlation_distance(how='rmse')
miscellaneous['Column Correlation distance MAE'] = self.correlation_distance(how='mae')
miscellaneous['Duplicate rows between sets'] = len(self.get_copies())
miscellaneous['nearest neighbor mean'] = nearest_neighbor[0]
miscellaneous['nearest neighbor std'] = nearest_neighbor[1]
miscellaneous_df = pd.DataFrame({'Result': list(miscellaneous.values())}, index=list(miscellaneous.keys()))
print(f'\nMiscellaneous results:')
print(miscellaneous_df.to_string())
all_results = {
'basic statistics': basic_statistical,
'Correlation column correlations': correlation_correlation,
'Mean Correlation between fake and real columns': column_correlation,
f'{"1 - MAPE Estimator results" if self.target_type == "class" else "Correlation RMSE"}': estimators,
'1 - MAPE 5 PCA components': pca_variance,
}
total_result = np.mean(list(all_results.values()))
all_results['Total Result'] = total_result
all_results_df = pd.DataFrame({'Result': list(all_results.values())}, index=list(all_results.keys()))
print(f'\nResults:\nNumber of duplicate rows is ignored for total score.')
print(all_results_df.to_string())
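# Editor's note: hedged end-to-end sketch, not part of the original file. Assuming
# 'real.csv' and 'fake.csv' are hypothetical files with identical columns and that
# 'income' is the column to predict, a typical run looks roughly like this:
#
#   real = pd.read_csv('real.csv')
#   fake = pd.read_csv('fake.csv')
#   evaluator = DataEvaluator(real, fake, verbose=True)
#   evaluator.visual_evaluation()   # mean/std, cumulative sums, correlations, PCA plots
#   evaluator.evaluate(target_col='income', target_type='class')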
| 44.870813 | 158 | 0.636036 |
93223cd882c2be90b01bfe2a0a09b6eae746c2f6 | 12,258 | py | Python | tests/test_encrypted_fields.py | kaozdl/django-extensions | bbc3ae686d2cba9c0bb0a6b88f5e71ddf1a6af36 | ["MIT"] | null | null | null | tests/test_encrypted_fields.py | kaozdl/django-extensions | bbc3ae686d2cba9c0bb0a6b88f5e71ddf1a6af36 | ["MIT"] | null | null | null | tests/test_encrypted_fields.py | kaozdl/django-extensions | bbc3ae686d2cba9c0bb0a6b88f5e71ddf1a6af36 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import shutil
import tempfile
from contextlib import contextmanager
import pytest
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, models
from django.forms.widgets import Textarea, TextInput
from django.test import TestCase
from django.test.utils import override_settings
from .testapp.models import Secret
# Only perform encrypted fields tests if keyczar is present. Resolves
# http://github.com/django-extensions/django-extensions/issues/#issue/17
try:
from django_extensions.db.fields.encrypted import BaseEncryptedField, EncryptedCharField, EncryptedTextField
from keyczar import keyczar, keyczart, keyinfo # NOQA
keyczar_active = True
except ImportError:
keyczar_active = False
# Locations of both private and public keys.
KEY_LOCS = {}
@pytest.fixture(scope="class")
def keyczar_keys(request):
# If KeyCzar is available, set up the environment.
if keyczar_active:
# Create an RSA private key.
keys_dir = tempfile.mkdtemp("django_extensions_tests_keyzcar_rsa_dir")
keyczart.Create(keys_dir, "test", keyinfo.DECRYPT_AND_ENCRYPT, asymmetric=True)
keyczart.AddKey(keys_dir, "PRIMARY", size=4096)
KEY_LOCS['DECRYPT_AND_ENCRYPT'] = keys_dir
# Create an RSA public key.
pub_dir = tempfile.mkdtemp("django_extensions_tests_keyzcar_pub_dir")
keyczart.PubKey(keys_dir, pub_dir)
KEY_LOCS['ENCRYPT'] = pub_dir
# cleanup crypto key temp dirs
def cleanup():
for name, path in KEY_LOCS.items():
shutil.rmtree(path)
request.addfinalizer(cleanup)
@contextmanager
def keys(purpose, mode=None):
"""
A context manager that sets up the correct KeyCzar environment for a test.
Arguments:
purpose: Either keyczar.keyinfo.DECRYPT_AND_ENCRYPT or
keyczar.keyinfo.ENCRYPT.
mode: If truthy, settings.ENCRYPTED_FIELD_MODE will be set to (and then
reverted from) this value. If falsy, settings.ENCRYPTED_FIELD_MODE
will not be changed. Optional. Default: None.
Yields:
A Keyczar subclass for the stated purpose. This will be keyczar.Crypter
for DECRYPT_AND_ENCRYPT or keyczar.Encrypter for ENCRYPT. In addition,
settings.ENCRYPTED_FIELD_KEYS_DIR will be set correctly, and then
reverted when the manager exits.
"""
# Store the original settings so we can restore when the manager exits.
orig_setting_dir = getattr(settings, 'ENCRYPTED_FIELD_KEYS_DIR', None)
orig_setting_mode = getattr(settings, 'ENCRYPTED_FIELD_MODE', None)
try:
if mode:
settings.ENCRYPTED_FIELD_MODE = mode
if purpose == keyinfo.DECRYPT_AND_ENCRYPT:
settings.ENCRYPTED_FIELD_KEYS_DIR = KEY_LOCS['DECRYPT_AND_ENCRYPT']
yield keyczar.Crypter.Read(settings.ENCRYPTED_FIELD_KEYS_DIR)
else:
settings.ENCRYPTED_FIELD_KEYS_DIR = KEY_LOCS['ENCRYPT']
yield keyczar.Encrypter.Read(settings.ENCRYPTED_FIELD_KEYS_DIR)
except Exception:
raise # Reraise any exceptions.
finally:
# Restore settings.
settings.ENCRYPTED_FIELD_KEYS_DIR = orig_setting_dir
if mode:
if orig_setting_mode:
settings.ENCRYPTED_FIELD_MODE = orig_setting_mode
else:
del settings.ENCRYPTED_FIELD_MODE
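# Editor's note: illustrative sketch, not part of the original test module. The `keys`
# context manager above temporarily points settings.ENCRYPTED_FIELD_KEYS_DIR at the
# fixture-created key directory and yields a matching keyczar object; assuming the
# fixture has run, a round trip looks roughly like this:
#
#   with keys(keyinfo.DECRYPT_AND_ENCRYPT) as crypt:
#       ciphertext = crypt.Encrypt("some value")
#       assert crypt.Decrypt(ciphertext) == "some value"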
@contextmanager
def secret_model():
"""
A context manager that yields a Secret model defined at runtime.
All EncryptedField init logic occurs at model class definition time, not at
object instantiation time. This means that in order to test different keys
and modes, we must generate a new class definition at runtime, after
establishing the correct KeyCzar settings. This context manager handles
that process.
See https://dynamic-models.readthedocs.io/en/latest/ and
https://docs.djangoproject.com/en/dev/topics/db/models/
#differences-between-proxy-inheritance-and-unmanaged-models
"""
# Create a new class that shadows tests.models.Secret.
attrs = {
'name': EncryptedCharField("Name", max_length=Secret._meta.get_field('name').max_length),
'text': EncryptedTextField("Text"),
'__module__': 'tests.testapp.models',
'Meta': type('Meta', (object, ), {
'managed': False,
'db_table': Secret._meta.db_table
})
}
yield type('Secret', (models.Model, ), attrs)
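# Editor's note: illustrative sketch, not part of the original test module. Because the
# encrypted fields read their settings at class-definition time, `secret_model` is only
# entered after `keys` has configured the environment, mirroring the tests below:
#
#   with keys(keyinfo.DECRYPT_AND_ENCRYPT):
#       with secret_model() as model:
#           model.objects.create(name="Test Secret")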
@pytest.mark.skipif(keyczar_active is False, reason="Encrypted fields require keyczar to be installed")
@pytest.mark.usefixtures("admin_user", "keyczar_keys")
class EncryptedFieldsTestCase(TestCase):
def test_char_field_create(self):
"""
Uses a private key to encrypt data on model creation.
Verifies the data is encrypted in the database and can be decrypted.
"""
with keys(keyinfo.DECRYPT_AND_ENCRYPT) as crypt:
with secret_model() as model:
test_val = "Test Secret"
secret = model.objects.create(name=test_val)
cursor = connection.cursor()
query = "SELECT name FROM %s WHERE id = %d" % (model._meta.db_table, secret.id)
cursor.execute(query)
db_val, = cursor.fetchone()
decrypted_val = crypt.Decrypt(db_val[len(EncryptedCharField.prefix):])
self.assertEqual(test_val, decrypted_val)
def test_char_field_read(self):
"""
Uses a private key to encrypt data on model creation.
Verifies the data is decrypted when reading the value back from the
model.
"""
with keys(keyinfo.DECRYPT_AND_ENCRYPT):
with secret_model() as model:
test_val = "Test Secret"
secret = model.objects.create(name=test_val)
retrieved_secret = model.objects.get(id=secret.id)
self.assertEqual(test_val, retrieved_secret.name)
def test_text_field_create(self):
"""
Uses a private key to encrypt data on model creation.
Verifies the data is encrypted in the database and can be decrypted.
"""
with keys(keyinfo.DECRYPT_AND_ENCRYPT) as crypt:
with secret_model() as model:
test_val = "Test Secret"
secret = model.objects.create(text=test_val)
cursor = connection.cursor()
query = "SELECT text FROM %s WHERE id = %d" % (model._meta.db_table, secret.id)
cursor.execute(query)
db_val, = cursor.fetchone()
decrypted_val = crypt.Decrypt(db_val[len(EncryptedCharField.prefix):])
self.assertEqual(test_val, decrypted_val)
def test_text_field_read(self):
"""
Uses a private key to encrypt data on model creation.
Verifies the data is decrypted when reading the value back from the
model.
"""
with keys(keyinfo.DECRYPT_AND_ENCRYPT):
with secret_model() as model:
test_val = "Test Secret"
secret = model.objects.create(text=test_val)
retrieved_secret = model.objects.get(id=secret.id)
self.assertEqual(test_val, retrieved_secret.text)
def test_cannot_decrypt(self):
"""
Uses a public key to encrypt data on model creation.
Verifies that the data cannot be decrypted using the same key.
"""
with keys(keyinfo.ENCRYPT, mode=keyinfo.ENCRYPT.name):
with secret_model() as model:
test_val = "Test Secret"
secret = model.objects.create(name=test_val)
retrieved_secret = model.objects.get(id=secret.id)
self.assertNotEqual(test_val, retrieved_secret.name)
self.assertTrue(retrieved_secret.name.startswith(EncryptedCharField.prefix))
def test_unacceptable_purpose(self):
"""
Tries to create an encrypted field with a mode mismatch.
A purpose of "DECRYPT_AND_ENCRYPT" cannot be used with a public key,
since public keys cannot be used for decryption. This should raise an
exception.
"""
with self.assertRaises(keyczar.errors.KeyczarError):
with keys(keyinfo.ENCRYPT):
with secret_model():
# A KeyCzar exception should get raised during class
# definition time, so any code in here would never get run.
pass
def test_decryption_forbidden(self):
"""
Uses a private key to encrypt data, but decryption is not allowed.
ENCRYPTED_FIELD_MODE is explicitly set to ENCRYPT, meaning data should
not be decrypted, even though the key would allow for it.
"""
with keys(keyinfo.DECRYPT_AND_ENCRYPT, mode=keyinfo.ENCRYPT.name):
with secret_model() as model:
test_val = "Test Secret"
secret = model.objects.create(name=test_val)
retrieved_secret = model.objects.get(id=secret.id)
self.assertNotEqual(test_val, retrieved_secret.name)
self.assertTrue(retrieved_secret.name.startswith(EncryptedCharField.prefix))
def test_encrypt_public_decrypt_private(self):
"""
Uses a public key to encrypt, and a private key to decrypt data.
"""
test_val = "Test Secret"
# First, encrypt data with public key and save to db.
with keys(keyinfo.ENCRYPT, mode=keyinfo.ENCRYPT.name):
with secret_model() as model:
secret = model.objects.create(name=test_val)
enc_retrieved_secret = model.objects.get(id=secret.id)
self.assertNotEqual(test_val, enc_retrieved_secret.name)
self.assertTrue(enc_retrieved_secret.name.startswith(EncryptedCharField.prefix))
# Next, retrieve data from db, and decrypt with private key.
with keys(keyinfo.DECRYPT_AND_ENCRYPT):
with secret_model() as model:
retrieved_secret = model.objects.get(id=secret.id)
self.assertEqual(test_val, retrieved_secret.name)
class BaseEncryptedFieldTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.tmpdir = tempfile.mkdtemp()
keyczart.Create(cls.tmpdir, "test", keyinfo.DECRYPT_AND_ENCRYPT, asymmetric=True)
keyczart.AddKey(cls.tmpdir, "PRIMARY", size=4096)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdir)
@pytest.mark.skipif(keyczar_active is False, reason="Encrypted fields require keyczar to be installed")
class BaseEncryptedFieldExceptions(BaseEncryptedFieldTestCase):
"""Tests for BaseEncryptedField exceptions."""
def test_should_raise_ImproperlyConfigured_if_invalid_ENCRYPTED_FIELD_MODE_is_set(self):
with override_settings(ENCRYPTED_FIELD_KEYS_DIR=self.tmpdir, ENCRYPTED_FIELD_MODE='INVALID'):
with self.assertRaisesRegexp(ImproperlyConfigured, 'ENCRYPTED_FIELD_MODE must be either DECRYPT_AND_ENCRYPT or ENCRYPT, not INVALID.'):
BaseEncryptedField()
@pytest.mark.skipif(keyczar_active is False, reason="Encrypted fields require keyczar to be installed")
class EncryptedTextFieldTests(BaseEncryptedFieldTestCase):
"""Tests for EncryptedTextField."""
def test_should_return_formfield_with_Textarea_widget(self):
with override_settings(ENCRYPTED_FIELD_KEYS_DIR=self.tmpdir):
formfield = EncryptedTextField(max_length=50).formfield()
self.assertTrue(isinstance(formfield.widget, Textarea))
@pytest.mark.skipif(keyczar_active is False, reason="Encrypted fields require keyczar to be installed")
class EncryptedCharFieldTests(BaseEncryptedFieldTestCase):
"""Tests for EncryptedCharField."""
def test_should_return_formfield_with_TextInput_widget(self):
with override_settings(ENCRYPTED_FIELD_KEYS_DIR=self.tmpdir):
formfield = EncryptedCharField(max_length=50).formfield()
self.assertTrue(isinstance(formfield.widget, TextInput))
self.assertEqual(formfield.max_length, 700)
| 42.123711 | 147 | 0.676211 |
fed107fe85a8282872ddce7ebace8d252a1c14b1 | 10,604 | py | Python | src/Honeybee_Set EnergyPlus Zone Thresholds.py | rdzeldenrust/Honeybee | e91e58badc1c9b082596d2cf97baeccdb6d7d0af | ["CC-BY-3.0"] | 1 | 2016-03-04T09:47:42.000Z | 2016-03-04T09:47:42.000Z | src/Honeybee_Set EnergyPlus Zone Thresholds.py | rdzeldenrust/Honeybee | e91e58badc1c9b082596d2cf97baeccdb6d7d0af | ["CC-BY-3.0"] | null | null | null | src/Honeybee_Set EnergyPlus Zone Thresholds.py | rdzeldenrust/Honeybee | e91e58badc1c9b082596d2cf97baeccdb6d7d0af | ["CC-BY-3.0"] | null | null | null |
# By Mostapha Sadeghipour Roudsari
# Sadeghipour@gmail.com
# Honeybee started by Mostapha Sadeghipour Roudsari is licensed
# under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
"""
Set Zone Thresholds
-
Provided by Honeybee 0.0.55
Args:
_HBZones:...
daylightThreshold_: ...
coolingSetPt_: ...
coolingSetback_: ...
heatingSetPt_: ...
heatingSetback_: ...
coolSupplyAirTemp_: ...
heatSupplyAirTemp_: ...
Returns:
HBZones:...
"""
ghenv.Component.Name = "Honeybee_Set EnergyPlus Zone Thresholds"
ghenv.Component.NickName = 'setEPZoneThresholds'
ghenv.Component.Message = 'VER 0.0.55\nSEP_11_2014'
ghenv.Component.Category = "Honeybee"
ghenv.Component.SubCategory = "08 | Energy | Set Zone Properties"
#compatibleHBVersion = VER 0.0.55\nAUG_25_2014
#compatibleLBVersion = VER 0.0.58\nAUG_20_2014
try: ghenv.Component.AdditionalHelpFromDocStrings = "0"
except: pass
import scriptcontext as sc
import Grasshopper.Kernel as gh
import uuid
def checkTheInputs():
#If the user puts in only one value, apply that value to all of the zones.
def duplicateData(data, calcLength):
dupData = []
for count in range(calcLength):
dupData.append(data[0])
return dupData
if len(daylightThreshold_) == 1: daylightThreshold = duplicateData(daylightThreshold_, len(_HBZones))
else: daylightThreshold = daylightThreshold_
if len(coolingSetback_) == 1: coolingSetback = duplicateData(coolingSetback_, len(_HBZones))
else: coolingSetback = coolingSetback_
if len(coolingSetPt_) == 1: coolingSetPt = duplicateData(coolingSetPt_, len(_HBZones))
else: coolingSetPt = coolingSetPt_
if len(heatingSetPt_) == 1: heatingSetPt = duplicateData(heatingSetPt_, len(_HBZones))
else: heatingSetPt = heatingSetPt_
if len(heatingSetback_) == 1: heatingSetback = duplicateData(heatingSetback_, len(_HBZones))
else: heatingSetback = heatingSetback_
if len(coolSupplyAirTemp_) == 1: coolSupplyAirTemp = duplicateData(coolSupplyAirTemp_, len(_HBZones))
else: coolSupplyAirTemp = coolSupplyAirTemp_
if len(heatSupplyAirTemp_) == 1: heatSupplyAirTemp = duplicateData(heatSupplyAirTemp_, len(_HBZones))
else: heatSupplyAirTemp = heatSupplyAirTemp_
return daylightThreshold, coolingSetPt, coolingSetback, heatingSetPt, heatingSetback, coolSupplyAirTemp, heatSupplyAirTemp
def updateSetPoints(schName, setPt, setBk):
"""
This function takes a setpoint schedule, changes the setpoints and setbacks,
and returns the new yearly schedule.
The function is written for the OpenStudio template schedules and only works
for schedules that are structured similarly to the template.
"""
hb_EPScheduleAUX = sc.sticky["honeybee_EPScheduleAUX"]()
hb_EPObjectsAUX = sc.sticky["honeybee_EPObjectsAUX"]()
lb_preparation = sc.sticky["ladybug_Preparation"]()
setPt = str(setPt)
setBk = str(setBk)
if setPt=="" and setBk=="":
return schName
if hb_EPObjectsAUX.isSchedule(schName):
values, comments = hb_EPScheduleAUX.getScheduleDataByName(schName.upper(), ghenv.Component)
else:
return schName
scheduleType = values[0].lower()
if scheduleType != "schedule:year": return schName
# find all weekly schedules
numOfWeeklySchedules = int((len(values)-2)/5)
yearlyIndexes = []
yearlyValues = []
for i in range(numOfWeeklySchedules):
yearlyIndexCount = 5 * i + 2
weekDayScheduleName = values[yearlyIndexCount]
# find name of schedules for every day of the week
dailyScheduleNames, comments = hb_EPScheduleAUX.getScheduleDataByName(weekDayScheduleName.upper(), ghenv.Component)
weeklyIndexes = []
weeklyValues = []
for itemCount, dailySchedule in enumerate(dailyScheduleNames[1:]):
newName = ""
indexes = []
inValues = []
hourlyValues, comments = hb_EPScheduleAUX.getScheduleDataByName(dailySchedule.upper(), ghenv.Component)
numberOfSetPts = int((len(hourlyValues) - 3) /2)
# check if schedule has setback and give a warning if it doesn't
if numberOfSetPts == 1 and setBk!="":
warning = dailySchedule + " has no setback. Only setPt will be changed."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
print warning
# change the values in the list
if setBk!="" and numberOfSetPts == 3:
indexes.extend([5, 9])
inValues.extend([setBk, setBk])
newName += "setBk " + str(setBk) + " "
if setPt!="" and numberOfSetPts == 3:
indexes.append(7)
inValues.append(setPt)
newName += "setPt " + str(setPt) + " "
elif setPt!="" and numberOfSetPts == 1:
indexes.append(5)
inValues.append(setPt)
newName += "setPt " + str(setPt) + " "
# assign new name to be changed
indexes.append(1)
inValues.append(dailySchedule + newName)
# create a new object
original, updated = hb_EPObjectsAUX.customizeEPObject(dailySchedule.upper(), indexes, inValues)
# add to library
added, name = hb_EPObjectsAUX.addEPObjectToLib(updated, overwrite = True)
# collect indexes and names to update the weekly schedule
if added:
weeklyIndexes.append(itemCount + 2)
weeklyValues.append(name)
# modify the name of schedule
weeklyIndexes.append(1)
weeklyValues.append(newName + " {" + str(uuid.uuid4())+ "}")
# update weekly schedule based on new names
# create a new object
originalWeekly, updatedWeekly = hb_EPObjectsAUX.customizeEPObject(weekDayScheduleName.upper(), weeklyIndexes, weeklyValues)
# add to library
added, name = hb_EPObjectsAUX.addEPObjectToLib(updatedWeekly, overwrite = True)
if added:
# collect the changes for yearly schedule
yearlyIndexes.append(yearlyIndexCount + 1)
yearlyValues.append(name)
# update name
yearlyIndexes.append(1)
yearlyValues.append(schName + " " + newName)
# update yearly schedule
originalYear, updatedYear = hb_EPObjectsAUX.customizeEPObject(schName.upper(), yearlyIndexes, yearlyValues)
# add to library
added, name = hb_EPObjectsAUX.addEPObjectToLib(updatedYear, overwrite = True)
return name
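# Editor's note: illustrative sketch, not part of the original component. main() below
# calls updateSetPoints once per zone, roughly like this (the schedule name and values
# are hypothetical):
#
#   newSchedule = updateSetPoints("coolingSetPtSchedule 24-26", setPt="25", setBk="28")
#   # returns the name of a new yearly schedule added to the Honeybee schedule library,
#   # or the original name unchanged if the schedule is not a "schedule:year" object.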
def main(HBZones, daylightThreshold, coolingSetPt, heatingSetPt, coolingSetback, \
heatingSetback, coolSupplyAirTemp, heatSupplyAirTemp):
# check for Honeybee
if not sc.sticky.has_key('honeybee_release'):
print "You should first let Honeybee to fly..."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "You should first let Honeybee to fly...")
return -1
try:
if not sc.sticky['honeybee_release'].isCompatible(ghenv.Component): return -1
except:
warning = "You need a newer version of Honeybee to use this compoent." + \
" Use updateHoneybee component to update userObjects.\n" + \
"If you have already updated userObjects drag Honeybee_Honeybee component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
# call the objects from the lib
hb_hive = sc.sticky["honeybee_Hive"]()
HBZonesFromHive = hb_hive.callFromHoneybeeHive(HBZones)
# assign the values
for zoneCount, zone in enumerate(HBZonesFromHive):
try:
zone.daylightThreshold = str(daylightThreshold[zoneCount])
print "Daylight threshold for " + zone.name + " is set to: " + zone.daylightThreshold
except: pass
try:
zone.coolingSetPt = str(coolingSetPt[zoneCount])
# print "Cooling setpoint for " + zone.name + " is set to: " + zone.coolingSetPt
except: pass
try:
zone.coolingSetback = str(coolingSetback[zoneCount])
# print "Cooling setback for " + zone.name + " is set to: " + zone.coolingSetback
except: pass
# update zone schedule based on new values
zone.coolingSetPtSchedule = updateSetPoints(zone.coolingSetPtSchedule, \
zone.coolingSetPt, zone.coolingSetback)
try:
zone.heatingSetPt = str(heatingSetPt[zoneCount])
# print "Heating setpoint for " + zone.name + " is set to: " + zone.heatingSetPt
except: pass
try:
zone.heatingSetback = str(heatingSetback[zoneCount])
# print "Heating setback for " + zone.name + " is set to: " + zone.heatingSetback
except: pass
# update zone schedule based on new values
zone.heatingSetPtSchedule = updateSetPoints(zone.heatingSetPtSchedule, \
zone.heatingSetPt, zone.heatingSetback)
try:
zone.coolSupplyAirTemp = str(coolSupplyAirTemp[zoneCount])
print "Cooling supply air temperture for " + zone.name + " is set to: " + zone.coolSupplyAirTemp
except: pass
try:
zone.heatSupplyAirTemp = str(heatSupplyAirTemp[zoneCount])
print "Heating supply air temperture for " + zone.name + " is set to: " + zone.heatSupplyAirTemp
except: pass
# send the zones back to the hive
HBZones = hb_hive.addToHoneybeeHive(HBZonesFromHive, ghenv.Component.InstanceGuid.ToString() + str(uuid.uuid4()))
return HBZones
if _HBZones:
daylightThreshold, coolingSetPt, coolingSetback, heatingSetPt, \
heatingSetback, coolSupplyAirTemp, heatSupplyAirTemp = checkTheInputs()
zones = main(_HBZones, daylightThreshold, coolingSetPt, heatingSetPt, \
coolingSetback, heatingSetback, coolSupplyAirTemp, heatSupplyAirTemp)
if zones!=-1:
HBZones = zones
| 38.007168 | 131 | 0.633912 |
ca8e1c64dfa2a60ed8a2918b805d2ff8bdbbc66d | 12,033 | py | Python | src/dhtmlparser/htmlelement/html_query.py | Bystroushaak/pyDHTMLParser | 8444bd9f78f94b0d94ece8115a5f1c23fd71e641 | ["MIT"] | 4 | 2017-05-18T00:21:22.000Z | 2022-02-28T02:34:34.000Z | src/dhtmlparser/htmlelement/html_query.py | Bystroushaak/pyDHTMLParser | 8444bd9f78f94b0d94ece8115a5f1c23fd71e641 | ["MIT"] | 16 | 2015-02-14T06:27:23.000Z | 2020-06-10T05:54:59.000Z | src/dhtmlparser/htmlelement/html_query.py | Bystroushaak/pyDHTMLParser | 8444bd9f78f94b0d94ece8115a5f1c23fd71e641 | ["MIT"] | 2 | 2016-01-25T14:35:05.000Z | 2020-04-12T21:02:30.000Z |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
from .html_parser import HTMLParser
from .html_parser import _is_str
from .html_parser import _is_dict
from .html_parser import _is_iterable
# Variables ===================================================================
# Functions & classes =========================================================
class HTMLQuery(HTMLParser):
def containsParamSubset(self, params):
"""
Test whether this element contains at least all `params`, or more.
Args:
params (dict/SpecialDict): Subset of parameters.
Returns:
bool: True if all `params` are contained in this element.
"""
for key in params.keys():
if key not in self.params:
return False
if params[key] != self.params[key]:
return False
return True
def isAlmostEqual(self, tag_name, params=None, fn=None,
case_sensitive=False):
"""
Compare element with given `tag_name`, `params` and/or by lambda
function `fn`.
Lambda function is same as in :meth:`find`.
Args:
tag_name (str): Compare just name of the element.
params (dict, default None): Compare also parameters.
fn (function, default None): Function which will be used for
matching.
case_sensitive (default False): Use case sensitive matching of the
`tag_name`.
Returns:
bool: True if two elements are almost equal.
"""
if isinstance(tag_name, self.__class__):
return self.isAlmostEqual(
tag_name.getTagName(),
tag_name.params if tag_name.params else None
)
# search by lambda function
if fn and not fn(self):
return False
# compare case sensitive?
comparator = self._tagname # we need to make self._tagname lower
if not case_sensitive and tag_name:
tag_name = tag_name.lower()
comparator = comparator.lower()
# compare tagname
if tag_name and tag_name != comparator:
return False
# None params = don't use parameters to compare equality
if params is None:
return True
# compare parameters
if params == self.params:
return True
# test whether `params` dict is subset of self.params
if not self.containsParamSubset(params):
return False
return True
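# Editor's note: illustrative sketch, not part of the original source. For an element
# parsed from '<div id="x" class="c">', the matching rules above behave like this:
#
#   el.isAlmostEqual("div")                       # True  - tag name only
#   el.isAlmostEqual("DIV")                       # True  - case-insensitive by default
#   el.isAlmostEqual("div", {"id": "x"})          # True  - params subset is enough
#   el.isAlmostEqual("div", {"id": "y"})          # False - conflicting parameter value
#   el.isAlmostEqual("", fn=lambda x: "class" in x.params)  # True - lambda matching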
def find(self, tag_name, params=None, fn=None, case_sensitive=False):
"""
Same as :meth:`findAll`, but without `endtags`.
You can always get them from :attr:`endtag` property.
"""
return [
x for x in self.findAll(tag_name, params, fn, case_sensitive)
if not x.isEndTag()
]
def findB(self, tag_name, params=None, fn=None, case_sensitive=False):
"""
Same as :meth:`findAllB`, but without `endtags`.
You can always get them from :attr:`endtag` property.
"""
return [
x for x in self.findAllB(tag_name, params, fn, case_sensitive)
if not x.isEndTag()
]
def findAll(self, tag_name, params=None, fn=None, case_sensitive=False):
"""
Search for elements by their parameters using `Depth-first algorithm
<http://en.wikipedia.org/wiki/Depth-first_search>`_.
Args:
tag_name (str): Name of the tag you are looking for. Set to "" if
you wish to use only `fn` parameter.
params (dict, default None): Parameters which have to be present
in tag to be considered matching.
fn (function, default None): Use this function to match tags.
Function expects one parameter which is HTMLElement instance.
case_sensitive (bool, default False): Use case sensitive search.
Returns:
list: List of :class:`HTMLElement` instances matching your \
criteria.
"""
output = []
if self.isAlmostEqual(tag_name, params, fn, case_sensitive):
output.append(self)
tmp = []
for el in self.childs:
tmp = el.findAll(tag_name, params, fn, case_sensitive)
if tmp:
output.extend(tmp)
return output
def findAllB(self, tag_name, params=None, fn=None, case_sensitive=False):
"""
Simple search engine using `Breadth-first algorithm
<http://en.wikipedia.org/wiki/Breadth-first_search>`_.
Args:
tag_name (str): Name of the tag you are looking for. Set to "" if
you wish to use only `fn` parameter.
params (dict, default None): Parameters which have to be present
in tag to be considered matching.
fn (function, default None): Use this function to match tags.
Function expects one parameter which is HTMLElement instance.
case_sensitive (bool, default False): Use case sensitive search.
Returns:
list: List of :class:`HTMLElement` instances matching your \
criteria.
"""
output = []
if self.isAlmostEqual(tag_name, params, fn, case_sensitive):
output.append(self)
breadth_search = self.childs
for el in breadth_search:
if el.isAlmostEqual(tag_name, params, fn, case_sensitive):
output.append(el)
if el.childs:
breadth_search.extend(el.childs)
return output
def wfind(self, tag_name, params=None, fn=None, case_sensitive=False):
"""
This methods works same as :meth:`find`, but only in one level of the
:attr:`childs`.
This allows to chain :meth:`wfind` calls::
>>> dom = dhtmlparser.parseString('''
... <root>
... <some>
... <something>
... <xe id="wanted xe" />
... </something>
... <something>
... asd
... </something>
... <xe id="another xe" />
... </some>
... <some>
... else
... <xe id="yet another xe" />
... </some>
... </root>
... ''')
>>> xe = dom.wfind("root").wfind("some").wfind("something").find("xe")
>>> xe
[<dhtmlparser.htmlelement.HTMLElement object at 0x8a979ac>]
>>> str(xe[0])
'<xe id="wanted xe" />'
Args:
tag_name (str): Name of the tag you are looking for. Set to "" if
you wish to use only `fn` parameter.
params (dict, default None): Parameters which have to be present
in tag to be considered matching.
fn (function, default None): Use this function to match tags.
Function expects one parameter which is HTMLElement instance.
case_sensitive (bool, default False): Use case sensitive search.
Returns:
obj: Blank HTMLElement with all matches in :attr:`childs` property.
Note:
Returned element also have set :attr:`_container` property to True.
"""
childs = self.childs
if self._container: # container object
childs = map(
lambda x: x.childs,
filter(lambda x: x.childs, self.childs)
)
childs = sum(childs, [])  # flatten the list
el = self.__class__() # HTMLElement()
el._container = True
for child in childs:
if child.isEndTag():
continue
if child.isAlmostEqual(tag_name, params, fn, case_sensitive):
el.childs.append(child)
return el
def match(self, *args, **kwargs):
"""
:meth:`wfind` is nice function, but still kinda long to use, because
you have to manually chain all calls together and in the end, you get
:class:`HTMLElement` instance container.
This function recursively calls :meth:`wfind` for you and in the end,
you get list of matching elements::
xe = dom.match("root", "some", "something", "xe")
is alternative to::
xe = dom.wfind("root").wfind("some").wfind("something").wfind("xe")
You can use all arguments used in :meth:`wfind`::
dom = dhtmlparser.parseString('''
<root>
<div id="1">
<div id="5">
<xe id="wanted xe" />
</div>
<div id="10">
<xe id="another wanted xe" />
</div>
<xe id="another xe" />
</div>
<div id="2">
<div id="20">
<xe id="last wanted xe" />
</div>
</div>
</root>
''')
xe = dom.match(
"root",
{"tag_name": "div", "params": {"id": "1"}},
["div", {"id": "5"}],
"xe"
)
assert len(xe) == 1
assert xe[0].params["id"] == "wanted xe"
Args:
*args: List of :meth:`wfind` parameters.
absolute (bool, default None): If true, first element will be
searched from the root of the DOM. If None,
:attr:`_container` attribute will be used to decide value
of this argument. If False, :meth:`find` call will be run
first to find first element, then :meth:`wfind` will be
used to progress to next arguments.
Returns:
list: List of matching elements (empty list if no matching element\
is found).
"""
if not args:
return self.childs
# pop one argument from argument stack (tuples, so .pop() won't work)
act = args[0]
args = args[1:]
# this is used to define relative/absolute root of the first element
def wrap_find(*args, **kwargs):
"""
Find wrapper, to allow .wfind() to be substituted with .find()
call, which normally returns blank array instead of blank
`container` element.
"""
el = self.__class__() # HTMLElement()
el.childs = self.find(*args, **kwargs)
return el
# if absolute is not specified (ie - next recursive call), use
# self._container, which is set to True by .wfind(), so next search
# will be absolute from the given element
absolute = kwargs.get("absolute", None)
if absolute is None:
absolute = self._container
find_func = self.wfind if absolute else wrap_find
result = None
if _is_iterable(act):
result = find_func(*act)
elif _is_dict(act):
result = find_func(**act)
elif _is_str(act):
result = find_func(act)
else:
raise KeyError(
"Unknown parameter type '%s': %s" % (type(act), act)
)
if not result.childs:
return []
match = result.match(*args)
# just to be sure return always blank array, when the match is
# False/None and so on (it shouldn't be, but ..)
return match if match else []
| 34.777457 | 82 | 0.516164 |
f1f7f2cec8a2c5cc70310c5e0b5d04d1c5bdfc41 | 2,206 | py | Python | python/constructor.py | IshitaTakeshi/Louds-Trie | 32cb83cf9ac2cf8befa643f3265958502115949f | ["MIT"] | 18 | 2015-02-27T19:30:46.000Z | 2021-05-01T13:05:55.000Z | python/constructor.py | IshitaTakeshi/Louds-Trie | 32cb83cf9ac2cf8befa643f3265958502115949f | ["MIT"] | 2 | 2015-03-01T15:51:07.000Z | 2016-10-18T02:24:41.000Z | python/constructor.py | IshitaTakeshi/Louds-Trie | 32cb83cf9ac2cf8befa643f3265958502115949f | ["MIT"] | 4 | 2015-07-05T11:28:30.000Z | 2019-05-24T00:50:15.000Z |
class Node(object):
"""
The node of the tree.
Each node has one character as its member.
"""
def __init__(self, value):
self.value = value
self.children = []
self.visited = False
def __str__(self):
return str(self.value)
def add_child(self, child):
self.children.append(child)
class ArrayConstructor(object):
"""
This class has:
a function which constructs a tree from words
a function which dumps the tree as a LOUDS bit-string
"""
def __init__(self):
self.tree = Node('') #The root node
def add(self, word):
"""
Add a word to the tree
"""
self.build(self.tree, word)
def build(self, node, word, depth=0):
"""
Build a tree
"""
if(depth == len(word)):
return
for child in node.children:
# if a child whose value is word[depth] exists,
# continue building the tree from that child, starting at the next character.
if(child.value == word[depth]):
self.build(child, word, depth+1)
return
# if no child with the value word[depth] exists,
# create a new node and continue constructing the tree.
child = Node(word[depth])
node.add_child(child)
self.build(child, word, depth+1)
return
def show(self):
self.show_(self.tree)
def show_(self, node, depth=0):
print("{}{}".format(' '*depth, node))
for child in node.children:
self.show_(child, depth+1)
def dump(self):
"""
Dump a LOUDS bit-string
"""
from collections import deque
bit_array = [1, 0] # [1, 0] indicates the 0th node
labels = ['']
#dumps by Breadth-first search
queue = deque()
queue.append(self.tree)
while(len(queue) != 0):
node = queue.popleft()
labels.append(node.value)
bit_array += [1] * len(node.children) + [0]
for child in node.children:
child.visited = True
queue.append(child)
return bit_array, labels
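# Editor's note: small worked example, not part of the original source. Building the
# trie for the words "an" and "at" and dumping it gives the LOUDS encoding below
# (node order follows the breadth-first traversal in dump()):
#
#   constructor = ArrayConstructor()
#   constructor.add("an")
#   constructor.add("at")
#   bits, labels = constructor.dump()
#   # bits   == [1, 0, 1, 0, 1, 1, 0, 0, 0]
#   # labels == ['', '', 'a', 'n', 't']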
| 25.952941 | 68 | 0.537625 |