hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e0256be489697647435f5e0ed8f5df55792a1d86 | 1,419 | py | Python | Lintcode/G_Practice/Tag_DFS/1288. Reconstruct Itinerary.py | ctc316/algorithm-python | ac4580d55e05e93e407c6156c9bb801808027d60 | [
"MIT"
] | null | null | null | Lintcode/G_Practice/Tag_DFS/1288. Reconstruct Itinerary.py | ctc316/algorithm-python | ac4580d55e05e93e407c6156c9bb801808027d60 | [
"MIT"
] | null | null | null | Lintcode/G_Practice/Tag_DFS/1288. Reconstruct Itinerary.py | ctc316/algorithm-python | ac4580d55e05e93e407c6156c9bb801808027d60 | [
"MIT"
] | null | null | null | class Solution:
"""
@param tickets:
@return: nothing
"""
## memory limitation
class Solution:
"""
@param tickets:
@return: nothing
"""
| 22.887097 | 60 | 0.513037 | class Solution:
"""
@param tickets:
@return: nothing
"""
def findItinerary(self, tickets):
targets = {}
for a, b in reversed(sorted(tickets)):
if a in targets:
targets[a].append(b)
else:
targets[a] = [b]
route = []
self.dfs('JFK', targets, route)
return route[::-1]
def dfs(self, airport, targets, route):
while airport in targets and targets[airport]:
self.dfs(targets[airport].pop(), targets, route)
route.append(airport)
## memory limitation
class GraphNode:
def __init__(self, val):
self.val = val
self.adjacents = set()
class Solution:
"""
@param tickets:
@return: nothing
"""
def findItinerary(self, tickets):
nodeMap = {}
for t in tickets:
if t[0] not in nodeMap:
nodeMap[t[0]] = GraphNode(t[0])
if t[1] not in nodeMap:
nodeMap[t[1]] = GraphNode(t[1])
for t in tickets:
nodeMap[t[0]].adjacents.add(nodeMap[t[1]])
result = []
self.dfs(nodeMap['JFK'], result)
return result
def dfs(self, node, result):
for adj in node.adjacents:
result.append(node.val)
self.dfs(adj, result)
if not node.adjacents:
result.append(node.val)
| 1,086 | -5 | 159 |
9d37a19bc1155bc32ed927f295a3cb94211d93de | 169 | py | Python | exercises/exc_01_04.py | throughput-ec/ec_workshops_py | 08d68c27fd916c34eb3636f6d382d6f9bf8ea969 | [
"MIT"
] | 1 | 2022-02-18T23:37:47.000Z | 2022-02-18T23:37:47.000Z | exercises/exc_01_04.py | LinkedEarth/ec_workshops_py | 44b4f8ea890da31311a51541a7f7e01c30a5acd1 | [
"MIT"
] | null | null | null | exercises/exc_01_04.py | LinkedEarth/ec_workshops_py | 44b4f8ea890da31311a51541a7f7e01c30a5acd1 | [
"MIT"
] | 2 | 2022-02-18T23:34:12.000Z | 2022-03-14T23:33:20.000Z | # Create the dictionary
mydict = {'archiveType':'coral',
'Publication':{'author':'J. Doe','title':'The most important record'}}
#print the keys
mydict.keys()
| 18.777778 | 78 | 0.656805 | # Create the dictionary
mydict = {'archiveType':'coral',
'Publication':{'author':'J. Doe','title':'The most important record'}}
#print the keys
mydict.keys()
| 0 | 0 | 0 |
0d2933b29d3348498d59c0280bde702c99c40087 | 1,474 | py | Python | main/permissions.py | felipeue/SmartBuilding | 57d904c6166c87f836bc8fada9eb5a2bc82069b8 | [
"MIT"
] | null | null | null | main/permissions.py | felipeue/SmartBuilding | 57d904c6166c87f836bc8fada9eb5a2bc82069b8 | [
"MIT"
] | null | null | null | main/permissions.py | felipeue/SmartBuilding | 57d904c6166c87f836bc8fada9eb5a2bc82069b8 | [
"MIT"
] | null | null | null | from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponseRedirect
from main.models import *
| 44.666667 | 94 | 0.704206 | from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponseRedirect
from main.models import *
class OwnerLoginRequiredMixin(object):
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated():
return HttpResponseRedirect(reverse_lazy("login"))
elif not Owner.objects.filter(user_origin=request.user).exists():
return HttpResponseRedirect(reverse_lazy("logout"))
else:
return super(OwnerLoginRequiredMixin, self).dispatch(request, *args, **kwargs)
class ConciergeLoginRequiredMixin(object):
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated():
return HttpResponseRedirect(reverse_lazy("login"))
elif not Concierge.objects.filter(userOrigin=request.user).exists():
return HttpResponseRedirect(reverse_lazy("logout"))
else:
return super(ConciergeLoginRequiredMixin, self).dispatch(request, *args, **kwargs)
class ResidentLoginRequiredMixin(object):
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated():
return HttpResponseRedirect(reverse_lazy("login"))
elif not Resident.objects.filter(userOrigin=request.user).exists():
return HttpResponseRedirect(reverse_lazy("logout"))
else:
return super(ResidentLoginRequiredMixin, self).dispatch(request, *args, **kwargs) | 1,146 | 58 | 147 |
bca545b47efd7a4c7d67cacbb56d24360f10b452 | 1,080 | py | Python | pyatv/support/packet.py | Jacobs4/pyatv | 52956adf3b79198be52cc03649f3ddeee19f9e6c | [
"MIT"
] | 532 | 2017-02-01T19:23:28.000Z | 2022-03-29T09:57:39.000Z | pyatv/support/packet.py | Jacobs4/pyatv | 52956adf3b79198be52cc03649f3ddeee19f9e6c | [
"MIT"
] | 1,639 | 2017-02-01T19:22:04.000Z | 2022-03-31T17:26:40.000Z | pyatv/support/packet.py | bdraco/pyatv | 9541d21e6101c60866d832626be97bf962774cd5 | [
"MIT"
] | 102 | 2017-02-02T01:42:13.000Z | 2022-02-26T08:49:34.000Z | """Generic utility for encoding and decoding binary packets."""
from collections import namedtuple
import struct
def defpacket(name: str, **kwargs):
"""Define a protocol packet."""
fmt: str = ">" + "".join(kwargs.values())
msg_type = namedtuple(name, kwargs.keys()) # type: ignore
return _MessageType
| 30.857143 | 88 | 0.581481 | """Generic utility for encoding and decoding binary packets."""
from collections import namedtuple
import struct
def defpacket(name: str, **kwargs):
"""Define a protocol packet."""
fmt: str = ">" + "".join(kwargs.values())
msg_type = namedtuple(name, kwargs.keys()) # type: ignore
class _MessageType:
length = struct.calcsize(fmt)
@staticmethod
def decode(data: bytes, allow_excessive=False):
"""Decode binary data as message."""
return msg_type._make(
struct.unpack(
fmt, data if not allow_excessive else data[0 : struct.calcsize(fmt)]
)
)
@staticmethod
def encode(*args) -> bytes:
"""Encode a message into binary data."""
return struct.pack(fmt, *args)
@staticmethod
def extend(ext_name, **ext_kwargs):
"""Extend a message type with additional fields."""
fields = {**kwargs, **ext_kwargs}
return defpacket(ext_name, **fields)
return _MessageType
| 0 | 732 | 27 |
dca6de27b3aa5a02b1fa5119d1cc0e8a557bea9a | 5,724 | py | Python | test/unit/test_terracoiny_things.py | walkjivefly/sentinel | b0b82b2974fd1d950285e1845a6fe311cf0fb9ea | [
"MIT"
] | 1 | 2018-01-18T18:52:41.000Z | 2018-01-18T18:52:41.000Z | test/unit/test_terracoiny_things.py | walkjivefly/sentinel | b0b82b2974fd1d950285e1845a6fe311cf0fb9ea | [
"MIT"
] | 1 | 2017-12-09T15:11:47.000Z | 2017-12-29T16:47:10.000Z | test/unit/test_terracoiny_things.py | walkjivefly/sentinel | b0b82b2974fd1d950285e1845a6fe311cf0fb9ea | [
"MIT"
] | 8 | 2017-12-01T18:01:43.000Z | 2018-12-31T13:39:34.000Z | import pytest
import sys
import os
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../lib')))
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
# ========================================================================
| 40.595745 | 390 | 0.780573 | import pytest
import sys
import os
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../lib')))
@pytest.fixture
def valid_terracoin_address(network='mainnet'):
return 'mtHjXMx6dvTwgcvkDvsuWftiyNPi26RqDq' if (network == 'testnet') else '142So3onajFZfojruob5oqtxYT7SkU5Zcs'
@pytest.fixture
def invalid_terracoin_address(network='mainnet'):
return 'mtHjXMx6dvTwgcvkDvsuWftiyNPi26RqDr' if (network == 'testnet') else '142So3onajFZfojruob5oqtxYT7SkU5Zct'
@pytest.fixture
def current_block_hash():
return '000001c9ba1df5a1c58a4e458fb6febfe9329b1947802cd60a4ae90dd754b534'
@pytest.fixture
def mn_list():
from masternode import Masternode
masternodelist_full = {
u'701854b26809343704ab31d1c45abc08f9f83c5c2bd503a9d5716ef3c0cda857-1': u' ENABLED 70201 yjaFS6dudxUTxYPTDB9BYd1Nv4vMJXm3vK 1474157572 82842 1474152618 71111 52.90.74.124:18321',
u'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1': u' ENABLED 70201 yUuAsYCnG5XrjgsGvRwcDqPhgLUnzNfe8L 1474157732 1590425 1474155175 71122 [2604:a880:800:a1::9b:0]:18321',
u'656695ed867e193490261bea74783f0a39329ff634a10a9fb6f131807eeca744-1': u' ENABLED 70201 yepN97UoBLoP2hzWnwWGRVTcWtw1niKwcB 1474157704 824622 1474152571 71110 178.62.203.249:18321',
}
mnlist = [Masternode(vin, mnstring) for (vin, mnstring) in masternodelist_full.items()]
return mnlist
@pytest.fixture
def mn_status_good():
# valid masternode status enabled & running
status = {
"vin": "CTxIn(COutPoint(f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56, 1), scriptSig=)",
"service": "[2604:a880:800:a1::9b:0]:18321",
"pubkey": "1FatXPh2s8JSamgj8LLUoiKMCWRaWABs4x",
"status": "Masternode successfully started"
}
return status
@pytest.fixture
def mn_status_bad():
# valid masternode but not running/waiting
status = {
"vin": "CTxIn(COutPoint(0000000000000000000000000000000000000000000000000000000000000000, 4294967295), coinbase )",
"service": "[::]:0",
"status": "Node just started, not yet activated"
}
return status
# ========================================================================
def test_valid_terracoin_address():
from terracoinlib import is_valid_terracoin_address
main = valid_terracoin_address()
test = valid_terracoin_address('testnet')
assert is_valid_terracoin_address(main) is True
assert is_valid_terracoin_address(main, 'mainnet') is True
assert is_valid_terracoin_address(main, 'testnet') is False
assert is_valid_terracoin_address(test) is False
assert is_valid_terracoin_address(test, 'mainnet') is False
assert is_valid_terracoin_address(test, 'testnet') is True
def test_invalid_terracoin_address():
from terracoinlib import is_valid_terracoin_address
main = invalid_terracoin_address()
test = invalid_terracoin_address('testnet')
assert is_valid_terracoin_address(main) is False
assert is_valid_terracoin_address(main, 'mainnet') is False
assert is_valid_terracoin_address(main, 'testnet') is False
assert is_valid_terracoin_address(test) is False
assert is_valid_terracoin_address(test, 'mainnet') is False
assert is_valid_terracoin_address(test, 'testnet') is False
def test_deterministic_masternode_elections(current_block_hash, mn_list):
winner = elect_mn(block_hash=current_block_hash, mnlist=mn_list)
assert winner == 'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1'
winner = elect_mn(block_hash='00000056bcd579fa3dc9a1ee41e8124a4891dcf2661aa3c07cc582bfb63b52b9', mnlist=mn_list)
assert winner == '656695ed867e193490261bea74783f0a39329ff634a10a9fb6f131807eeca744-1'
def test_deterministic_masternode_elections(current_block_hash, mn_list):
from terracoinlib import elect_mn
winner = elect_mn(block_hash=current_block_hash, mnlist=mn_list)
assert winner == 'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1'
winner = elect_mn(block_hash='00000056bcd579fa3dc9a1ee41e8124a4891dcf2661aa3c07cc582bfb63b52b9', mnlist=mn_list)
assert winner == '656695ed867e193490261bea74783f0a39329ff634a10a9fb6f131807eeca744-1'
def test_parse_masternode_status_vin():
from terracoinlib import parse_masternode_status_vin
status = mn_status_good()
vin = parse_masternode_status_vin(status['vin'])
assert vin == 'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1'
status = mn_status_bad()
vin = parse_masternode_status_vin(status['vin'])
assert vin is None
def test_hash_function():
import terracoinlib
sb_data_hex = '5b227375706572626c6f636b222c207b226576656e745f626c6f636b5f686569676874223a2037323639362c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e7473223a202232352e37353030303030307c32352e3735303030303030227d5d'
sb_hash = '5c7c28ddec8c1ad54b49f6f1e79369e7ccaf76f5ddc30e502569d674e458ccf3'
hex_hash = "%x" % terracoinlib.hashit(sb_data_hex)
assert hex_hash == sb_hash
def test_blocks_to_seconds():
import terracoinlib
from decimal import Decimal
precision = Decimal('0.001')
assert Decimal(terracoinlib.blocks_to_seconds(0)) == Decimal(0.0)
assert Decimal(terracoinlib.blocks_to_seconds(2)).quantize(precision) \
== Decimal(254.4).quantize(precision)
assert int(terracoinlib.blocks_to_seconds(16616)) == 2113555
| 5,001 | 0 | 293 |
7cff0fa89fc182d0643e9031f6c34e57731cef17 | 12,742 | py | Python | python/GafferSceneUI/ShaderTweaksUI.py | Tuftux/gaffer | 5acaf7cbfadbae841dc06854121ca85dcc5c338c | [
"BSD-3-Clause"
] | 1 | 2019-12-02T02:31:25.000Z | 2019-12-02T02:31:25.000Z | python/GafferSceneUI/ShaderTweaksUI.py | Tuftux/gaffer | 5acaf7cbfadbae841dc06854121ca85dcc5c338c | [
"BSD-3-Clause"
] | null | null | null | python/GafferSceneUI/ShaderTweaksUI.py | Tuftux/gaffer | 5acaf7cbfadbae841dc06854121ca85dcc5c338c | [
"BSD-3-Clause"
] | null | null | null | ##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import imath
import IECore
import IECoreScene
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
Gaffer.Metadata.registerNode(
GafferScene.ShaderTweaks,
"description",
"""
Makes modifications to shader parameter values.
""",
plugs = {
"shader" : [
"description",
"""
The type of shader to modify. This is actually the name
of an attribute which contains the shader network.
""",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
"presetsPlugValueWidget:allowCustom", True,
"preset:None", "",
"layout:index", 0
],
"localise" : [
"description",
"""
Turn on to allow location-specific tweaks to be made to inherited
shaders. Shaders will be localised to locations matching the
node's filter prior to tweaking. The original inherited shader will
remain untouched.
""",
"layout:index", 1
],
"ignoreMissing" : [
"description",
"""
Ignores tweaks targeting missing parameters. When off, missing parameters
cause the node to error.
""",
"layout:index", 2
],
"tweaks" : [
"description",
"""
The tweaks to be made to the parameters of the shader.
Arbitrary numbers of user defined tweaks may be
added as children of this plug via the user
interface, or using the ShaderTweaks API via python.
""",
"plugValueWidget:type", "GafferUI.LayoutPlugValueWidget",
"layout:customWidget:footer:widgetType", "GafferSceneUI.ShaderTweaksUI._TweaksFooter",
"layout:customWidget:footer:index", -1,
"nodule:type", "GafferUI::CompoundNodule",
"noduleLayout:section", "left",
"noduleLayout:spacing", 0.2,
# Add + button for showing and hiding parameters in the GraphEditor
"noduleLayout:customGadget:addButton:gadgetType", "GafferSceneUI.ShaderTweaksUI.PlugAdder",
],
"tweaks.*" : [
"noduleLayout:visible", False, # Can be shown individually using PlugAdder above
],
}
)
##########################################################################
# Internal utilities
##########################################################################
##########################################################################
# _TweaksFooter
##########################################################################
##########################################################################
# PlugValueWidget context menu
##########################################################################
GafferUI.PlugValueWidget.popupMenuSignal().connect( __plugPopupMenu, scoped = False )
##########################################################################
# Nodule context menu
##########################################################################
GafferUI.GraphEditor.plugContextMenuSignal().connect( __graphEditorPlugContextMenu, scoped = False )
| 28.828054 | 115 | 0.667713 | ##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import imath
import IECore
import IECoreScene
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
Gaffer.Metadata.registerNode(
GafferScene.ShaderTweaks,
"description",
"""
Makes modifications to shader parameter values.
""",
plugs = {
"shader" : [
"description",
"""
The type of shader to modify. This is actually the name
of an attribute which contains the shader network.
""",
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
"presetsPlugValueWidget:allowCustom", True,
"preset:None", "",
"layout:index", 0
],
"localise" : [
"description",
"""
Turn on to allow location-specific tweaks to be made to inherited
shaders. Shaders will be localised to locations matching the
node's filter prior to tweaking. The original inherited shader will
remain untouched.
""",
"layout:index", 1
],
"ignoreMissing" : [
"description",
"""
Ignores tweaks targeting missing parameters. When off, missing parameters
cause the node to error.
""",
"layout:index", 2
],
"tweaks" : [
"description",
"""
The tweaks to be made to the parameters of the shader.
Arbitrary numbers of user defined tweaks may be
added as children of this plug via the user
interface, or using the ShaderTweaks API via python.
""",
"plugValueWidget:type", "GafferUI.LayoutPlugValueWidget",
"layout:customWidget:footer:widgetType", "GafferSceneUI.ShaderTweaksUI._TweaksFooter",
"layout:customWidget:footer:index", -1,
"nodule:type", "GafferUI::CompoundNodule",
"noduleLayout:section", "left",
"noduleLayout:spacing", 0.2,
# Add + button for showing and hiding parameters in the GraphEditor
"noduleLayout:customGadget:addButton:gadgetType", "GafferSceneUI.ShaderTweaksUI.PlugAdder",
],
"tweaks.*" : [
"noduleLayout:visible", False, # Can be shown individually using PlugAdder above
],
}
)
##########################################################################
# Internal utilities
##########################################################################
def _shaderTweaksNode( plugValueWidget ) :
# The plug may not belong to a ShaderTweaks node
# directly. Instead it may have been promoted
# elsewhere and be driving a target plug on a
# ShaderTweaks node.
def walkOutputs( plug ) :
if isinstance( plug.node(), GafferScene.ShaderTweaks ) :
return plug.node()
for output in plug.outputs() :
node = walkOutputs( output )
if node is not None :
return node
return walkOutputs( plugValueWidget.getPlug() )
def _pathsFromAffected( plugValueWidget ) :
node = _shaderTweaksNode( plugValueWidget )
if node is None :
return []
pathMatcher = IECore.PathMatcher()
with plugValueWidget.getContext() :
GafferScene.SceneAlgo.matchingPaths( node["filter"], node["in"], pathMatcher )
return pathMatcher.paths()
def _pathsFromSelection( plugValueWidget ) :
node = _shaderTweaksNode( plugValueWidget )
if node is None :
return []
paths = GafferSceneUI.ContextAlgo.getSelectedPaths( plugValueWidget.getContext() )
paths = paths.paths() if paths else []
with plugValueWidget.getContext() :
paths = [ p for p in paths if node["in"].exists( p ) ]
return paths
def _shaderAttributes( plugValueWidget, paths, affectedOnly ) :
result = {}
node = _shaderTweaksNode( plugValueWidget )
if node is None :
return result
with plugValueWidget.getContext() :
useFullAttr = node["localise"].getValue()
attributeNamePatterns = node["shader"].getValue() if affectedOnly else "*"
for path in paths :
attributes = node["in"].fullAttributes( path ) if useFullAttr else node["in"].attributes( path )
for name, attribute in attributes.items() :
if not IECore.StringAlgo.matchMultiple( name, attributeNamePatterns ) :
continue
if not isinstance( attribute, IECoreScene.ShaderNetwork ) or not len( attribute ) :
continue
result.setdefault( path, {} )[name] = attribute
return result
##########################################################################
# _TweaksFooter
##########################################################################
class _TweaksFooter( GafferUI.PlugValueWidget ) :
def __init__( self, plug ) :
row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal )
GafferUI.PlugValueWidget.__init__( self, row, plug )
with row :
GafferUI.Spacer( imath.V2i( GafferUI.PlugWidget.labelWidth(), 1 ) )
GafferUI.MenuButton(
image = "plus.png",
hasFrame = False,
menu = GafferUI.Menu( Gaffer.WeakMethod( self.__menuDefinition ) )
)
GafferUI.Spacer( imath.V2i( 1 ), imath.V2i( 999999, 1 ), parenting = { "expand" : True } )
def _updateFromPlug( self ) :
self.setEnabled( self._editable() )
def __menuDefinition( self ) :
result = IECore.MenuDefinition()
result.append(
"/From Affected",
{
"subMenu" : Gaffer.WeakMethod( self.__addFromAffectedMenuDefinition )
}
)
result.append(
"/From Selection",
{
"subMenu" : Gaffer.WeakMethod( self.__addFromSelectedMenuDefinition )
}
)
result.append( "/FromPathsDivider", { "divider" : True } )
# TODO - would be nice to share these default options with other users of TweakPlug
for item in [
Gaffer.BoolPlug,
Gaffer.FloatPlug,
Gaffer.IntPlug,
"NumericDivider",
Gaffer.StringPlug,
"StringDivider",
Gaffer.V2iPlug,
Gaffer.V3iPlug,
Gaffer.V2fPlug,
Gaffer.V3fPlug,
"VectorDivider",
Gaffer.Color3fPlug,
Gaffer.Color4fPlug
] :
if isinstance( item, basestring ) :
result.append( "/" + item, { "divider" : True } )
else :
result.append(
"/" + item.__name__.replace( "Plug", "" ),
{
"command" : functools.partial( Gaffer.WeakMethod( self.__addTweak ), "", item ),
}
)
return result
def __addFromAffectedMenuDefinition( self ) :
return self.__addFromPathsMenuDefinition( _pathsFromAffected( self ) )
def __addFromSelectedMenuDefinition( self ) :
return self.__addFromPathsMenuDefinition( _pathsFromSelection( self ) )
def __addFromPathsMenuDefinition( self, paths ) :
result = IECore.MenuDefinition()
shaderAttributes = _shaderAttributes( self, paths, affectedOnly = True )
if not len( shaderAttributes ) :
result.append(
"/No Shaders Found", { "active" : False }
)
return result
shaders = {}
for attributes in shaderAttributes.values() :
for attributeName, network in attributes.items() :
for shaderName, shader in network.shaders().items() :
if shaderName == network.getOutput().shader :
shaderName = ""
shaderParameters = shaders.setdefault( shaderName, {} )
for parameterName, parameterValue in shader.parameters.items() :
if parameterName.startswith( "__" ) :
continue
shaderParameters[parameterName] = parameterValue
if not len( shaders ) :
result.append(
"/No Parameters Found", { "active" : False }
)
return result
for shaderName, shader in shaders.items() :
menuPrefix = "/"
tweakPrefix = ""
if len( shaders ) > 1 :
menuPrefix = "/Other/{0}/".format( shaderName ) if shaderName else "/Main/"
tweakPrefix = "{0}.".format( shaderName ) if shaderName else ""
for parameterName in sorted( shader.keys() ) :
result.append(
menuPrefix + parameterName,
{
"command" : functools.partial(
Gaffer.WeakMethod( self.__addTweak ),
tweakPrefix + parameterName, shader[parameterName]
)
}
)
return result
def __addTweak( self, name, plugTypeOrValue ) :
if isinstance( plugTypeOrValue, IECore.Data ) :
plug = GafferScene.TweakPlug( name, plugTypeOrValue )
else :
plug = GafferScene.TweakPlug( name, plugTypeOrValue() )
if name :
plug.setName( name.replace( ".", "_" ) )
with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
self.getPlug().addChild( plug )
##########################################################################
# PlugValueWidget context menu
##########################################################################
def __setShaderFromAffectedMenuDefinition( menu ) :
plugValueWidget = menu.ancestor( GafferUI.PlugValueWidget )
return __setShaderFromPathsMenuDefinition( plugValueWidget, _pathsFromAffected( plugValueWidget ) )
def __setShaderFromSelectionMenuDefinition( menu ) :
plugValueWidget = menu.ancestor( GafferUI.PlugValueWidget )
return __setShaderFromPathsMenuDefinition( plugValueWidget, _pathsFromSelection( plugValueWidget ) )
def __setShader( plug, value ) :
with Gaffer.UndoScope( plug.ancestor( Gaffer.ScriptNode ) ) :
plug.setValue( value )
def __setShaderFromPathsMenuDefinition( plugValueWidget, paths ) :
shaderAttributes = _shaderAttributes( plugValueWidget, paths, affectedOnly = False )
names = set().union( *[ set( a.keys() ) for a in shaderAttributes.values() ] )
result = IECore.MenuDefinition()
for name in sorted( names ) :
result.append(
"/" + name,
{
"command" : functools.partial( __setShader, plugValueWidget.getPlug(), name ),
"active" : not plugValueWidget.getReadOnly() and not Gaffer.MetadataAlgo.readOnly( plugValueWidget.getPlug() ),
}
)
return result
def __plugPopupMenu( menuDefinition, plugValueWidget ) :
plug = plugValueWidget.getPlug()
if plug is None :
return
node = plug.node()
if not isinstance( node, GafferScene.ShaderTweaks ) :
return
if plug != node["shader"] :
return
menuDefinition.prepend( "/ShaderTweaksDivider/", { "divider" : True } )
menuDefinition.prepend( "/From Selection/", { "subMenu" : __setShaderFromSelectionMenuDefinition } )
menuDefinition.prepend( "/From Affected/", { "subMenu" : __setShaderFromAffectedMenuDefinition } )
GafferUI.PlugValueWidget.popupMenuSignal().connect( __plugPopupMenu, scoped = False )
##########################################################################
# Nodule context menu
##########################################################################
def __setPlugMetadata( plug, key, value ) :
with Gaffer.UndoScope( plug.ancestor( Gaffer.ScriptNode ) ) :
Gaffer.Metadata.registerValue( plug, key, value )
def __graphEditorPlugContextMenu( graphEditor, plug, menuDefinition ) :
if not isinstance( plug.node(), GafferScene.ShaderTweaks ) :
return
tweakPlug = plug.parent()
if not isinstance( tweakPlug, GafferScene.TweakPlug ) :
return False
if tweakPlug.parent() != plug.node()["tweaks"] :
return
if len( menuDefinition.items() ) :
menuDefinition.append( "/HideDivider", { "divider" : True } )
menuDefinition.append(
"/Hide",
{
"command" : functools.partial( __setPlugMetadata, tweakPlug, "noduleLayout:visible", False ),
"active" : plug.getInput() is None and not Gaffer.MetadataAlgo.readOnly( tweakPlug ),
}
)
GafferUI.GraphEditor.plugContextMenuSignal().connect( __graphEditorPlugContextMenu, scoped = False )
| 7,623 | 28 | 444 |
b1418d284e80e7cd51b88da272019b6cdd11dd83 | 711 | py | Python | models/scene_export.py | freds72/hl3 | 44bbbf7715cf9e0b01a3de34436a69ee79d699db | [
"Apache-2.0"
] | 3 | 2019-12-09T20:26:37.000Z | 2021-07-07T15:08:52.000Z | models/scene_export.py | freds72/hl3 | 44bbbf7715cf9e0b01a3de34436a69ee79d699db | [
"Apache-2.0"
] | null | null | null | models/scene_export.py | freds72/hl3 | 44bbbf7715cf9e0b01a3de34436a69ee79d699db | [
"Apache-2.0"
] | 2 | 2019-09-03T03:29:38.000Z | 2020-09-14T03:40:29.000Z | import bpy
import bmesh
import argparse
import sys
import math
from mathutils import Vector, Matrix
scene = bpy.context.scene
scene_data=[]
all_objects = [ob for ob in scene.objects if ob.layers[0] and ob.type=='MESH']
for ob in all_objects:
obdata = ob.data
ob_name_tokens= ob.name.split('.')
ob_data={
"model": ob.name if len(ob_name_tokens)==1 else ob_name_tokens[0],
"pos":[round(ob.location.x,2), round(ob.location.z,2), round(ob.location.y,2)],
"rotation":[round(math.degrees(ob.rotation_euler.x)/360,2)-0.5,round(math.degrees(ob.rotation_euler.z)/360,2)-0.5,round(math.degrees(ob.rotation_euler.y)/360,2)-0.5]
}
scene_data.append(ob_data)
print(scene_data)
| 30.913043 | 173 | 0.701828 | import bpy
import bmesh
import argparse
import sys
import math
from mathutils import Vector, Matrix
# Export helper run inside Blender: collects every mesh object on layer 1 of
# the active scene into a list of {model, pos, rotation} dicts and prints it
# to stdout for the game's scene importer.
# NOTE(review): relies on `bpy` being imported earlier in this file; `bmesh`,
# `argparse`, `sys`, `Vector` and `Matrix` appear unused here.
scene = bpy.context.scene
scene_data=[]
all_objects = [ob for ob in scene.objects if ob.layers[0] and ob.type=='MESH']
for ob in all_objects:
	obdata = ob.data
	# "name.001"-style Blender duplicates all map back to the base model name
	ob_name_tokens= ob.name.split('.')
	ob_data={
		"model": ob.name if len(ob_name_tokens)==1 else ob_name_tokens[0],
		# note: y and z are swapped relative to Blender's axes
		"pos":[round(ob.location.x,2), round(ob.location.z,2), round(ob.location.y,2)],
		# euler angles converted from degrees to fractional turns, offset by -0.5
		"rotation":[round(math.degrees(ob.rotation_euler.x)/360,2)-0.5,round(math.degrees(ob.rotation_euler.z)/360,2)-0.5,round(math.degrees(ob.rotation_euler.y)/360,2)-0.5]
	}
	scene_data.append(ob_data)
print(scene_data)
| 0 | 0 | 0 |
7eded74169c3766918ffc348574ee4bb343f56a3 | 9,871 | py | Python | cave/analyzer/performance/overview_table.py | deslay1/CAVE | e4b9abc3812034f49dddd27ffc17dbab39782a1c | [
"BSD-3-Clause"
] | 45 | 2018-01-11T11:26:11.000Z | 2021-06-22T06:14:39.000Z | cave/analyzer/performance/overview_table.py | deslay1/CAVE | e4b9abc3812034f49dddd27ffc17dbab39782a1c | [
"BSD-3-Clause"
] | 150 | 2017-12-20T16:14:45.000Z | 2021-09-28T11:26:33.000Z | cave/analyzer/performance/overview_table.py | automl/SpySMAC | afcbecd0b9cb97276625c16a89cb6df141e6f6f2 | [
"BSD-3-Clause"
] | 17 | 2018-03-17T04:46:09.000Z | 2021-02-18T18:31:38.000Z | import os
from collections import OrderedDict
import numpy as np
from ConfigSpace.hyperparameters import NumericalHyperparameter, CategoricalHyperparameter, OrdinalHyperparameter, \
Constant
from pandas import DataFrame
from cave.analyzer.base_analyzer import BaseAnalyzer
from cave.utils.helpers import get_config_origin
class OverviewTable(BaseAnalyzer):
"""
Meta data, i.e. number of instances and parameters as well as configuration budget. Statistics apply to the
best run, if multiple configurator runs are compared.
"""
def run(self):
""" Generate tables. """
scenario = self.runscontainer.scenario
# General infos
general_dict = self._general_dict(scenario)
html_table_general = DataFrame(data=OrderedDict([('General', general_dict)]))
html_table_general = html_table_general.reindex(list(general_dict.keys()))
html_table_general = html_table_general.to_html(escape=False, header=False, justify='left')
self.result["General"] = {"table": html_table_general,
"tooltip": "General information about the optimization scenario."}
# Run-specific / budget specific infos
for mode in ['parallel', 'budget']:
runspec_dict = self._runspec_dict(identify=mode)
if not runspec_dict:
continue
order_spec = list(list(runspec_dict.values())[0].keys()) # Get keys of any sub-dict for order
html_table_specific = DataFrame(runspec_dict)
html_table_specific = html_table_specific.reindex(order_spec)
html_table_specific = html_table_specific.to_html(escape=False, justify='left')
if mode == 'parallel':
self.result["Parallel Runs"] = {"table": html_table_specific,
"tooltip": "Information to individual parallel runs."}
if mode == 'budget':
self.result["Budgets"] = {"table": html_table_specific,
"tooltip": "Statistics related to the budgets used in this optimization."}
# ConfigSpace in tabular form
cs_dict = self._configspace(scenario.cs)
cs_table = DataFrame(data=cs_dict)
html_table_cs = cs_table.to_html(escape=False, justify='left', index=False)
self.result["Configuration Space"] = {"table": html_table_cs,
"tooltip": "The parameter configuration space. "
"(See github.com/automl/ConfigSpace)"}
return self.result
def _general_dict(self, scenario):
""" Generate the meta-information that holds for all runs (scenario info etc)
Parameters
----------
scenario: smac.Scenario
scenario file to get information from
"""
# general stores information that holds for all runs, runspec holds information on a run-basis
general = OrderedDict()
if len(self.runscontainer.get_budgets()) > 1:
general['# budgets'] = len(self.runscontainer.get_budgets())
if len(self.runscontainer.get_folders()) > 1:
general['# parallel runs'] = len(self.runscontainer.get_folders())
# Scenario related
general['# parameters'] = len(scenario.cs.get_hyperparameters())
general['Deterministic target algorithm'] = scenario.deterministic
general['Optimized run objective'] = scenario.run_obj
if scenario.cutoff or scenario.run_obj == 'runtime':
general['Cutoff'] = scenario.cutoff
if any([str(lim)!='inf' for lim in [scenario.wallclock_limit, scenario.ta_run_limit, scenario.algo_runs_timelimit]]):
general['Walltime budget'] = scenario.wallclock_limit
general['Runcount budget'] = scenario.ta_run_limit
general['CPU budget'] = scenario.algo_runs_timelimit
# Instances
num_train, num_test = [len([i for i in insts if i]) for insts in [scenario.train_insts, scenario.test_insts]]
if num_train > 0 or num_test > 0:
general['# instances (train/test)'] = "{} / {}".format(num_train, num_test)
# Features
num_feats = scenario.n_features if scenario.feature_dict else 0
num_dup_feats = 0
if scenario.feature_dict:
dup_feats = DataFrame(scenario.feature_array)
num_dup_feats = len(dup_feats[dup_feats.duplicated()]) # only contains train instances
if num_feats > 0:
general['# features (duplicates)'] = "{} ({})".format(num_feats, num_dup_feats)
general['----------'] = '----------'
combined_run = self.runscontainer.get_aggregated(False, False)[0]
combined_stats = self._stats_for_run(combined_run.original_runhistory,
combined_run.scenario,
combined_run.incumbent)
for k, v in combined_stats.items():
general[k] = v
return general
def _runspec_dict(self, identify='parallel'):
"""
identify-keyword specifies whether to use path or budget for name
"""
if identify not in ['parallel', 'budget']:
raise ValueError("illegal use of _runspec_dict")
if (identify == 'budget' and len(self.runscontainer.get_budgets()) <= 1 and
(self.runscontainer.get_budgets() is None or self.runscontainer.get_budgets()[0] == 0.0)):
return False
if (identify == 'parallel' and len(self.runscontainer.get_folders()) <= 1):
return False
runspec = OrderedDict()
runs = self.runscontainer.get_aggregated(keep_folders=identify=='parallel',
keep_budgets=identify=='budget')
for idx, run in enumerate(runs):
if identify == 'budget' and len(set(run.reduced_to_budgets)) != 1:
raise ValueError("Runs processed here should only have a single budget specified (%s)." %
run.reduced_to_budgets)
self.logger.debug("Path to folder for run no. {}: {}".format(idx, str(run.path_to_folder)))
name = os.path.basename(run.path_to_folder) if identify == 'parallel' else str(run.reduced_to_budgets[0])
runspec[name] = self._stats_for_run(run.original_runhistory,
run.scenario,
run.incumbent)
return runspec
def _configspace(self, cs):
""" Return configspace in table-format """
d = OrderedDict([("Parameter", []),
("Type", []),
("Range/Choices", []),
("Default", [])]
)
for hp in cs.get_hyperparameters():
d["Parameter"].append(hp.name)
d["Type"].append(type(hp).__name__)
if isinstance(hp, NumericalHyperparameter):
d["Range/Choices"].append("[{}, {}]{}".format(hp.lower, hp.upper, ' (log)' if hp.log else ''))
elif isinstance(hp, CategoricalHyperparameter):
d["Range/Choices"].append("{}".format(hp.choices))
elif isinstance(hp, OrdinalHyperparameter):
d["Range/Choices"].append("{}".format(hp.sequence))
elif isinstance(hp, Constant):
d["Range/Choices"].append("{}".format(hp.default_value))
else:
d["Range/Choices"].append("?")
d["Default"].append(hp.default_value)
return d
| 50.362245 | 145 | 0.594063 | import os
from collections import OrderedDict
import numpy as np
from ConfigSpace.hyperparameters import NumericalHyperparameter, CategoricalHyperparameter, OrdinalHyperparameter, \
Constant
from pandas import DataFrame
from cave.analyzer.base_analyzer import BaseAnalyzer
from cave.utils.helpers import get_config_origin
class OverviewTable(BaseAnalyzer):
    """
    Meta data, i.e. number of instances and parameters as well as configuration budget. Statistics apply to the
    best run, if multiple configurator runs are compared.
    """
    def __init__(self, runscontainer):
        """Create the analyzer and immediately generate all tables (side effect of construction)."""
        super().__init__(runscontainer)
        self.output_dir = runscontainer.output_dir
        self.run()
    def get_name(self):
        """Display name of this analyzer in the report."""
        return "Meta Data"
    def run(self):
        """ Generate tables.

        Fills ``self.result`` with HTML tables: "General" scenario info,
        optional per-parallel-run / per-budget statistics, and the
        configuration space. Returns ``self.result``.
        """
        scenario = self.runscontainer.scenario
        # General infos
        general_dict = self._general_dict(scenario)
        html_table_general = DataFrame(data=OrderedDict([('General', general_dict)]))
        html_table_general = html_table_general.reindex(list(general_dict.keys()))
        html_table_general = html_table_general.to_html(escape=False, header=False, justify='left')
        self.result["General"] = {"table": html_table_general,
                                  "tooltip": "General information about the optimization scenario."}
        # Run-specific / budget specific infos
        for mode in ['parallel', 'budget']:
            runspec_dict = self._runspec_dict(identify=mode)
            if not runspec_dict:
                continue
            order_spec = list(list(runspec_dict.values())[0].keys())  # Get keys of any sub-dict for order
            html_table_specific = DataFrame(runspec_dict)
            html_table_specific = html_table_specific.reindex(order_spec)
            html_table_specific = html_table_specific.to_html(escape=False, justify='left')
            if mode == 'parallel':
                self.result["Parallel Runs"] = {"table": html_table_specific,
                                                "tooltip": "Information to individual parallel runs."}
            if mode == 'budget':
                self.result["Budgets"] = {"table": html_table_specific,
                                          "tooltip": "Statistics related to the budgets used in this optimization."}
        # ConfigSpace in tabular form
        cs_dict = self._configspace(scenario.cs)
        cs_table = DataFrame(data=cs_dict)
        html_table_cs = cs_table.to_html(escape=False, justify='left', index=False)
        self.result["Configuration Space"] = {"table": html_table_cs,
                                              "tooltip": "The parameter configuration space. "
                                                         "(See github.com/automl/ConfigSpace)"}
        return self.result
    def _general_dict(self, scenario):
        """ Generate the meta-information that holds for all runs (scenario info etc)
        Parameters
        ----------
        scenario: smac.Scenario
            scenario file to get information from

        Returns
        -------
        general: OrderedDict
            label -> value mapping rendered as the "General" table; optional
            entries (budgets, limits, instances, features) are only included
            when present in the scenario.
        """
        # general stores information that holds for all runs, runspec holds information on a run-basis
        general = OrderedDict()
        if len(self.runscontainer.get_budgets()) > 1:
            general['# budgets'] = len(self.runscontainer.get_budgets())
        if len(self.runscontainer.get_folders()) > 1:
            general['# parallel runs'] = len(self.runscontainer.get_folders())
        # Scenario related
        general['# parameters'] = len(scenario.cs.get_hyperparameters())
        general['Deterministic target algorithm'] = scenario.deterministic
        general['Optimized run objective'] = scenario.run_obj
        if scenario.cutoff or scenario.run_obj == 'runtime':
            general['Cutoff'] = scenario.cutoff
        # Only show resource limits if at least one of them is finite
        if any([str(lim)!='inf' for lim in [scenario.wallclock_limit, scenario.ta_run_limit, scenario.algo_runs_timelimit]]):
            general['Walltime budget'] = scenario.wallclock_limit
            general['Runcount budget'] = scenario.ta_run_limit
            general['CPU budget'] = scenario.algo_runs_timelimit
        # Instances (count only non-empty instance names)
        num_train, num_test = [len([i for i in insts if i]) for insts in [scenario.train_insts, scenario.test_insts]]
        if num_train > 0 or num_test > 0:
            general['# instances (train/test)'] = "{} / {}".format(num_train, num_test)
        # Features
        num_feats = scenario.n_features if scenario.feature_dict else 0
        num_dup_feats = 0
        if scenario.feature_dict:
            dup_feats = DataFrame(scenario.feature_array)
            num_dup_feats = len(dup_feats[dup_feats.duplicated()])  # only contains train instances
        if num_feats > 0:
            general['# features (duplicates)'] = "{} ({})".format(num_feats, num_dup_feats)
        # Visual separator row, followed by stats aggregated over all runs
        general['----------'] = '----------'
        combined_run = self.runscontainer.get_aggregated(False, False)[0]
        combined_stats = self._stats_for_run(combined_run.original_runhistory,
                                             combined_run.scenario,
                                             combined_run.incumbent)
        for k, v in combined_stats.items():
            general[k] = v
        return general
    def _runspec_dict(self, identify='parallel'):
        """
        identify-keyword specifies whether to use path or budget for name

        Returns a mapping {run-name -> per-run stats dict}, or False when
        there is nothing to distinguish (single run / single trivial budget).
        """
        if identify not in ['parallel', 'budget']:
            raise ValueError("illegal use of _runspec_dict")
        # NOTE(review): this guard only skips budgets when there is <= 1 budget
        # AND it is None/0.0 — confirm whether a single non-zero budget should
        # also be skipped.
        if (identify == 'budget' and len(self.runscontainer.get_budgets()) <= 1 and
                (self.runscontainer.get_budgets() is None or self.runscontainer.get_budgets()[0] == 0.0)):
            return False
        if (identify == 'parallel' and len(self.runscontainer.get_folders()) <= 1):
            return False
        runspec = OrderedDict()
        runs = self.runscontainer.get_aggregated(keep_folders=identify=='parallel',
                                                 keep_budgets=identify=='budget')
        for idx, run in enumerate(runs):
            if identify == 'budget' and len(set(run.reduced_to_budgets)) != 1:
                raise ValueError("Runs processed here should only have a single budget specified (%s)." %
                                 run.reduced_to_budgets)
            self.logger.debug("Path to folder for run no. {}: {}".format(idx, str(run.path_to_folder)))
            # Column header: folder name for parallel runs, budget value otherwise
            name = os.path.basename(run.path_to_folder) if identify == 'parallel' else str(run.reduced_to_budgets[0])
            runspec[name] = self._stats_for_run(run.original_runhistory,
                                                run.scenario,
                                                run.incumbent)
        return runspec
    def _stats_for_run(self, rh, scenario, incumbent):
        """Compute per-run statistics (runtimes, evaluation counts, config origins).

        Parameters
        ----------
        rh: RunHistory
            runhistory to derive the statistics from
        scenario: smac.Scenario
            scenario (provides the configspace and determinism flag)
        incumbent: Configuration
            final incumbent of the run

        Returns
        -------
        result: OrderedDict
            label -> formatted-string/number mapping, one table column
        """
        result = OrderedDict()
        all_configs = rh.get_all_configs()
        default = scenario.cs.get_default_configuration()
        # Runtime statistics
        all_ta_runtimes = [run_value.time for run_value in rh.data.values()]
        result['Total time spent evaluating configurations'] = "{:.2f} sec".format(np.sum(all_ta_runtimes))
        result['Average time per configuration (mean / std)'] = '{:5.2f} sec (± {:5.2f})'.format(np.mean(all_ta_runtimes),
                                                                                                 np.std(all_ta_runtimes))
        # Number of evaluations (per-run breakdown only for non-deterministic targets)
        ta_evals = [len(rh.get_runs_for_config(c, only_max_observed_budget=True)) for c in all_configs]
        result['# evaluated configurations'] = len(all_configs)
        if not scenario.deterministic:
            result['# evaluations in total'] = np.sum(ta_evals)
            result['# evaluations for default/incumbent'] = "{}/{}".format(len(rh.get_runs_for_config(default, only_max_observed_budget=True)),
                                                                           len(rh.get_runs_for_config(incumbent, only_max_observed_budget=True)))
            result['# runs per configuration (min, mean and max)'] = "{}/{:.2f}/{}".format(
                np.min(ta_evals), np.mean(ta_evals), np.max(ta_evals))
        # Info about configurations
        num_changed_params = len([p for p in scenario.cs.get_hyperparameter_names() if default[p] != incumbent[p]])
        result['# changed parameters (default to incumbent)'] = num_changed_params
        # Origins: frequency of each config origin; omitted if all are "Unknown"
        origins = [get_config_origin(c) for c in all_configs]
        origins = {o : origins.count(o) for o in set(origins)}
        if not (list(origins.keys()) == ["Unknown"]):
            result['Configuration origins'] = ", ".join(['{} : {}'.format(o, n) for o, n in origins.items()])
        return result
    def _configspace(self, cs):
        """ Return configspace in table-format """
        d = OrderedDict([("Parameter", []),
                         ("Type", []),
                         ("Range/Choices", []),
                         ("Default", [])]
                        )
        for hp in cs.get_hyperparameters():
            d["Parameter"].append(hp.name)
            d["Type"].append(type(hp).__name__)
            # Range column depends on the hyperparameter kind
            if isinstance(hp, NumericalHyperparameter):
                d["Range/Choices"].append("[{}, {}]{}".format(hp.lower, hp.upper, ' (log)' if hp.log else ''))
            elif isinstance(hp, CategoricalHyperparameter):
                d["Range/Choices"].append("{}".format(hp.choices))
            elif isinstance(hp, OrdinalHyperparameter):
                d["Range/Choices"].append("{}".format(hp.sequence))
            elif isinstance(hp, Constant):
                d["Range/Choices"].append("{}".format(hp.default_value))
            else:
                d["Range/Choices"].append("?")
            d["Default"].append(hp.default_value)
        return d
| 2,106 | 0 | 80 |
68c6d1b211ab543bdedf130ef99c5784855d0423 | 8,876 | py | Python | text_processing.py | kudari00/LING131FinalProject | 1b6d68a174097cd9491d4cdb492090f8431d2318 | [
"MIT"
] | null | null | null | text_processing.py | kudari00/LING131FinalProject | 1b6d68a174097cd9491d4cdb492090f8431d2318 | [
"MIT"
] | null | null | null | text_processing.py | kudari00/LING131FinalProject | 1b6d68a174097cd9491d4cdb492090f8431d2318 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import random
from sklearn.preprocessing import LabelEncoder
import nltk
from Stemmer import Stemmer
def dataPrepro(raw_text, y_enc):
'''
preprocess data, and tokenize
arg:
raw_test: pandes array, each line contains a text
y_enc: pandas array, each line is the label(1 for spam, 0 for ham)
returns:
data_tokenized: a processed and tokenized data(numpy array),
in the form like below:
[[feature1_value, feature2_value, feature3_value..., label],
...
[feature1_value, feature2_value, feature3_value..., label]]
each feature value defines whether a n-gram(unigram or bigram) is in the sentence
processed: the preprocessed text
'''
# replace e-mail address, url, money symbol, phone number and number
# with emailaddr, httpaddr, moneysymb, phonenum, and number
print('step1: replace emal,url,money symbol,phone number,number with their classes...')
processed = raw_text.str.replace(r'\b[\w\-.]+?@\w+?\.\w{2,4}\b',
' emailaddr ')
processed = processed.str.replace(r'(http[s]?\S+)|(\w+\.[A-Za-z]{2,4}\S*)',
' httpaddr ')
processed = processed.str.replace(r'£|\$', ' moneysymb ')
processed = processed.str.replace(
r'(\+\d{1,2}\s)?\d?[\-(.]?\d{3}\)?[\s.-]?\d{3}[\s.-]?\d{4}\b',
' phonenum ')
processed = processed.str.replace(r'(\s)?\d+(\.\d+)?(\s|\.|\,|\d|\?)', ' num ')
print('done')
# remove punctuations
print('step2: remove punctuations, spaces...')
processed = processed.str.replace(r'[^\w\d\s]', ' ')
processed = processed.str.replace(r'^\s+|\s+?$', '')
processed = processed.str.lower()
print('done')
# remove stop words
# here we define an inline function removeStopWord to generate text without stopwords
print('step3: remove stop words...')
stop_words = set(nltk.corpus.stopwords.words('english'))
processed = processed.apply(removeStopWord)
print('done')
# stemming
# we use our redefined simplified Stemmer to stem
print('step4: stemming...')
simple_porter = Stemmer()
processed = processed.apply(stemming)
print('done')
# replace some odd words by mannual concluded rules
print('step5: replaced with mannual rules...')
mannual_word_map = {
'aaooooright':'alright',
'aww':'aw',
'awww':'aw',
'baaaaaaaabe':'babe',
'baaaaabe':'babe',
'boooo':'boo',
'buzzzz':'buzz',
'daaaaa':'da',
'ffffffffff':'f',
'fffff':'f',
'ffffuuuuuuu':'fu',
'geeee':'gee',
'geeeee':'gee',
'hmm':'hm',
'hmmm':'hm',
'hmmmm':'hm',
'latelyxxx':'late',
'lololo':'lol',
'loooooool':'lol',
'lool':'lol',
'looovvve':'love',
'miiiiiiissssssssss':'miss',
'mmm':'mm',
'mmmm':'mm',
'mmmmm':'mm',
'mmmmmm':'mm',
'mmmmmmm':'mm',
'nooooooo':'no',
'noooooooo':'no',
'oooh':'ooh',
'oooooh':'ooh',
'ooooooh':'ooh',
'pleassssssseeeeee':'please',
'sooo':'soo',
'soooo':'soo',
'sooooo':'soo',
'ummmmmaah':'nmma',
'xxxxx':'xxxx',
'xxxxxx':'xxxx',
'xxxxxxx':'xxxx',
'xxxxxxxx':'xxxx',
'xxxxxxxxx':'xxxx',
'xxxxxxxxxxxxxx':'xxxx',
}
processed = processed.apply(mannualReplace)
print('done')
# replace rare word with <unk>
print('step6: replace rare words with <unk>...')
# replace number again
processed = processed.str.replace(r'\s\d+(\.\d+)?(\s|\.|\,|\d|\?)', ' number ')
vocab = {}
# building inventory
for sent in processed:
words = sent.split(' ')
for word in words:
if(word not in vocab.keys()):
vocab[word] = 1
else:
vocab[word] += 1
# sorted words by their frequency, from high to low
sorted_list = sorted(vocab.items(), key=lambda x: x[1], reverse=True)
# print(sorted_list[:-1000])
preserved_list = []
for i in range(len(sorted_list)):
preserved_list.append(sorted_list[i][0])
# print('size of vocab:',len(preserved_list))
# preserve the first 6000 words in preserved_list
preserved_list = preserved_list[:6000]
processed = processed.apply(replaceUNK)
print('done')
# To avoid over fitting, add some noise to the modal to increase robustness
print('step7: add noise....')
spam_list = []
ham_list = []
# seperate our current data to ham and spam list
for i in range(len(processed)):
if(y_enc[i] == 1):
spam_list.append(processed[i].split(' '))
else:
ham_list.append(processed[i].split(' '))
# using dynamic programming to define a function to calculate edit distance
# processing data
for i in range(len(processed)):
if i % 500 == 0:
print('proceeding data',i,'to',min(i+499,len(processed)))
sent = processed[i].split(' ')
if y_enc[i] == 1:
for s in spam_list:
edit_dist = editDistance(sent, s)
if (edit_dist > 0) and (edit_dist < 3):
index = random.randint(0, len(s)-1)
if index < len(sent):
sent[index] = s[index]
else:
sent.append(s[index])
processed[i] = ' '.join(sent)
break
else:
for s in ham_list:
edit_dist = editDistance(sent, s)
if (edit_dist > 0) and (edit_dist < 3):
index = random.randint(0, len(s)-1)
if index < len(sent):
sent[index] = s[index]
else:
sent.append(s[index])
processed[i] = ' '.join(sent)
break
print('done')
# then we begin to tokenize
print('tokenizing...')
# construct the mapping from n-grams to feature indecies
n_gram_map = {}
for sent in processed:
cnt = 0
sent = sent.split(' ')
for n in [1, 2]:
for i in range(len(sent)-n):
gram = ' '.join(sent[i:i+n])
if gram not in n_gram_map.keys():
n_gram_map[gram] = cnt
cnt += 1
# print(len(n_gram_map)) #there are totaly 31493 n-grams
# begin tokenizing
data_tokenized = []
for i in range(len(processed)):
feature_vec = [0] * 31494
sent = processed[i].split(' ')
for n in [1, 2]:
for i in range(len(sent) - n):
gram = ' '.join(sent[i:i + n])
feature_vec[n_gram_map[gram]] = 1
feature_vec[-1] = int(y_enc[i])
data_tokenized.append(feature_vec)
data_tokenized = np.array(data_tokenized)
print('done, the data size is:', data_tokenized.shape[0], 'the feature size is ', data_tokenized.shape[1] - 1)
return data_tokenized, processed
| 34.138462 | 114 | 0.522082 | import pandas as pd
import numpy as np
import random
from sklearn.preprocessing import LabelEncoder
import nltk
from Stemmer import Stemmer
def dataPrepro(raw_text, y_enc):
    '''
    Preprocess and tokenize a corpus of texts for spam classification.

    Pipeline: (1) replace emails/urls/money symbols/phone numbers/numbers
    with class tokens, (2) strip punctuation/whitespace and lowercase,
    (3) remove stop words, (4) stem, (5) apply manual word-normalization
    rules, (6) replace rare words with <unk>, (7) inject noise by copying
    words from near-duplicate sentences of the same class, then build
    binary unigram/bigram feature vectors.

    arg:
        raw_text: pandas Series, each entry contains a text
        y_enc: pandas Series, each entry is the label (1 for spam, 0 for ham)
    returns:
        data_tokenized: a processed and tokenized data(numpy array),
            in the form like below:
            [[feature1_value, feature2_value, feature3_value..., label],
            ...
            [feature1_value, feature2_value, feature3_value..., label]]
            each feature value defines whether a n-gram(unigram or bigram) is in the sentence
        processed: the preprocessed text (pandas Series)
    '''
    # replace e-mail address, url, money symbol, phone number and number
    # with emailaddr, httpaddr, moneysymb, phonenum, and number
    print('step1: replace emal,url,money symbol,phone number,number with their classes...')
    processed = raw_text.str.replace(r'\b[\w\-.]+?@\w+?\.\w{2,4}\b',
                                     ' emailaddr ')
    processed = processed.str.replace(r'(http[s]?\S+)|(\w+\.[A-Za-z]{2,4}\S*)',
                                      ' httpaddr ')
    processed = processed.str.replace(r'£|\$', ' moneysymb ')
    processed = processed.str.replace(
        r'(\+\d{1,2}\s)?\d?[\-(.]?\d{3}\)?[\s.-]?\d{3}[\s.-]?\d{4}\b',
        ' phonenum ')
    processed = processed.str.replace(r'(\s)?\d+(\.\d+)?(\s|\.|\,|\d|\?)', ' num ')
    print('done')
    # remove punctuations
    print('step2: remove punctuations, spaces...')
    processed = processed.str.replace(r'[^\w\d\s]', ' ')
    processed = processed.str.replace(r'^\s+|\s+?$', '')
    processed = processed.str.lower()
    print('done')
    # remove stop words
    # here we define an inline function removeStopWord to generate text without stopwords
    print('step3: remove stop words...')
    stop_words = set(nltk.corpus.stopwords.words('english'))
    def removeStopWord(sent):
        # Drop NLTK English stop words, keep original word order
        sent = sent.split(' ')
        word_list = [word for word in sent if word not in stop_words]
        return ' '.join(word_list)
    processed = processed.apply(removeStopWord)
    print('done')
    # stemming
    # we use our redefined simplified Stemmer to stem
    print('step4: stemming...')
    simple_porter = Stemmer()
    def stemming(sent):
        # Apply the project's simplified Porter stemmer word by word
        sent = sent.split(' ')
        word_list = [simple_porter.stem(word) for word in sent]
        return ' '.join(word_list)
    processed = processed.apply(stemming)
    print('done')
    # replace some odd words by mannual concluded rules
    print('step5: replaced with mannual rules...')
    # Hand-collected normalizations for elongated/slang spellings seen in the corpus
    mannual_word_map = {
        'aaooooright':'alright',
        'aww':'aw',
        'awww':'aw',
        'baaaaaaaabe':'babe',
        'baaaaabe':'babe',
        'boooo':'boo',
        'buzzzz':'buzz',
        'daaaaa':'da',
        'ffffffffff':'f',
        'fffff':'f',
        'ffffuuuuuuu':'fu',
        'geeee':'gee',
        'geeeee':'gee',
        'hmm':'hm',
        'hmmm':'hm',
        'hmmmm':'hm',
        'latelyxxx':'late',
        'lololo':'lol',
        'loooooool':'lol',
        'lool':'lol',
        'looovvve':'love',
        'miiiiiiissssssssss':'miss',
        'mmm':'mm',
        'mmmm':'mm',
        'mmmmm':'mm',
        'mmmmmm':'mm',
        'mmmmmmm':'mm',
        'nooooooo':'no',
        'noooooooo':'no',
        'oooh':'ooh',
        'oooooh':'ooh',
        'ooooooh':'ooh',
        'pleassssssseeeeee':'please',
        'sooo':'soo',
        'soooo':'soo',
        'sooooo':'soo',
        'ummmmmaah':'nmma',
        'xxxxx':'xxxx',
        'xxxxxx':'xxxx',
        'xxxxxxx':'xxxx',
        'xxxxxxxx':'xxxx',
        'xxxxxxxxx':'xxxx',
        'xxxxxxxxxxxxxx':'xxxx',
    }
    def mannualReplace(sent):
        # Substitute words through mannual_word_map, leave others untouched
        sent = sent.split(' ')
        word_list = []
        for word in sent:
            if(word in mannual_word_map.keys()):
                word_list.append(mannual_word_map[word])
            else:
                word_list.append(word)
        return ' '.join(word_list)
    processed = processed.apply(mannualReplace)
    print('done')
    # replace rare word with <unk>
    print('step6: replace rare words with <unk>...')
    # replace number again
    processed = processed.str.replace(r'\s\d+(\.\d+)?(\s|\.|\,|\d|\?)', ' number ')
    vocab = {}
    # building inventory
    for sent in processed:
        words = sent.split(' ')
        for word in words:
            if(word not in vocab.keys()):
                vocab[word] = 1
            else:
                vocab[word] += 1
    # sorted words by their frequency, from high to low
    sorted_list = sorted(vocab.items(), key=lambda x: x[1], reverse=True)
    # print(sorted_list[:-1000])
    preserved_list = []
    for i in range(len(sorted_list)):
        preserved_list.append(sorted_list[i][0])
    # print('size of vocab:',len(preserved_list))
    # preserve the first 6000 words in preserved_list
    preserved_list = preserved_list[:6000]
    def replaceUNK(sent):
        # Any word outside the 6000 most frequent becomes the <unk> token
        sent = sent.split(' ')
        for i in range(len(sent)):
            if(sent[i] not in preserved_list):
                sent[i] = '<unk>'
        return ' '.join(sent)
    processed = processed.apply(replaceUNK)
    print('done')
    # To avoid over fitting, add some noise to the modal to increase robustness
    print('step7: add noise....')
    spam_list = []
    ham_list = []
    # seperate our current data to ham and spam list
    for i in range(len(processed)):
        if(y_enc[i] == 1):
            spam_list.append(processed[i].split(' '))
        else:
            ham_list.append(processed[i].split(' '))
    # using dynamic programming to define a function to calculate edit distance
    def editDistance(l1,l2):
        # Word-level Levenshtein distance between two token lists,
        # computed over a flat (len(l1)+1) x (len(l2)+1) DP table
        len1 = len(l1) + 1
        len2 = len(l2) + 1
        # create matrix
        e = [0 for n in range(len1 * len2)]
        # first row of the matrix
        for i in range(len1):
            e[i] = i
        # first coloum of the matrix
        for j in range(0, len(e), len1):
            if(j % len1 == 0):
                e[j] = j // len1
        # get edit distance by state transit formula
        for i in range(1,len1):
            for j in range(1,len2):
                if l1[i-1] == l2[j-1]:
                    cost = 0
                else:
                    cost = 1
                e[j*len1+i] = min(e[(j-1)*len1+i]+1,
                                  e[j*len1+(i-1)]+1,
                                  e[(j-1)*len1+(i-1)] + cost)
        return e[-1]
    # processing data
    # For each sentence, find the first same-class sentence within edit
    # distance 1-2 and copy one randomly chosen word from it (noise injection)
    for i in range(len(processed)):
        if i % 500 == 0:
            print('proceeding data',i,'to',min(i+499,len(processed)))
        sent = processed[i].split(' ')
        if y_enc[i] == 1:
            for s in spam_list:
                edit_dist = editDistance(sent, s)
                if (edit_dist > 0) and (edit_dist < 3):
                    index = random.randint(0, len(s)-1)
                    if index < len(sent):
                        sent[index] = s[index]
                    else:
                        sent.append(s[index])
                    processed[i] = ' '.join(sent)
                    break
        else:
            for s in ham_list:
                edit_dist = editDistance(sent, s)
                if (edit_dist > 0) and (edit_dist < 3):
                    index = random.randint(0, len(s)-1)
                    if index < len(sent):
                        sent[index] = s[index]
                    else:
                        sent.append(s[index])
                    processed[i] = ' '.join(sent)
                    break
    print('done')
    # then we begin to tokenize
    print('tokenizing...')
    # construct the mapping from n-grams to feature indecies
    n_gram_map = {}
    for sent in processed:
        # NOTE(review): cnt is reset to 0 for every sentence, so n-grams first
        # seen in different sentences can be assigned the same feature index —
        # looks unintended; verify against the hard-coded feature size below.
        cnt = 0
        sent = sent.split(' ')
        for n in [1, 2]:
            for i in range(len(sent)-n):
                gram = ' '.join(sent[i:i+n])
                if gram not in n_gram_map.keys():
                    n_gram_map[gram] = cnt
                    cnt += 1
    # print(len(n_gram_map)) #there are totaly 31493 n-grams
    # begin tokenizing
    data_tokenized = []
    for i in range(len(processed)):
        # 31493 n-gram slots + 1 trailing label slot (hard-coded for this corpus)
        feature_vec = [0] * 31494
        sent = processed[i].split(' ')
        for n in [1, 2]:
            # NOTE(review): this inner loop variable i shadows the outer
            # sentence index i, so y_enc[i] below reads the label at the
            # shadowed index — likely a bug; verify.
            for i in range(len(sent) - n):
                gram = ' '.join(sent[i:i + n])
                feature_vec[n_gram_map[gram]] = 1
        feature_vec[-1] = int(y_enc[i])
        data_tokenized.append(feature_vec)
    data_tokenized = np.array(data_tokenized)
    print('done, the data size is:', data_tokenized.shape[0], 'the feature size is ', data_tokenized.shape[1] - 1)
    return data_tokenized, processed
| 1,488 | 0 | 131 |
865aa6593c6ae166633998ce4bc023c15ca5a4b7 | 1,339 | py | Python | word2morph/entities/sample.py | MartinXPN/WordSegmentation | 26ea79275c0916b794be98161341c90471f50a21 | [
"MIT"
] | 1 | 2021-02-22T13:02:38.000Z | 2021-02-22T13:02:38.000Z | word2morph/entities/sample.py | MartinXPN/WordSegmentation | 26ea79275c0916b794be98161341c90471f50a21 | [
"MIT"
] | 1 | 2019-04-19T17:06:41.000Z | 2019-04-19T17:06:41.000Z | word2morph/entities/sample.py | MartinXPN/WordSegmentation | 26ea79275c0916b794be98161341c90471f50a21 | [
"MIT"
] | null | null | null | from typing import Tuple, Optional
# дум:ROOT
| 33.475 | 95 | 0.62584 | from typing import Tuple, Optional
class Segment(object):
    """A single morpheme of a word together with its morphological type.

    Rendered as ``segment:TYPE`` (e.g. ``дум:ROOT``), or just the segment
    when no type is given.
    """

    def __init__(self, segment: str, segment_type: str = None):
        self.segment = segment        # surface form, e.g. дум
        self.type = segment_type      # morpheme type, e.g. ROOT (None if untyped)

    def __str__(self):
        # Untyped segments render without the ":TYPE" suffix
        if self.type is None:
            return f'{self.segment}'
        return f'{self.segment}:{self.type}'

    def __repr__(self):
        return f'Segment({self.segment!r}, {self.type!r})'

    def __eq__(self, other):
        # Return NotImplemented (instead of raising AttributeError) for
        # non-Segment operands so ==/!= degrade gracefully.
        if not isinstance(other, Segment):
            return NotImplemented
        return self.segment == other.segment and self.type == other.type

    def __hash__(self):
        # Defining __eq__ alone would set __hash__ to None; keep Segments
        # hashable (consistent with __eq__) so they can live in sets/dicts.
        return hash((self.segment, self.type))
class Sample(object):
    """A word paired with its morphological segmentation.

    e.g. word=одуматься segments=о:PREF/дум:ROOT/а:SUFF/ть:SUFF/ся:POSTFIX,
    i.e. segments=((о, PREF), (дум, ROOT), (а, SUFF), (ть, SUFF), (ся, POSTFIX))
    """

    # 'Segment' is quoted so the annotations don't depend on definition order.
    def __init__(self, word: str, segments: Tuple['Segment', ...]):
        self.word = word
        self.segments = segments

    @property
    def segment_types(self) -> Tuple[Optional[str], ...]:
        """Types of the segments in order, e.g. ('PREF', 'ROOT', ...)."""
        return tuple([segment.type for segment in self.segments])

    @property
    def segment_parts(self) -> Tuple[str, ...]:
        """Surface forms of the segments in order, e.g. ('о', 'дум', ...)."""
        return tuple([segment.segment for segment in self.segments])

    def __str__(self):
        # word<TAB>seg1/seg2/... using each segment's own string form
        segments_str = '/'.join([str(segment) for segment in self.segments])
        return f'{self.word}\t{segments_str}'

    def __eq__(self, other):
        # Return NotImplemented (instead of raising AttributeError) for
        # non-Sample operands so ==/!= degrade gracefully.
        if not isinstance(other, Sample):
            return NotImplemented
        return self.word == other.word and self.segments == other.segments
47816cbbcf9b6c74f2358702c988d40cc9df0f3e | 85 | py | Python | mne_bids/report/__init__.py | Swastyy/mne-bids | 87c7165b978cd064c5001d2737a74c2fe2ce1c01 | [
"BSD-3-Clause"
] | null | null | null | mne_bids/report/__init__.py | Swastyy/mne-bids | 87c7165b978cd064c5001d2737a74c2fe2ce1c01 | [
"BSD-3-Clause"
] | null | null | null | mne_bids/report/__init__.py | Swastyy/mne-bids | 87c7165b978cd064c5001d2737a74c2fe2ce1c01 | [
"BSD-3-Clause"
] | 1 | 2022-02-21T09:57:36.000Z | 2022-02-21T09:57:36.000Z | """Create a summary report of the BIDS dataset."""
from ._report import make_report
| 21.25 | 50 | 0.752941 | """Create a summary report of the BIDS dataset."""
from ._report import make_report
| 0 | 0 | 0 |
7ab12ad246295aa2b2b634d866cb79033f5e89a4 | 9,017 | py | Python | tests/compute_area_test.py | koritsky/pointconv | df0fd6ebf36011a6ef544fbc746327d49e09fda9 | [
"MIT"
] | 1 | 2022-01-14T03:49:42.000Z | 2022-01-14T03:49:42.000Z | tests/compute_area_test.py | koritsky/pointconv | df0fd6ebf36011a6ef544fbc746327d49e09fda9 | [
"MIT"
] | null | null | null | tests/compute_area_test.py | koritsky/pointconv | df0fd6ebf36011a6ef544fbc746327d49e09fda9 | [
"MIT"
] | null | null | null | import unittest
import math
import numpy as np
from functools import reduce
from unittest.case import expectedFailure
import nibabel
def euler2mat(z=0, y=0, x=0):
    ''' Return matrix for rotations around z, y and x axes

    Rotations are applied in the order z (first), then y, then x, and are
    counter-clockwise when looking along the axis from the positive end.

    Parameters
    ----------
    z : scalar
       Rotation angle in radians around z-axis (performed first)
    y : scalar
       Rotation angle in radians around y-axis
    x : scalar
       Rotation angle in radians around x-axis (performed last)

    Returns
    -------
    M : array shape (3,3)
       Rotation matrix giving the same rotation as the given angles;
       the identity matrix when all angles are zero.  Apply it to
       column vectors on the right: ``np.dot(M, vec)``.
    '''
    # Compose incrementally: left-multiplying each new axis rotation
    # yields M = Mx . My . Mz, i.e. the z rotation acts first.
    composed = np.eye(3)
    if z:
        cz, sz = math.cos(z), math.sin(z)
        rot_z = np.array([[cz, -sz, 0],
                          [sz, cz, 0],
                          [0, 0, 1]])
        composed = np.dot(rot_z, composed)
    if y:
        cy, sy = math.cos(y), math.sin(y)
        rot_y = np.array([[cy, 0, sy],
                          [0, 1, 0],
                          [-sy, 0, cy]])
        composed = np.dot(rot_y, composed)
    if x:
        cx, sx = math.cos(x), math.sin(x)
        rot_x = np.array([[1, 0, 0],
                          [0, cx, -sx],
                          [0, sx, cx]])
        composed = np.dot(rot_x, composed)
    return composed
def mat2euler(M, cy_thresh=None):
    ''' Discover Euler angle vector from 3x3 matrix

    Uses the z, then y, then x rotation convention of this module.

    Parameters
    ----------
    M : array-like, shape (3,3)
       Rotation matrix.
    cy_thresh : None or scalar, optional
       threshold below which to give up on straightforward arctan for
       estimating x rotation.  If None (default), estimate from
       precision of input.

    Returns
    -------
    z : scalar
    y : scalar
    x : scalar
       Rotations in radians around z, y, x axes, respectively

    Notes
    -----
    Derived from the composed z-then-y-then-x rotation matrix::

      [ cos(y)*cos(z), -cos(y)*sin(z), sin(y)],
      [cos(x)*sin(z) + cos(z)*sin(x)*sin(y), cos(x)*cos(z) - sin(x)*sin(y)*sin(z), -cos(y)*sin(x)],
      [sin(x)*sin(z) - cos(x)*cos(z)*sin(y), cos(z)*sin(x) + cos(x)*sin(y)*sin(z), cos(x)*cos(y)]

    giving z = atan2(-r12, r11), y = asin(r13), x = atan2(-r23, r33).
    When cos(y) is close to zero both z and x approach the unstable
    atan2(0, 0); the ``cy`` fix below follows EulerAngles.c by Ken
    Shoemake (*Graphics Gems IV*, Academic Press, 1994, ISBN 0123361559),
    which "can be used without restrictions" per
    http://www.graphicsgems.org/
    '''
    M = np.asarray(M)
    if cy_thresh is None:
        try:
            cy_thresh = np.finfo(M.dtype).eps * 4
        except ValueError:
            # np.finfo rejects non-float dtypes (e.g. an integer matrix).
            # BUGFIX: this branch previously referenced the undefined name
            # _FLOAT_EPS_4 and raised NameError; fall back to 4 * double eps.
            cy_thresh = np.finfo(np.float64).eps * 4
    r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat
    # cy: sqrt((cos(y)*cos(z))**2 + (cos(x)*cos(y))**2)
    cy = math.sqrt(r33*r33 + r23*r23)
    if cy > cy_thresh: # cos(y) not close to zero, standard form
        z = math.atan2(-r12, r11) # atan2(cos(y)*sin(z), cos(y)*cos(z))
        y = math.atan2(r13, cy) # atan2(sin(y), cy)
        x = math.atan2(-r23, r33) # atan2(cos(y)*sin(x), cos(x)*cos(y))
    else: # cos(y) (close to) zero, so x -> 0.0 (see above)
        # so r21 -> sin(z), r22 -> cos(z) and
        z = math.atan2(r21, r22)
        y = math.atan2(r13, cy) # atan2(sin(y), cy)
        x = 0.0
    return z, y, x
def euler2quat(z=0, y=0, x=0):
    ''' Return quaternion corresponding to these Euler angles

    Uses the z, then y, then x convention of this module.

    Parameters
    ----------
    z : scalar
       Rotation angle in radians around z-axis (performed first)
    y : scalar
       Rotation angle in radians around y-axis
    x : scalar
       Rotation angle in radians around x-axis (performed last)

    Returns
    -------
    quat : array shape (4,)
       Quaternion in w, x, y, z (real, then vector) format

    Notes
    -----
    A rotation of theta about a unit axis corresponds to the quaternion
    (cos(theta/2), sin(theta/2) * axis); the return value is the Hamilton
    product of the three per-axis quaternions, expanded symbolically.
    '''
    half_z, half_y, half_x = z / 2.0, y / 2.0, x / 2.0
    cz, sz = math.cos(half_z), math.sin(half_z)
    cy, sy = math.cos(half_y), math.sin(half_y)
    cx, sx = math.cos(half_x), math.sin(half_x)
    return np.array([
        cx * cy * cz - sx * sy * sz,
        cx * sy * sz + cy * cz * sx,
        cx * cz * sy - sx * cy * sz,
        cx * cy * sz + sx * cz * sy])
def quat2euler(q):
    ''' Return Euler angles corresponding to quaternion `q`

    Parameters
    ----------
    q : 4 element sequence
       w, x, y, z of quaternion

    Returns
    -------
    z : scalar
       Rotation angle in radians around z-axis (performed first)
    y : scalar
       Rotation angle in radians around y-axis
    x : scalar
       Rotation angle in radians around x-axis (performed last)

    Notes
    -----
    It's possible to reduce the amount of calculation a little, by
    combining parts of the ``quat2mat`` and ``mat2euler`` functions, but
    the reduction in computation is small, and the code repetition is
    large.
    '''
    # delayed import to avoid cyclic dependencies
    import nibabel.quaternions as nq
    # quaternion -> 3x3 rotation matrix (via nibabel), then reuse
    # mat2euler to extract (z, y, x) under this module's convention.
    return mat2euler(nq.quat2mat(q))
# Entry point: run the unittest test runner when executed as a script.
if __name__ == '__main__':
    unittest.main()
| 35.360784 | 99 | 0.584341 | import unittest
import math
import numpy as np
from functools import reduce
from unittest.case import expectedFailure
import nibabel
def euler2mat(z=0, y=0, x=0):
    ''' Return matrix for rotations around z, y and x axes

    Rotations are applied in the order z (first), then y, then x, and are
    counter-clockwise when looking along the axis from the positive end.

    Parameters
    ----------
    z : scalar
       Rotation angle in radians around z-axis (performed first)
    y : scalar
       Rotation angle in radians around y-axis
    x : scalar
       Rotation angle in radians around x-axis (performed last)

    Returns
    -------
    M : array shape (3,3)
       Rotation matrix giving the same rotation as the given angles;
       the identity matrix when all angles are zero.  Apply it to
       column vectors on the right: ``np.dot(M, vec)``.
    '''
    # Compose incrementally: left-multiplying each new axis rotation
    # yields M = Mx . My . Mz, i.e. the z rotation acts first.
    composed = np.eye(3)
    if z:
        cz, sz = math.cos(z), math.sin(z)
        rot_z = np.array([[cz, -sz, 0],
                          [sz, cz, 0],
                          [0, 0, 1]])
        composed = np.dot(rot_z, composed)
    if y:
        cy, sy = math.cos(y), math.sin(y)
        rot_y = np.array([[cy, 0, sy],
                          [0, 1, 0],
                          [-sy, 0, cy]])
        composed = np.dot(rot_y, composed)
    if x:
        cx, sx = math.cos(x), math.sin(x)
        rot_x = np.array([[1, 0, 0],
                          [0, cx, -sx],
                          [0, sx, cx]])
        composed = np.dot(rot_x, composed)
    return composed
def mat2euler(M, cy_thresh=None):
    ''' Discover Euler angle vector from 3x3 matrix

    Uses the z, then y, then x rotation convention of this module.

    Parameters
    ----------
    M : array-like, shape (3,3)
       Rotation matrix.
    cy_thresh : None or scalar, optional
       threshold below which to give up on straightforward arctan for
       estimating x rotation.  If None (default), estimate from
       precision of input.

    Returns
    -------
    z : scalar
    y : scalar
    x : scalar
       Rotations in radians around z, y, x axes, respectively

    Notes
    -----
    Derived from the composed z-then-y-then-x rotation matrix::

      [ cos(y)*cos(z), -cos(y)*sin(z), sin(y)],
      [cos(x)*sin(z) + cos(z)*sin(x)*sin(y), cos(x)*cos(z) - sin(x)*sin(y)*sin(z), -cos(y)*sin(x)],
      [sin(x)*sin(z) - cos(x)*cos(z)*sin(y), cos(z)*sin(x) + cos(x)*sin(y)*sin(z), cos(x)*cos(y)]

    giving z = atan2(-r12, r11), y = asin(r13), x = atan2(-r23, r33).
    When cos(y) is close to zero both z and x approach the unstable
    atan2(0, 0); the ``cy`` fix below follows EulerAngles.c by Ken
    Shoemake (*Graphics Gems IV*, Academic Press, 1994, ISBN 0123361559),
    which "can be used without restrictions" per
    http://www.graphicsgems.org/
    '''
    M = np.asarray(M)
    if cy_thresh is None:
        try:
            cy_thresh = np.finfo(M.dtype).eps * 4
        except ValueError:
            # np.finfo rejects non-float dtypes (e.g. an integer matrix).
            # BUGFIX: this branch previously referenced the undefined name
            # _FLOAT_EPS_4 and raised NameError; fall back to 4 * double eps.
            cy_thresh = np.finfo(np.float64).eps * 4
    r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat
    # cy: sqrt((cos(y)*cos(z))**2 + (cos(x)*cos(y))**2)
    cy = math.sqrt(r33*r33 + r23*r23)
    if cy > cy_thresh: # cos(y) not close to zero, standard form
        z = math.atan2(-r12, r11) # atan2(cos(y)*sin(z), cos(y)*cos(z))
        y = math.atan2(r13, cy) # atan2(sin(y), cy)
        x = math.atan2(-r23, r33) # atan2(cos(y)*sin(x), cos(x)*cos(y))
    else: # cos(y) (close to) zero, so x -> 0.0 (see above)
        # so r21 -> sin(z), r22 -> cos(z) and
        z = math.atan2(r21, r22)
        y = math.atan2(r13, cy) # atan2(sin(y), cy)
        x = 0.0
    return z, y, x
def euler2quat(z=0, y=0, x=0):
    ''' Return quaternion corresponding to these Euler angles

    Uses the z, then y, then x convention of this module.

    Parameters
    ----------
    z : scalar
       Rotation angle in radians around z-axis (performed first)
    y : scalar
       Rotation angle in radians around y-axis
    x : scalar
       Rotation angle in radians around x-axis (performed last)

    Returns
    -------
    quat : array shape (4,)
       Quaternion in w, x, y, z (real, then vector) format

    Notes
    -----
    A rotation of theta about a unit axis corresponds to the quaternion
    (cos(theta/2), sin(theta/2) * axis); the return value is the Hamilton
    product of the three per-axis quaternions, expanded symbolically.
    '''
    half_z, half_y, half_x = z / 2.0, y / 2.0, x / 2.0
    cz, sz = math.cos(half_z), math.sin(half_z)
    cy, sy = math.cos(half_y), math.sin(half_y)
    cx, sx = math.cos(half_x), math.sin(half_x)
    return np.array([
        cx * cy * cz - sx * sy * sz,
        cx * sy * sz + cy * cz * sx,
        cx * cz * sy - sx * cy * sz,
        cx * cy * sz + sx * cz * sy])
def quat2euler(q):
    ''' Return Euler angles corresponding to quaternion `q`

    Parameters
    ----------
    q : 4 element sequence
       w, x, y, z of quaternion

    Returns
    -------
    z : scalar
       Rotation angle in radians around z-axis (performed first)
    y : scalar
       Rotation angle in radians around y-axis
    x : scalar
       Rotation angle in radians around x-axis (performed last)

    Notes
    -----
    It's possible to reduce the amount of calculation a little, by
    combining parts of the ``quat2mat`` and ``mat2euler`` functions, but
    the reduction in computation is small, and the code repetition is
    large.
    '''
    # delayed import to avoid cyclic dependencies
    import nibabel.quaternions as nq
    # quaternion -> 3x3 rotation matrix (via nibabel), then reuse
    # mat2euler to extract (z, y, x) under this module's convention.
    return mat2euler(nq.quat2mat(q))
class ComputeAreaTestCase(unittest.TestCase):
    """Round-trip checks for the Euler-angle conversion helpers."""

    def test_valid_input_rad(self):
        # mat2euler should invert euler2mat for a generic angle triple.
        z, y, x = 0.2, 0.1, 1.0
        recovered = list(mat2euler(euler2mat(z, y, x)))
        for got, want in zip(recovered, [z, y, x]):
            self.assertAlmostEqual(got, want, places=3)

    def test_valid_input_M(self):
        # euler2mat should invert mat2euler; compare Frobenius norms
        # (trace of M.M^T) of the rebuilt and original matrices.
        M = np.eye(3)
        angles = mat2euler(M)
        rebuilt = euler2mat(angles[0], angles[1], angles[2])
        self.assertAlmostEqual(np.trace(np.dot(rebuilt, rebuilt.T)),
                               np.trace(np.dot(M, M.T)), places=3)

    def test_valid_input_rad_1(self):
        # quat2euler should invert euler2quat for the same angle triple.
        z, y, x = 0.2, 0.1, 1.0
        recovered = list(quat2euler(euler2quat(z, y, x)))
        for got, want in zip(recovered, [z, y, x]):
            self.assertAlmostEqual(got, want, places=3)
# Entry point: run the unittest test runner when executed as a script.
if __name__ == '__main__':
    unittest.main()
| 1,049 | 24 | 103 |
fbf3171e11de8aad5446b85dd6e72062791338c3 | 3,080 | py | Python | server/mainapp/aws.py | shvam0000/MusicApp | e5a86b6cebc6a123447085c58c3cc286e6b582c4 | [
"MIT"
] | 5 | 2022-02-22T16:53:20.000Z | 2022-03-22T17:23:51.000Z | server/mainapp/aws.py | shvam0000/MusicApp | e5a86b6cebc6a123447085c58c3cc286e6b582c4 | [
"MIT"
] | 12 | 2021-11-15T15:47:20.000Z | 2022-03-17T12:52:08.000Z | server/mainapp/aws.py | shvam0000/MusicApp | e5a86b6cebc6a123447085c58c3cc286e6b582c4 | [
"MIT"
] | 9 | 2021-10-18T14:31:41.000Z | 2022-03-12T18:23:52.000Z | from boto3.session import Session
from rest_framework import status
from django.http import response
import botocore
import boto3
from core.settings import (
AWS_STORAGE_BUCKET_NAME,
AWS_SECRET_ACCESS_KEY,
AWS_ACCESS_KEY_ID,
)
from .errors import AWSDownloadError
| 29.333333 | 80 | 0.565584 | from boto3.session import Session
from rest_framework import status
from django.http import response
import botocore
import boto3
from core.settings import (
AWS_STORAGE_BUCKET_NAME,
AWS_SECRET_ACCESS_KEY,
AWS_ACCESS_KEY_ID,
)
from .errors import AWSDownloadError
class AWSFunctionsS3:
    """Helper wrapping boto3 access to the configured S3 bucket.

    Each method returns ``None`` on success and a
    ``django.http.response.JsonResponse`` describing the failure
    otherwise; ``download_file_from_s3`` may also raise
    ``AWSDownloadError`` for client errors other than a missing key.
    """

    def __init__(self):
        print("AWS S3 Function Class Initialised")

    def upload_file_to_s3(self, cloudFilename: str, fileobj):
        """Uploads file to S3 bucket

        Args:
            cloudFilename: Name of file in S3 bucket
            fileobj: File to be stored

        Returns:
            None -> Success
            response.JsonResponse -> Failure
        """
        try:
            # Build an explicit session from the configured credentials.
            session = Session(
                aws_access_key_id=AWS_ACCESS_KEY_ID,
                aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
            )
            s3 = session.resource("s3")
            s3.Bucket(AWS_STORAGE_BUCKET_NAME).put_object(
                Key=cloudFilename, Body=fileobj
            )
        except Exception:
            # Any boto3 failure is reported to the caller as a 503.
            return response.JsonResponse(
                {"error_status": True, "success_status": False},
                status=status.HTTP_503_SERVICE_UNAVAILABLE,
            )

    def delete_file_from_s3(self, cloudFilename: str):
        """Deletes file from S3 bucket

        Args:
            cloudFilename: Name of file in S3 bucket

        Returns:
            None -> Success
            response.JsonResponse -> Failure
        """
        try:
            session = Session(
                aws_access_key_id=AWS_ACCESS_KEY_ID,
                aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
            )
            s3 = session.resource("s3")
            bucket = s3.Bucket(AWS_STORAGE_BUCKET_NAME)
            # BUGFIX: the result was previously bound to a local named
            # ``response``, shadowing the imported django.http ``response``
            # module; when any call above raised, the except branch then
            # failed with UnboundLocalError instead of returning a
            # JsonResponse.  The return value is unused, so drop it.
            bucket.delete_objects(
                Delete={"Objects": [{"Key": cloudFilename}]}
            )
        except Exception:
            return response.JsonResponse(
                {"error_status": True, "success_status": False},
                status=status.HTTP_503_SERVICE_UNAVAILABLE,
            )

    def download_file_from_s3(self, cloudFilename: str, downloadFilename: str):
        """Downloads file from S3 bucket

        Args:
            cloudFilename: Name of file in S3 bucket
            downloadFilename: Name of file after download

        Returns:
            None -> Success
            response.JsonResponse -> Failure

        Raises:
            AWSDownloadError: for client errors other than a missing key.
        """
        # NOTE(review): unlike the other methods this relies on the default
        # boto3 credential chain rather than the explicit settings keys --
        # confirm this is intentional.
        s3 = boto3.resource("s3")
        try:
            s3.Bucket(AWS_STORAGE_BUCKET_NAME).download_file(
                cloudFilename, downloadFilename
            )
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]["Code"] == "404":
                return response.JsonResponse(
                    {
                        "error_status": True,
                        "success_status": False,
                    },
                    status=status.HTTP_404_NOT_FOUND,
                )
            else:
                # Chain the original ClientError for easier debugging.
                raise AWSDownloadError("Download Error, Please Try Again Later") from e
| 49 | 2,730 | 23 |
b2818f39265c4ccddafbd2a91b3f41ba311adca9 | 184 | py | Python | autos/forms.py | CodeByLine/dj4e-tutorial | 8cb592658a18777220eb297e63d42f2a7d53733c | [
"CC-BY-3.0"
] | null | null | null | autos/forms.py | CodeByLine/dj4e-tutorial | 8cb592658a18777220eb297e63d42f2a7d53733c | [
"CC-BY-3.0"
] | 3 | 2021-04-12T16:18:58.000Z | 2021-06-10T20:40:08.000Z | autos/forms.py | CodeByLine/dj4e-tutorial | 8cb592658a18777220eb297e63d42f2a7d53733c | [
"CC-BY-3.0"
] | null | null | null |
from django.forms import ModelForm
from autos.models import Make
# Create the form class.
| 16.727273 | 34 | 0.684783 |
from django.forms import ModelForm
from autos.models import Make
# Create the form class.
class MakeForm(ModelForm):
    """ModelForm exposing every field of the Make model."""
    class Meta:
        model = Make
        fields = '__all__'
| 0 | 69 | 22 |
1999a513013877f63328295084923063e7609448 | 1,133 | py | Python | tests/ut_RSSModel.py | crawfordsm/pyspectrograph | 4237ba4b4fe08a69e1d6487924d959f089ecca46 | [
"BSD-3-Clause"
] | 18 | 2015-01-11T21:04:59.000Z | 2021-08-06T18:30:47.000Z | tests/ut_RSSModel.py | crawfordsm/pyspectrograph | 4237ba4b4fe08a69e1d6487924d959f089ecca46 | [
"BSD-3-Clause"
] | 14 | 2015-04-23T09:39:16.000Z | 2017-12-03T12:49:05.000Z | tests/ut_RSSModel.py | crawfordsm/pyspectrograph | 4237ba4b4fe08a69e1d6487924d959f089ecca46 | [
"BSD-3-Clause"
] | 5 | 2015-04-23T08:17:37.000Z | 2019-06-22T13:36:47.000Z | import pylab as pl
from PySpectrograph.Models import RSSModel
from PySpectrograph.Spectra import Spectrum
# create the spectrograph model (PG0900 grating, 2x2 binning, 1.5" slit)
rss = RSSModel.RSSModel(grating_name="PG0900", gratang=15.875, camang=31.76496,
                        slit=1.50, xbin=2, ybin=2)
# print out some basic statistics; the 1e7 factor rescales the model's
# wavelength units for display (presumably mm -> Angstroms; confirm)
print(1e7 * rss.calc_bluewavelength(), 1e7 * rss.calc_centralwavelength(), 1e7 * rss.calc_redwavelength())
R = rss.calc_resolution(rss.calc_centralwavelength(), rss.alpha(), -rss.beta())
res = 1e7 * rss.calc_resolelement(rss.alpha(), -rss.beta())
print(R, res)
# set up the detector: central row and its wavelength solution
ycen = rss.detector.get_ypixcenter()
d_arr = rss.detector.make_detector()[ycen, :]
w = 1e7 * rss.get_wavelength(d_arr)
# set up the artificial spectrum from the Ne line list (wavelength, flux)
sw, sf = pl.loadtxt('Ne.txt', usecols=(0, 1), unpack=True)
wrange = [1e7 * rss.calc_bluewavelength(), 1e7 * rss.calc_redwavelength()]
spec = Spectrum.Spectrum(sw, sf, wrange=wrange, dw=res / 10, stype='line', sigma=res)
# interpolate it over the same range as the detector
spec.interp(w)
# plot the normalized spectrum masked by the detector layout
pl.figure()
pl.plot(spec.wavelength, d_arr * ((spec.flux) / spec.flux.max()))
pl.show()
| 32.371429 | 106 | 0.718447 | import pylab as pl
from PySpectrograph.Models import RSSModel
from PySpectrograph.Spectra import Spectrum
# create the spectrograph model (PG0900 grating, 2x2 binning, 1.5" slit)
rss = RSSModel.RSSModel(grating_name="PG0900", gratang=15.875, camang=31.76496,
                        slit=1.50, xbin=2, ybin=2)
# print out some basic statistics; the 1e7 factor rescales the model's
# wavelength units for display (presumably mm -> Angstroms; confirm)
print(1e7 * rss.calc_bluewavelength(), 1e7 * rss.calc_centralwavelength(), 1e7 * rss.calc_redwavelength())
R = rss.calc_resolution(rss.calc_centralwavelength(), rss.alpha(), -rss.beta())
res = 1e7 * rss.calc_resolelement(rss.alpha(), -rss.beta())
print(R, res)
# set up the detector: central row and its wavelength solution
ycen = rss.detector.get_ypixcenter()
d_arr = rss.detector.make_detector()[ycen, :]
w = 1e7 * rss.get_wavelength(d_arr)
# set up the artificial spectrum from the Ne line list (wavelength, flux)
sw, sf = pl.loadtxt('Ne.txt', usecols=(0, 1), unpack=True)
wrange = [1e7 * rss.calc_bluewavelength(), 1e7 * rss.calc_redwavelength()]
spec = Spectrum.Spectrum(sw, sf, wrange=wrange, dw=res / 10, stype='line', sigma=res)
# interpolate it over the same range as the detector
spec.interp(w)
# plot the normalized spectrum masked by the detector layout
pl.figure()
pl.plot(spec.wavelength, d_arr * ((spec.flux) / spec.flux.max()))
pl.show()
| 0 | 0 | 0 |
b1cf6e62e796025a0db200addf50674a66b94745 | 71 | py | Python | dolosse/hardware/xia/pixie16/__init__.py | Tobias2023/dolosse | 7e56a84d7da2a3f0d6d409f1cb7ea5accb12cac5 | [
"Apache-2.0"
] | 9 | 2019-05-17T11:27:02.000Z | 2020-12-18T15:37:48.000Z | dolosse/hardware/xia/pixie16/__init__.py | Tobias2023/dolosse | 7e56a84d7da2a3f0d6d409f1cb7ea5accb12cac5 | [
"Apache-2.0"
] | 84 | 2019-04-08T10:10:37.000Z | 2020-11-16T00:46:54.000Z | dolosse/hardware/xia/pixie16/__init__.py | Tobias2023/dolosse | 7e56a84d7da2a3f0d6d409f1cb7ea5accb12cac5 | [
"Apache-2.0"
] | 5 | 2019-11-09T18:23:48.000Z | 2020-05-08T12:24:55.000Z | """
Classes related to working with XIA's Pixie16 electronics line
"""
| 17.75 | 62 | 0.746479 | """
Classes related to working with XIA's Pixie16 electronics line
"""
| 0 | 0 | 0 |
535661c27727160f3618660e5d2f29b253b44d2a | 1,865 | py | Python | ExampleGame/examplegame/japanese.py | glyph/imaginary | 62299c8a0481bbee51444e688f45385a81cad328 | [
"MIT"
] | 25 | 2015-01-10T02:26:43.000Z | 2021-08-20T09:40:46.000Z | ExampleGame/examplegame/japanese.py | DalavanCloud/imaginary | e84abc98d400cff5e262df2b34e725dde575af8e | [
"MIT"
] | 65 | 2015-01-07T08:02:53.000Z | 2022-02-06T02:15:09.000Z | ExampleGame/examplegame/japanese.py | DalavanCloud/imaginary | e84abc98d400cff5e262df2b34e725dde575af8e | [
"MIT"
] | 7 | 2015-03-03T18:44:29.000Z | 2021-07-28T02:54:10.000Z | """
Japanese language data.
This module contains a dict named 'hiragana' which maps hiragana
unicode characters to romaji pronunciations, as well as a
'romajiToHiragana' dict which maps romaji pronunciation to *lists* of
hiragana characters. There are multiple hiragana characters with the
same pronunciation, thus the multiple values per romaji in the
romajiToHiragana dict.
"""
# Hiragana.
# Maps each hiragana code point to its romaji reading.  Several
# characters share a reading (e.g. JI, ZU), so the reverse map below
# stores lists of characters per reading.
hiragana = {
    u'\u3042': 'A', u'\u3044': 'I', u'\u3046': 'U', u'\u3048': 'E',
    u'\u3081': 'ME', u'\u3080': 'MU', u'\u3082': 'MO', u'\u3084': 'YA',
    u'\u3086': 'YU', u'\u3089': 'RA', u'\u3088': 'YO', u'\u308b': 'RU',
    u'\u308a': 'RI', u'\u308d': 'RO', u'\u308c': 'RE', u'\u308f': 'WA',
    u'\u3091': 'WE', u'\u3090': 'WI', u'\u3093': 'N', u'\u3092': 'WO',
    u'\u304b': 'KA', u'\u304a': 'O', u'\u304d': 'KI', u'\u304c': 'GA',
    u'\u304f': 'KU', u'\u304e': 'GI', u'\u3051': 'KE', u'\u3050': 'GU',
    u'\u3053': 'KO', u'\u3052': 'GE', u'\u3055': 'SA', u'\u3054': 'GO',
    u'\u3057': 'SHI',u'\u3056': 'ZA', u'\u3059': 'SU', u'\u3058': 'JI',
    u'\u305b': 'SE', u'\u305a': 'ZU', u'\u305d': 'SO', u'\u305c': 'ZE',
    u'\u305f': 'TA', u'\u305e': 'ZO', u'\u3061': 'CHI', u'\u3060': 'DA',
    u'\u3062': 'JI', u'\u3065': 'ZU', u'\u3064': 'TSU', u'\u3067': 'DE',
    u'\u3066': 'TE', u'\u3069': 'DO', u'\u3068': 'TO', u'\u306b': 'NI',
    u'\u306a': 'NA', u'\u306d': 'NE', u'\u306c': 'NU', u'\u306f': 'HA',
    u'\u306e': 'NO', u'\u3071': 'PA', u'\u3070': 'BA', u'\u3073': 'BI',
    u'\u3072': 'HI', u'\u3075': 'FU', u'\u3074': 'PI', u'\u3077': 'PU',
    u'\u3076': 'BU', u'\u3079': 'BE', u'\u3078': 'HE', u'\u307b': 'HO',
    u'\u307a': 'PE', u'\u307d': 'PO', u'\u307c': 'BO', u'\u307f': 'MI',
    u'\u307e': 'MA'}

# Invert the mapping: romaji reading -> list of hiragana characters.
# BUGFIX: dict.iteritems() is Python 2 only and raises AttributeError on
# Python 3; dict.items() works on both 2 and 3.
romajiToHiragana = {}
for k, v in hiragana.items():
    romajiToHiragana.setdefault(v, []).append(k)
# Katakana.
# katakana = {
# }
| 42.386364 | 72 | 0.534048 | """
Japanese language data.
This module contains a dict named 'hiragana' which maps hiragana
unicode characters to romaji pronunciations, as well as a
'romajiToHiragana' dict which maps romaji pronunciation to *lists* of
hiragana characters. There are multiple hiragana characters with the
same pronunciation, thus the multiple values per romaji in the
romajiToHiragana dict.
"""
# Hiragana.
# Maps each hiragana code point to its romaji reading.  Several
# characters share a reading (e.g. JI, ZU), so the reverse map below
# stores lists of characters per reading.
hiragana = {
    u'\u3042': 'A', u'\u3044': 'I', u'\u3046': 'U', u'\u3048': 'E',
    u'\u3081': 'ME', u'\u3080': 'MU', u'\u3082': 'MO', u'\u3084': 'YA',
    u'\u3086': 'YU', u'\u3089': 'RA', u'\u3088': 'YO', u'\u308b': 'RU',
    u'\u308a': 'RI', u'\u308d': 'RO', u'\u308c': 'RE', u'\u308f': 'WA',
    u'\u3091': 'WE', u'\u3090': 'WI', u'\u3093': 'N', u'\u3092': 'WO',
    u'\u304b': 'KA', u'\u304a': 'O', u'\u304d': 'KI', u'\u304c': 'GA',
    u'\u304f': 'KU', u'\u304e': 'GI', u'\u3051': 'KE', u'\u3050': 'GU',
    u'\u3053': 'KO', u'\u3052': 'GE', u'\u3055': 'SA', u'\u3054': 'GO',
    u'\u3057': 'SHI',u'\u3056': 'ZA', u'\u3059': 'SU', u'\u3058': 'JI',
    u'\u305b': 'SE', u'\u305a': 'ZU', u'\u305d': 'SO', u'\u305c': 'ZE',
    u'\u305f': 'TA', u'\u305e': 'ZO', u'\u3061': 'CHI', u'\u3060': 'DA',
    u'\u3062': 'JI', u'\u3065': 'ZU', u'\u3064': 'TSU', u'\u3067': 'DE',
    u'\u3066': 'TE', u'\u3069': 'DO', u'\u3068': 'TO', u'\u306b': 'NI',
    u'\u306a': 'NA', u'\u306d': 'NE', u'\u306c': 'NU', u'\u306f': 'HA',
    u'\u306e': 'NO', u'\u3071': 'PA', u'\u3070': 'BA', u'\u3073': 'BI',
    u'\u3072': 'HI', u'\u3075': 'FU', u'\u3074': 'PI', u'\u3077': 'PU',
    u'\u3076': 'BU', u'\u3079': 'BE', u'\u3078': 'HE', u'\u307b': 'HO',
    u'\u307a': 'PE', u'\u307d': 'PO', u'\u307c': 'BO', u'\u307f': 'MI',
    u'\u307e': 'MA'}

# Invert the mapping: romaji reading -> list of hiragana characters.
# BUGFIX: dict.iteritems() is Python 2 only and raises AttributeError on
# Python 3; dict.items() works on both 2 and 3.
romajiToHiragana = {}
for k, v in hiragana.items():
    romajiToHiragana.setdefault(v, []).append(k)
# Katakana.
# katakana = {
# }
| 0 | 0 | 0 |
439c78b9ba443d280780aed9883c9b182bd3596b | 1,961 | py | Python | betrayal/models.py | danpatrickoneill/bahoth-be | a46ef2c0bac873c32900784336f0664d43a9bf3f | [
"MIT"
] | null | null | null | betrayal/models.py | danpatrickoneill/bahoth-be | a46ef2c0bac873c32900784336f0664d43a9bf3f | [
"MIT"
] | 4 | 2020-07-17T18:11:49.000Z | 2021-06-04T22:52:30.000Z | betrayal/models.py | danpatrickoneill/bahoth-be | a46ef2c0bac873c32900784336f0664d43a9bf3f | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from uuid import uuid4
# Does it make sense to have a Game model that could be saved as an instance with Chars linked to it? Good to research
name = models.CharField(max_length=100)
card = models.IntegerField(choices=CARD_CHOICES)
effect = models.CharField(max_length=100)
# Would it make sense to have a separate monster model? Could have a more basic "living thing" model that other can inherit from.
# I don't think heros and traitors would need a distinct model, but then the standard character model might need some rarely used fields
# Game manual calls PCs "explorers"; both they and monsters could inherit from character | 35.017857 | 136 | 0.732789 | from django.db import models
from django.contrib.auth.models import User
from uuid import uuid4
# Does it make sense to have a Game model that could be saved as an instance with Chars linked to it? Good to research
class Character(models.Model):
    """Abstract base model holding the fields shared by all characters."""
    name = models.CharField(max_length=100)
    description = models.TextField(blank=True)
    # The four integer trait tracks carried by every character.
    speed = models.IntegerField()
    might = models.IntegerField()
    sanity = models.IntegerField()
    knowledge = models.IntegerField()
    class Meta:
        # Abstract: no table is created for Character itself.
        abstract = True
class PlayerCharacter(Character):
    """A player-controlled explorer, linked one-to-one to a Django user."""
    # BUGFIX: was ``user.OneToOneField(...)``, which raises NameError at
    # class-definition time -- the field factory lives on ``models``.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    age = models.IntegerField()
    height = models.IntegerField()
    weight = models.IntegerField()
    birthday = models.DateField()
    hobbies = models.CharField(max_length=100)
    # Role flags for the character.
    hero = models.BooleanField(default=False)
    traitor = models.BooleanField(default=False)
class Monster(Character):
    """A non-player monster; currently adds nothing beyond Character.

    BUGFIX: the class previously had an empty suite, which is a
    SyntaxError and made the module unimportable; a docstring body
    restores valid syntax without changing the model's fields.
    """
class Room(models.Model):
    """A room and the card type (if any) associated with it."""
    # Codes stored in the ``card`` field via CARD_CHOICES.
    ITEM = 'I'
    MULTIPLE_ITEMS = 'II'
    EVENT = 'E'
    OMEN = 'O'
    CARD_CHOICES = [
        (ITEM, 'Item'),
        (MULTIPLE_ITEMS, 'Multiple Items'),
        (EVENT, 'Event'),
        (OMEN, 'Omen'),
    ]
    name = models.CharField(max_length=100)
    # NOTE(review): an IntegerField with string-valued choices looks wrong --
    # CARD_CHOICES values are letter codes; a CharField seems intended.
    card = models.IntegerField(choices=CARD_CHOICES)
    effect = models.CharField(max_length=100)
class Item(models.Model):
    """An item card held by a character."""
    name = models.CharField(max_length=100)
    description = models.TextField(blank=True)
    # Could this be a "holder/location" field that could be either a character or room? Could be useful
    # NOTE(review): 'Character' has Meta.abstract = True above; Django does
    # not allow ForeignKeys to abstract models -- confirm the intended target.
    character = models.ForeignKey(
        'Character',
        on_delete=models.CASCADE
    )
    # presumably marks omen cards -- verify against game logic
    omen = models.BooleanField(default=False)
# Would it make sense to have a separate monster model? Could have a more basic "living thing" model that other can inherit from.
# I don't think heros and traitors would need a distinct model, but then the standard character model might need some rarely used fields
# Game manual calls PCs "explorers"; both they and monsters could inherit from character | 0 | 1,137 | 114 |
be6b2f022901b01f3ab5c3b4fced43db992dd4dc | 14,990 | py | Python | examples/python/statespace_structural_harvey_jaeger.py | CCHiggins/statsmodels | 300b6fba90c65c8e94b4f83e04f7ae1b0ceeac2e | [
"BSD-3-Clause"
] | 6,931 | 2015-01-01T11:41:55.000Z | 2022-03-31T17:03:24.000Z | examples/python/statespace_structural_harvey_jaeger.py | CCHiggins/statsmodels | 300b6fba90c65c8e94b4f83e04f7ae1b0ceeac2e | [
"BSD-3-Clause"
] | 6,137 | 2015-01-01T00:33:45.000Z | 2022-03-31T22:53:17.000Z | examples/python/statespace_structural_harvey_jaeger.py | CCHiggins/statsmodels | 300b6fba90c65c8e94b4f83e04f7ae1b0ceeac2e | [
"BSD-3-Clause"
] | 2,608 | 2015-01-02T21:32:31.000Z | 2022-03-31T07:38:30.000Z | #!/usr/bin/env python
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook statespace_structural_harvey_jaeger.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Detrending, Stylized Facts and the Business Cycle
#
# In an influential article, Harvey and Jaeger (1993) described the use of
# unobserved components models (also known as "structural time series
# models") to derive stylized facts of the business cycle.
#
# Their paper begins:
#
# "Establishing the 'stylized facts' associated with a set of time
# series is widely considered a crucial step
# in macroeconomic research ... For such facts to be useful they
# should (1) be consistent with the stochastic
# properties of the data and (2) present meaningful information."
#
# In particular, they make the argument that these goals are often better
# met using the unobserved components approach rather than the popular
# Hodrick-Prescott filter or Box-Jenkins ARIMA modeling techniques.
#
# statsmodels has the ability to perform all three types of analysis, and
# below we follow the steps of their paper, using a slightly updated
# dataset.
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from IPython.display import display, Latex
# ## Unobserved Components
#
# The unobserved components model available in statsmodels can be written
# as:
#
# $$
# y_t = \underbrace{\mu_{t}}_{\text{trend}} +
# \underbrace{\gamma_{t}}_{\text{seasonal}} +
# \underbrace{c_{t}}_{\text{cycle}} + \sum_{j=1}^k \underbrace{\beta_j
# x_{jt}}_{\text{explanatory}} +
# \underbrace{\varepsilon_t}_{\text{irregular}}
# $$
#
# see Durbin and Koopman 2012, Chapter 3 for notation and additional
# details. Notice that different specifications for the different individual
# components can support a wide range of models. The specific models
# considered in the paper and below are specializations of this general
# equation.
#
# ### Trend
#
# The trend component is a dynamic extension of a regression model that
# includes an intercept and linear time-trend.
#
# $$
# \begin{align}
# \underbrace{\mu_{t+1}}_{\text{level}} & = \mu_t + \nu_t + \eta_{t+1}
# \qquad & \eta_{t+1} \sim N(0, \sigma_\eta^2) \\\\
# \underbrace{\nu_{t+1}}_{\text{trend}} & = \nu_t + \zeta_{t+1} &
# \zeta_{t+1} \sim N(0, \sigma_\zeta^2) \\
# \end{align}
# $$
#
# where the level is a generalization of the intercept term that can
# dynamically vary across time, and the trend is a generalization of the
# time-trend such that the slope can dynamically vary across time.
#
# For both elements (level and trend), we can consider models in which:
#
# - The element is included vs excluded (if the trend is included, there
# must also be a level included).
# - The element is deterministic vs stochastic (i.e. whether or not the
# variance on the error term is confined to be zero or not)
#
# The only additional parameters to be estimated via MLE are the variances
# of any included stochastic components.
#
# This leads to the following specifications:
#
# | |
# Level | Trend | Stochastic Level | Stochastic Trend |
# |----------------------------------------------------------------------|
# -------|-------|------------------|------------------|
# | Constant |
# ✓ | | | |
# | Local Level <br /> (random walk) |
# ✓ | | ✓ | |
# | Deterministic trend |
# ✓ | ✓ | | |
# | Local level with deterministic trend <br /> (random walk with drift) |
# ✓ | ✓ | ✓ | |
# | Local linear trend |
# ✓ | ✓ | ✓ | ✓ |
# | Smooth trend <br /> (integrated random walk) |
# ✓ | ✓ | | ✓ |
#
# ### Seasonal
#
# The seasonal component is written as:
#
# <span>$$
# \gamma_t = - \sum_{j=1}^{s-1} \gamma_{t+1-j} + \omega_t \qquad \omega_t
# \sim N(0, \sigma_\omega^2)
# $$</span>
#
# The periodicity (number of seasons) is `s`, and the defining character
# is that (without the error term), the seasonal components sum to zero
# across one complete cycle. The inclusion of an error term allows the
# seasonal effects to vary over time.
#
# The variants of this model are:
#
# - The periodicity `s`
# - Whether or not to make the seasonal effects stochastic.
#
# If the seasonal effect is stochastic, then there is one additional
# parameter to estimate via MLE (the variance of the error term).
#
# ### Cycle
#
# The cyclical component is intended to capture cyclical effects at time
# frames much longer than captured by the seasonal component. For example,
# in economics the cyclical term is often intended to capture the business
# cycle, and is then expected to have a period between "1.5 and 12 years"
# (see Durbin and Koopman).
#
# The cycle is written as:
#
# <span>$$
# \begin{align}
# c_{t+1} & = c_t \cos \lambda_c + c_t^* \sin \lambda_c + \tilde \omega_t
# \qquad & \tilde \omega_t \sim N(0, \sigma_{\tilde \omega}^2) \\\\
# c_{t+1}^* & = -c_t \sin \lambda_c + c_t^* \cos \lambda_c + \tilde
# \omega_t^* & \tilde \omega_t^* \sim N(0, \sigma_{\tilde \omega}^2)
# \end{align}
# $$</span>
#
# The parameter $\lambda_c$ (the frequency of the cycle) is an additional
# parameter to be estimated by MLE. If the cycle is stochastic, then there
# is one additional parameter to estimate (the variance of the error
# term - note that both of the error terms here share the same variance, but
# are assumed to have independent draws).
#
# ### Irregular
#
# The irregular component is assumed to be a white noise error term. Its
# variance is a parameter to be estimated by MLE; i.e.
#
# $$
# \varepsilon_t \sim N(0, \sigma_\varepsilon^2)
# $$
#
# In some cases, we may want to generalize the irregular component to
# allow for autoregressive effects:
#
# $$
# \varepsilon_t = \rho(L) \varepsilon_{t-1} + \epsilon_t, \qquad
# \epsilon_t \sim N(0, \sigma_\epsilon^2)
# $$
#
# In this case, the autoregressive parameters would also be estimated via
# MLE.
#
# ### Regression effects
#
# We may want to allow for explanatory variables by including additional
# terms
#
# <span>$$
# \sum_{j=1}^k \beta_j x_{jt}
# $$</span>
#
# or for intervention effects by including
#
# <span>$$
# \begin{align}
# \delta w_t \qquad \text{where} \qquad w_t & = 0, \qquad t < \tau, \\\\
# & = 1, \qquad t \ge \tau
# \end{align}
# $$</span>
#
# These additional parameters could be estimated via MLE or by including
# them as components of the state space formulation.
#
# ## Data
#
# Following Harvey and Jaeger, we will consider the following time series:
#
# - US real GNP, "output",
# ([GNPC96](https://research.stlouisfed.org/fred2/series/GNPC96))
# - US GNP implicit price deflator, "prices",
# ([GNPDEF](https://research.stlouisfed.org/fred2/series/GNPDEF))
# - US monetary base, "money",
# ([AMBSL](https://research.stlouisfed.org/fred2/series/AMBSL))
#
# The time frame in the original paper varied across series, but was
# broadly 1954-1989. Below we use data from the period 1948-2008 for all
# series. Although the unobserved components approach allows isolating a
# seasonal component within the model, the series considered in the paper,
# and here, are already seasonally adjusted.
#
# All data series considered here are taken from [Federal Reserve Economic
# Data (FRED)](https://research.stlouisfed.org/fred2/). Conveniently, the
# Python library [Pandas](https://pandas.pydata.org/) has the ability to
# download data from FRED directly.
# Datasets
from pandas_datareader.data import DataReader
# Get the raw data from FRED for the 1948-2008 sample used below.
start = '1948-01'
end = '2008-01'
us_gnp = DataReader('GNPC96', 'fred', start=start, end=end)
us_gnp_deflator = DataReader('GNPDEF', 'fred', start=start, end=end)
# The monetary base is resampled to quarter-start frequency by averaging.
us_monetary_base = DataReader('AMBSL', 'fred', start=start,
                              end=end).resample('QS').mean()
# Recession indicator, reduced to one value per quarter (last observation);
# `.values[:, 0]` extracts the single column as a plain array for plotting.
recessions = DataReader('USRECQ', 'fred', start=start,
                        end=end).resample('QS').last().values[:, 0]
# Construct the dataframe of the log-levels of the three series.
dta = pd.concat(map(np.log, (us_gnp, us_gnp_deflator, us_monetary_base)),
                axis=1)
dta.columns = ['US GNP', 'US Prices', 'US monetary base']
dta.index.freq = dta.index.inferred_freq
# Matplotlib-friendly representation of the date index, used for plotting.
dates = dta.index._mpl_repr()
# To get a sense of these three variables over the timeframe, we can plot
# them:
# Plot the data
ax = dta.plot(figsize=(13, 3))
# Capture the y-limits so the recession shading can span the full height.
ylim = ax.get_ylim()
ax.xaxis.grid()
# Shade recession quarters: `recessions` is the fourth positional argument
# (the `where` mask), so the grey band is drawn only where it is truthy.
# The 1e-5 offsets keep the shading just inside the existing y-limits.
ax.fill_between(dates,
                ylim[0] + 1e-5,
                ylim[1] - 1e-5,
                recessions,
                facecolor='k',
                alpha=0.1)
# ## Model
#
# Since the data is already seasonally adjusted and there are no obvious
# explanatory variables, the generic model considered is:
#
# $$
# y_t = \underbrace{\mu_{t}}_{\text{trend}} +
# \underbrace{c_{t}}_{\text{cycle}} +
# \underbrace{\varepsilon_t}_{\text{irregular}}
# $$
#
# The irregular will be assumed to be white noise, and the cycle will be
# stochastic and damped. The final modeling choice is the specification to
# use for the trend component. Harvey and Jaeger consider two models:
#
# 1. Local linear trend (the "unrestricted" model)
# 2. Smooth trend (the "restricted" model, since we are forcing
# $\sigma_\eta = 0$)
#
# Below, we construct `kwargs` dictionaries for each of these model types.
# Notice that there are two ways to specify the models. One way
# is to specify components directly, as in the table above. The other way is
# to use string names which map to various specifications.
# Model specifications
# Unrestricted model, using string specification
# Unrestricted model: local linear trend with a stochastic damped cycle.
unrestricted_model = dict(
    level='local linear trend',
    cycle=True,
    damped_cycle=True,
    stochastic_cycle=True,
)
# Unrestricted model, setting components directly
# This is an equivalent, but less convenient, way to specify a
# local linear trend model with a stochastic damped cycle:
# unrestricted_model = {
#     'irregular': True, 'level': True, 'stochastic_level': True, 'trend':
#     True, 'stochastic_trend': True,
#     'cycle': True, 'damped_cycle': True, 'stochastic_cycle': True
# }
# The restricted model forces a smooth trend
restricted_model = dict(
    level='smooth trend',
    cycle=True,
    damped_cycle=True,
    stochastic_cycle=True,
)
# Restricted model, setting components directly
# This is an equivalent, but less convenient, way to specify a
# smooth trend model with a stochastic damped cycle. Notice
# that the difference from the local linear trend model is that
# `stochastic_level=False` here.
# restricted_model = {
# 'irregular': True, 'level': True, 'stochastic_level': False,
# 'trend': True, 'stochastic_trend': True,
# 'cycle': True, 'damped_cycle': True, 'stochastic_cycle': True
# }
# We now fit the following models:
#
# 1. Output, unrestricted model
# 2. Prices, unrestricted model
# 3. Prices, restricted model
# 4. Money, unrestricted model
# 5. Money, restricted model
# Output
output_mod = sm.tsa.UnobservedComponents(dta['US GNP'], **unrestricted_model)
# Powell's derivative-free optimizer; disp=False silences convergence output.
output_res = output_mod.fit(method='powell', disp=False)
# Prices: fit both the unrestricted and the restricted (smooth trend) models.
prices_mod = sm.tsa.UnobservedComponents(dta['US Prices'],
                                         **unrestricted_model)
prices_res = prices_mod.fit(method='powell', disp=False)
prices_restricted_mod = sm.tsa.UnobservedComponents(dta['US Prices'],
                                                    **restricted_model)
prices_restricted_res = prices_restricted_mod.fit(method='powell', disp=False)
# Money: same pair of specifications as for prices.
money_mod = sm.tsa.UnobservedComponents(dta['US monetary base'],
                                        **unrestricted_model)
money_res = money_mod.fit(method='powell', disp=False)
money_restricted_mod = sm.tsa.UnobservedComponents(dta['US monetary base'],
                                                   **restricted_model)
money_restricted_res = money_restricted_mod.fit(method='powell', disp=False)
# Once we have fit these models, there are a variety of ways to display
# the information. Looking at the model of US GNP, we can summarize the fit
# of the model using the `summary` method on the fit object.
print(output_res.summary())
# For unobserved components models, and in particular when exploring
# stylized facts in line with point (2) from the introduction, it is often
# more instructive to plot the estimated unobserved components (e.g. the
# level, trend, and cycle) themselves to see if they provide a meaningful
# description of the data.
#
# The `plot_components` method of the fit object can be used to show plots
# and confidence intervals of each of the estimated states, as well as a
# plot of the observed data versus the one-step-ahead predictions of the
# model to assess fit.
fig = output_res.plot_components(legend_loc='lower right', figsize=(15, 9))
# Finally, Harvey and Jaeger summarize the models in another way to
# highlight the relative importances of the trend and cyclical components;
# below we replicate their Table I. The values we find are broadly
# consistent with, but different in the particulars from, the values from
# their table.
# Create Table I: five fitted models by six reported parameters.
table_i = np.zeros((5, 6))
start = dta.index[0]
end = dta.index[-1]
# Describe the sample as e.g. "1948:1-2008:1" (year:quarter endpoints).
time_range = '{0}:{1}-{2}:{3}'.format(start.year, start.quarter,
                                      end.year, end.quarter)
# One (series, sample, restriction) row label per fitted model.
models = [(series, time_range, restriction)
          for series, restriction in (
              ('US GNP', 'None'),
              ('US Prices', 'None'),
              ('US Prices', r'$\sigma_\eta^2 = 0$'),
              ('US monetary base', 'None'),
              ('US monetary base', r'$\sigma_\eta^2 = 0$'),
          )]
index = pd.MultiIndex.from_tuples(
    models, names=['Series', 'Time range', 'Restrictions'])
# Column headers: LaTeX symbols for the reported parameters.
parameter_symbols = [r'$\sigma_\zeta^2$', r'$\sigma_\eta^2$',
                     r'$\sigma_\kappa^2$', r'$\rho$',
                     r'$2 \pi / \lambda_c$', r'$\sigma_\varepsilon^2$']
# Fill one Table I row per fitted model. Use enumerate for the row index
# instead of a manually maintained counter.
for i, res in enumerate((output_res, prices_res, prices_restricted_res,
                         money_res, money_restricted_res)):
    if res.model.stochastic_level:
        # Unrestricted (local linear trend): six estimated parameters,
        # including the level variance.
        (sigma_irregular, sigma_level, sigma_trend, sigma_cycle,
         frequency_cycle, damping_cycle) = res.params
    else:
        # Restricted (smooth trend): the level variance is fixed at zero,
        # so only five parameters are estimated; report '-' via NaN.
        (sigma_irregular, sigma_level, sigma_cycle, frequency_cycle,
         damping_cycle) = res.params
        sigma_trend = np.nan
    # Convert the cycle frequency into a period (in quarters).
    period_cycle = 2 * np.pi / frequency_cycle
    # Variances are scaled by 1e7 to match the units of the original table.
    table_i[i, :] = [
        sigma_level * 1e7, sigma_trend * 1e7, sigma_cycle * 1e7, damping_cycle,
        period_cycle, sigma_irregular * 1e7
    ]
def _table_float_format(value):
    """Format a table entry to 4 significant digits; show '-' for NaN."""
    if np.isnan(value):
        return '-'
    return '%.4g' % np.round(value, 2)

pd.set_option('float_format', _table_float_format)
# Assemble the replica of Harvey and Jaeger's Table I, one row per model.
table_i = pd.DataFrame(table_i, index=index, columns=parameter_symbols)
table_i
| 36.207729 | 79 | 0.663175 | #!/usr/bin/env python
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook statespace_structural_harvey_jaeger.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Detrending, Stylized Facts and the Business Cycle
#
# In an influential article, Harvey and Jaeger (1993) described the use of
# unobserved components models (also known as "structural time series
# models") to derive stylized facts of the business cycle.
#
# Their paper begins:
#
# "Establishing the 'stylized facts' associated with a set of time
# series is widely considered a crucial step
# in macroeconomic research ... For such facts to be useful they
# should (1) be consistent with the stochastic
# properties of the data and (2) present meaningful information."
#
# In particular, they make the argument that these goals are often better
# met using the unobserved components approach rather than the popular
# Hodrick-Prescott filter or Box-Jenkins ARIMA modeling techniques.
#
# statsmodels has the ability to perform all three types of analysis, and
# below we follow the steps of their paper, using a slightly updated
# dataset.
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from IPython.display import display, Latex
# ## Unobserved Components
#
# The unobserved components model available in statsmodels can be written
# as:
#
# $$
# y_t = \underbrace{\mu_{t}}_{\text{trend}} +
# \underbrace{\gamma_{t}}_{\text{seasonal}} +
# \underbrace{c_{t}}_{\text{cycle}} + \sum_{j=1}^k \underbrace{\beta_j
# x_{jt}}_{\text{explanatory}} +
# \underbrace{\varepsilon_t}_{\text{irregular}}
# $$
#
# see Durbin and Koopman 2012, Chapter 3 for notation and additional
# details. Notice that different specifications for the different individual
# components can support a wide range of models. The specific models
# considered in the paper and below are specializations of this general
# equation.
#
# ### Trend
#
# The trend component is a dynamic extension of a regression model that
# includes an intercept and linear time-trend.
#
# $$
# \begin{align}
# \underbrace{\mu_{t+1}}_{\text{level}} & = \mu_t + \nu_t + \eta_{t+1}
# \qquad & \eta_{t+1} \sim N(0, \sigma_\eta^2) \\\\
# \underbrace{\nu_{t+1}}_{\text{trend}} & = \nu_t + \zeta_{t+1} &
# \zeta_{t+1} \sim N(0, \sigma_\zeta^2) \\
# \end{align}
# $$
#
# where the level is a generalization of the intercept term that can
# dynamically vary across time, and the trend is a generalization of the
# time-trend such that the slope can dynamically vary across time.
#
# For both elements (level and trend), we can consider models in which:
#
# - The element is included vs excluded (if the trend is included, there
# must also be a level included).
# - The element is deterministic vs stochastic (i.e. whether or not the
# variance on the error term is confined to be zero or not)
#
# The only additional parameters to be estimated via MLE are the variances
# of any included stochastic components.
#
# This leads to the following specifications:
#
# | |
# Level | Trend | Stochastic Level | Stochastic Trend |
# |----------------------------------------------------------------------|
# -------|-------|------------------|------------------|
# | Constant |
# ✓ | | | |
# | Local Level <br /> (random walk) |
# ✓ | | ✓ | |
# | Deterministic trend |
# ✓ | ✓ | | |
# | Local level with deterministic trend <br /> (random walk with drift) |
# ✓ | ✓ | ✓ | |
# | Local linear trend |
# ✓ | ✓ | ✓ | ✓ |
# | Smooth trend <br /> (integrated random walk) |
# ✓ | ✓ | | ✓ |
#
# ### Seasonal
#
# The seasonal component is written as:
#
# <span>$$
# \gamma_t = - \sum_{j=1}^{s-1} \gamma_{t+1-j} + \omega_t \qquad \omega_t
# \sim N(0, \sigma_\omega^2)
# $$</span>
#
# The periodicity (number of seasons) is `s`, and the defining character
# is that (without the error term), the seasonal components sum to zero
# across one complete cycle. The inclusion of an error term allows the
# seasonal effects to vary over time.
#
# The variants of this model are:
#
# - The periodicity `s`
# - Whether or not to make the seasonal effects stochastic.
#
# If the seasonal effect is stochastic, then there is one additional
# parameter to estimate via MLE (the variance of the error term).
#
# ### Cycle
#
# The cyclical component is intended to capture cyclical effects at time
# frames much longer than captured by the seasonal component. For example,
# in economics the cyclical term is often intended to capture the business
# cycle, and is then expected to have a period between "1.5 and 12 years"
# (see Durbin and Koopman).
#
# The cycle is written as:
#
# <span>$$
# \begin{align}
# c_{t+1} & = c_t \cos \lambda_c + c_t^* \sin \lambda_c + \tilde \omega_t
# \qquad & \tilde \omega_t \sim N(0, \sigma_{\tilde \omega}^2) \\\\
# c_{t+1}^* & = -c_t \sin \lambda_c + c_t^* \cos \lambda_c + \tilde
# \omega_t^* & \tilde \omega_t^* \sim N(0, \sigma_{\tilde \omega}^2)
# \end{align}
# $$</span>
#
# The parameter $\lambda_c$ (the frequency of the cycle) is an additional
# parameter to be estimated by MLE. If the seasonal effect is stochastic,
# then there is one another parameter to estimate (the variance of the error
# term - note that both of the error terms here share the same variance, but
# are assumed to have independent draws).
#
# ### Irregular
#
# The irregular component is assumed to be a white noise error term. Its
# variance is a parameter to be estimated by MLE; i.e.
#
# $$
# \varepsilon_t \sim N(0, \sigma_\varepsilon^2)
# $$
#
# In some cases, we may want to generalize the irregular component to
# allow for autoregressive effects:
#
# $$
# \varepsilon_t = \rho(L) \varepsilon_{t-1} + \epsilon_t, \qquad
# \epsilon_t \sim N(0, \sigma_\epsilon^2)
# $$
#
# In this case, the autoregressive parameters would also be estimated via
# MLE.
#
# ### Regression effects
#
# We may want to allow for explanatory variables by including additional
# terms
#
# <span>$$
# \sum_{j=1}^k \beta_j x_{jt}
# $$</span>
#
# or for intervention effects by including
#
# <span>$$
# \begin{align}
# \delta w_t \qquad \text{where} \qquad w_t & = 0, \qquad t < \tau, \\\\
# & = 1, \qquad t \ge \tau
# \end{align}
# $$</span>
#
# These additional parameters could be estimated via MLE or by including
# them as components of the state space formulation.
#
# ## Data
#
# Following Harvey and Jaeger, we will consider the following time series:
#
# - US real GNP, "output",
# ([GNPC96](https://research.stlouisfed.org/fred2/series/GNPC96))
# - US GNP implicit price deflator, "prices",
# ([GNPDEF](https://research.stlouisfed.org/fred2/series/GNPDEF))
# - US monetary base, "money",
# ([AMBSL](https://research.stlouisfed.org/fred2/series/AMBSL))
#
# The time frame in the original paper varied across series, but was
# broadly 1954-1989. Below we use data from the period 1948-2008 for all
# series. Although the unobserved components approach allows isolating a
# seasonal component within the model, the series considered in the paper,
# and here, are already seasonally adjusted.
#
# All data series considered here are taken from [Federal Reserve Economic
# Data (FRED)](https://research.stlouisfed.org/fred2/). Conveniently, the
# Python library [Pandas](https://pandas.pydata.org/) has the ability to
# download data from FRED directly.
# Datasets
from pandas_datareader.data import DataReader
# Get the raw data
start = '1948-01'
end = '2008-01'
us_gnp = DataReader('GNPC96', 'fred', start=start, end=end)
us_gnp_deflator = DataReader('GNPDEF', 'fred', start=start, end=end)
us_monetary_base = DataReader('AMBSL', 'fred', start=start,
end=end).resample('QS').mean()
recessions = DataReader('USRECQ', 'fred', start=start,
end=end).resample('QS').last().values[:, 0]
# Construct the dataframe
dta = pd.concat(map(np.log, (us_gnp, us_gnp_deflator, us_monetary_base)),
axis=1)
dta.columns = ['US GNP', 'US Prices', 'US monetary base']
dta.index.freq = dta.index.inferred_freq
dates = dta.index._mpl_repr()
# To get a sense of these three variables over the timeframe, we can plot
# them:
# Plot the data
ax = dta.plot(figsize=(13, 3))
ylim = ax.get_ylim()
ax.xaxis.grid()
ax.fill_between(dates,
ylim[0] + 1e-5,
ylim[1] - 1e-5,
recessions,
facecolor='k',
alpha=0.1)
# ## Model
#
# Since the data is already seasonally adjusted and there are no obvious
# explanatory variables, the generic model considered is:
#
# $$
# y_t = \underbrace{\mu_{t}}_{\text{trend}} +
# \underbrace{c_{t}}_{\text{cycle}} +
# \underbrace{\varepsilon_t}_{\text{irregular}}
# $$
#
# The irregular will be assumed to be white noise, and the cycle will be
# stochastic and damped. The final modeling choice is the specification to
# use for the trend component. Harvey and Jaeger consider two models:
#
# 1. Local linear trend (the "unrestricted" model)
# 2. Smooth trend (the "restricted" model, since we are forcing
# $\sigma_\eta = 0$)
#
# Below, we construct `kwargs` dictionaries for each of these model types.
# Notice that rather that there are two ways to specify the models. One way
# is to specify components directly, as in the table above. The other way is
# to use string names which map to various specifications.
# Model specifications
# Unrestricted model, using string specification
unrestricted_model = {
'level': 'local linear trend',
'cycle': True,
'damped_cycle': True,
'stochastic_cycle': True
}
# Unrestricted model, setting components directly
# This is an equivalent, but less convenient, way to specify a
# local linear trend model with a stochastic damped cycle:
# unrestricted_model = {
# 'irregular': True, 'level': True, 'stochastic_level': True, 'trend':
# True, 'stochastic_trend': True,
# 'cycle': True, 'damped_cycle': True, 'stochastic_cycle': True
# }
# The restricted model forces a smooth trend
restricted_model = {
'level': 'smooth trend',
'cycle': True,
'damped_cycle': True,
'stochastic_cycle': True
}
# Restricted model, setting components directly
# This is an equivalent, but less convenient, way to specify a
# smooth trend model with a stochastic damped cycle. Notice
# that the difference from the local linear trend model is that
# `stochastic_level=False` here.
# unrestricted_model = {
# 'irregular': True, 'level': True, 'stochastic_level': False,
# 'trend': True, 'stochastic_trend': True,
# 'cycle': True, 'damped_cycle': True, 'stochastic_cycle': True
# }
# We now fit the following models:
#
# 1. Output, unrestricted model
# 2. Prices, unrestricted model
# 3. Prices, restricted model
# 4. Money, unrestricted model
# 5. Money, restricted model
# Output
output_mod = sm.tsa.UnobservedComponents(dta['US GNP'], **unrestricted_model)
output_res = output_mod.fit(method='powell', disp=False)
# Prices
prices_mod = sm.tsa.UnobservedComponents(dta['US Prices'],
**unrestricted_model)
prices_res = prices_mod.fit(method='powell', disp=False)
prices_restricted_mod = sm.tsa.UnobservedComponents(dta['US Prices'],
**restricted_model)
prices_restricted_res = prices_restricted_mod.fit(method='powell', disp=False)
# Money
money_mod = sm.tsa.UnobservedComponents(dta['US monetary base'],
**unrestricted_model)
money_res = money_mod.fit(method='powell', disp=False)
money_restricted_mod = sm.tsa.UnobservedComponents(dta['US monetary base'],
**restricted_model)
money_restricted_res = money_restricted_mod.fit(method='powell', disp=False)
# Once we have fit these models, there are a variety of ways to display
# the information. Looking at the model of US GNP, we can summarize the fit
# of the model using the `summary` method on the fit object.
print(output_res.summary())
# For unobserved components models, and in particular when exploring
# stylized facts in line with point (2) from the introduction, it is often
# more instructive to plot the estimated unobserved components (e.g. the
# level, trend, and cycle) themselves to see if they provide a meaningful
# description of the data.
#
# The `plot_components` method of the fit object can be used to show plots
# and confidence intervals of each of the estimated states, as well as a
# plot of the observed data versus the one-step-ahead predictions of the
# model to assess fit.
fig = output_res.plot_components(legend_loc='lower right', figsize=(15, 9))
# Finally, Harvey and Jaeger summarize the models in another way to
# highlight the relative importances of the trend and cyclical components;
# below we replicate their Table I. The values we find are broadly
# consistent with, but different in the particulars from, the values from
# their table.
# Create Table I
table_i = np.zeros((5, 6))
start = dta.index[0]
end = dta.index[-1]
time_range = '%d:%d-%d:%d' % (start.year, start.quarter, end.year, end.quarter)
models = [
('US GNP', time_range, 'None'),
('US Prices', time_range, 'None'),
('US Prices', time_range, r'$\sigma_\eta^2 = 0$'),
('US monetary base', time_range, 'None'),
('US monetary base', time_range, r'$\sigma_\eta^2 = 0$'),
]
index = pd.MultiIndex.from_tuples(
models, names=['Series', 'Time range', 'Restrictions'])
parameter_symbols = [
r'$\sigma_\zeta^2$',
r'$\sigma_\eta^2$',
r'$\sigma_\kappa^2$',
r'$\rho$',
r'$2 \pi / \lambda_c$',
r'$\sigma_\varepsilon^2$',
]
i = 0
for res in (output_res, prices_res, prices_restricted_res, money_res,
money_restricted_res):
if res.model.stochastic_level:
(sigma_irregular, sigma_level, sigma_trend, sigma_cycle,
frequency_cycle, damping_cycle) = res.params
else:
(sigma_irregular, sigma_level, sigma_cycle, frequency_cycle,
damping_cycle) = res.params
sigma_trend = np.nan
period_cycle = 2 * np.pi / frequency_cycle
table_i[i, :] = [
sigma_level * 1e7, sigma_trend * 1e7, sigma_cycle * 1e7, damping_cycle,
period_cycle, sigma_irregular * 1e7
]
i += 1
pd.set_option('float_format', lambda x: '%.4g' % np.round(x, 2)
if not np.isnan(x) else '-')
table_i = pd.DataFrame(table_i, index=index, columns=parameter_symbols)
table_i
| 0 | 0 | 0 |
f190da5c9804f882b9d755b55a52f520d0dec763 | 554 | py | Python | starfish/core/image/Segment/_base.py | haoxusci/starfish | d7bd856024c75f2ce41504406f2a663566c3814b | [
"MIT"
] | 164 | 2018-03-21T21:52:56.000Z | 2022-03-23T17:14:39.000Z | starfish/core/image/Segment/_base.py | lbgbox/starfish | 0e879d995d5c49b6f5a842e201e3be04c91afc7e | [
"MIT"
] | 1,728 | 2018-03-15T23:16:09.000Z | 2022-03-12T00:09:18.000Z | starfish/core/image/Segment/_base.py | lbgbox/starfish | 0e879d995d5c49b6f5a842e201e3be04c91afc7e | [
"MIT"
] | 66 | 2018-03-25T17:21:15.000Z | 2022-01-16T09:17:11.000Z | from abc import abstractmethod
from starfish.core.imagestack.imagestack import ImageStack
from starfish.core.morphology.binary_mask import BinaryMaskCollection
from starfish.core.pipeline.algorithmbase import AlgorithmBase
| 29.157895 | 69 | 0.725632 | from abc import abstractmethod
from starfish.core.imagestack.imagestack import ImageStack
from starfish.core.morphology.binary_mask import BinaryMaskCollection
from starfish.core.pipeline.algorithmbase import AlgorithmBase
class SegmentAlgorithm(metaclass=AlgorithmBase):
    """Abstract base class for segmentation algorithms.

    Uses ``AlgorithmBase`` as its metaclass (see
    :mod:`starfish.core.pipeline.algorithmbase`); concrete algorithms must
    implement :meth:`run`.
    """
    @abstractmethod
    def run(
            self,
            primary_image_stack: ImageStack,
            nuclei_stack: ImageStack,
            *args
    ) -> BinaryMaskCollection:
        """Performs segmentation on the stack provided.

        Parameters
        ----------
        primary_image_stack : ImageStack
            Stack of primary images to segment.
        nuclei_stack : ImageStack
            Stack of nuclei images (presumably used to seed/guide the
            segmentation — algorithm-specific).
        *args
            Additional, algorithm-specific positional arguments.

        Returns
        -------
        BinaryMaskCollection
            The binary masks produced by the segmentation.
        """
        raise NotImplementedError()
| 0 | 306 | 23 |
e377068f2019742593bec47b1fbe028eff9d6e7b | 1,358 | py | Python | django-serveur/consumption/models.py | micdevcamp/BeerStock | 1517a79ced5f796b8e316f0fb041854aff9bbbad | [
"MIT"
] | null | null | null | django-serveur/consumption/models.py | micdevcamp/BeerStock | 1517a79ced5f796b8e316f0fb041854aff9bbbad | [
"MIT"
] | null | null | null | django-serveur/consumption/models.py | micdevcamp/BeerStock | 1517a79ced5f796b8e316f0fb041854aff9bbbad | [
"MIT"
] | null | null | null | from django.db import models
from datetime import datetime
| 29.521739 | 66 | 0.654639 | from django.db import models
from datetime import datetime
class User(models.Model):
    """A person identified by a unique Facebook ID.

    Name fields are optional, and the four ``rfid_uid_*`` fields hold the
    bytes of an associated RFID card UID.
    """
    first_name = models.CharField(max_length=200, null=True)
    last_name = models.CharField(max_length=200, null=True)
    facebook_id = models.CharField(max_length=100, unique=True)
    rfid_uid_0 = models.IntegerField(default=0)
    rfid_uid_1 = models.IntegerField(default=0)
    rfid_uid_2 = models.IntegerField(default=0)
    rfid_uid_3 = models.IntegerField(default=0)

    def __str__(self):
        # Fall back to the Facebook ID when either name part is missing.
        if self.first_name and self.last_name:
            return self.first_name + ', ' + self.last_name
        return self.facebook_id

    def set_rfid(self, r0, r1, r2, r3):
        """Store the four bytes of an RFID card UID on this user."""
        for offset, byte in enumerate((r0, r1, r2, r3)):
            setattr(self, 'rfid_uid_%d' % offset, byte)
class Valve(models.Model):
    """A named valve, referenced by :class:`Record` entries."""
    name = models.CharField(max_length=200)
    def __str__(self):
        # Valves are displayed by their configured name.
        return self.name
class Record(models.Model):
    """One consumption measurement: a quantity (plus raw pulse count)
    attributed to a user at a valve, timestamped at creation."""
    user = models.ForeignKey(User)
    quantity = models.FloatField()
    pulse = models.IntegerField(null=True)
    valve = models.ForeignKey(Valve, null=True)
    time = models.DateTimeField(default=datetime.now)

    def __str__(self):
        # "<user>[<quantity>]", or "no user[<quantity>]" without a user.
        who = str(self.user) if self.user else 'no user'
        return who + '[' + str(self.quantity) + ']'
| 468 | 759 | 69 |
fb50b92d8ad970ebb1b2a03e60cd8d784d47fc34 | 13,833 | py | Python | engineer/plugins/core.py | tylerbutler/engineer | 1fdcae512a828ea681be8c469f6863b974260614 | [
"MIT"
] | 6 | 2015-09-21T17:50:43.000Z | 2021-04-13T07:27:39.000Z | engineer/plugins/core.py | tylerbutler/engineer | 1fdcae512a828ea681be8c469f6863b974260614 | [
"MIT"
] | 40 | 2015-08-25T00:33:28.000Z | 2020-09-22T17:54:52.000Z | engineer/plugins/core.py | tylerbutler/engineer | 1fdcae512a828ea681be8c469f6863b974260614 | [
"MIT"
] | 5 | 2016-03-26T15:02:56.000Z | 2017-03-16T00:06:10.000Z | # coding=utf-8
import logging
__author__ = 'Tyler Butler <tyler@tylerbutler.com>'
# Adapted from Marty Alchin: http://martyalchin.com/2008/jan/10/simple-plugin-framework/
#noinspection PyUnresolvedReferences,PyUnusedLocal
def load_plugins():
    """Import every available plugin module, bundled and registered."""
    # Importing the bundled package is enough to load Engineer's
    # built-in plugins.
    from engineer.plugins import bundled  # noqa: F401
    # Draining the iterator imports each registered plugin module as a
    # side effect of find_plugins; nothing further is needed here.
    for _name, _module in find_plugins('engineer.plugins'):
        pass
#noinspection PyMissingConstructor,PyUnusedLocal
class PluginMount(type):
    """Metaclass that marks a class hierarchy as a plugin mount point
    (see :ref:`plugins`), after Marty Alchin's simple plugin framework
    linked in the module header."""
class ThemeProvider(PluginMixin):
    """Base class for Theme :ref:`plugins`.

    Subclasses must supply a value for
    :attr:`~engineer.plugins.ThemeProvider.paths`.

    .. versionchanged:: 0.3.0
    """
    __metaclass__ = PluginMount
    # Default is an empty tuple: no theme paths are provided.
    paths = ()
    """An iterable of absolute paths containing one or more :ref:`theme manifests <theme manifest>`."""
class PostProcessor(PluginMixin):
"""
Base class for Post Processor :ref:`plugins`.
PostProcessor subclasses should provide implementations for :meth:`~engineer.plugins.PostProcessor.preprocess` or
:meth:`~engineer.plugins.PostProcessor.postprocess` (or both) as appropriate.
"""
__metaclass__ = PluginMount
@classmethod
def preprocess(cls, post, metadata):
    """
    Called during the post import process, before any post metadata
    defaults have been set.

    Implementations should read and modify the post content through the
    ``content_preprocessed`` attribute so that preprocessors from several
    plugins can be chained together. By default the preprocessed content is
    used only to generate the post HTML and is *not* written back to the
    source post file. To permanently change the content that is written
    out, call :meth:`~engineer.models.Post.set_finalized_content` with the
    modified content; the :ref:`metadata finalization` plugin then writes
    it back to the source file. Writing back therefore requires the
    :attr:`~engineer.conf.EngineerConfiguration.FINALIZE_METADATA` setting
    to be enabled *and* the plugin to have been granted the
    ``MODIFY_RAW_POST`` permission (see :ref:`plugin permissions`).

    Implementations may also add, remove, or update properties on the
    *post* object itself as needed.

    .. tip::
        Because :attr:`~engineer.conf.EngineerConfiguration.FINALIZE_METADATA`
        must be enabled for plugins to write back to source post files,
        check that setting in addition to any of your own.

    :param post: The post currently being processed by Engineer.
    :param metadata: Dict of the metadata contained in the post source
        file, with no default values applied. May be added to, updated, or
        otherwise manipulated before Engineer processes it.
    :return: A 2-tuple of the (possibly modified) *post* and *metadata*.
    """
    # Default implementation is a no-op; subclasses override as needed.
    return post, metadata
@classmethod
def postprocess(cls, post):
"""
The ``postprocess`` method is called after the post has been imported and processed as well as converted to
HTML and output.
:param post: The post being currently processed by Engineer.
:return: The *post* parameter should be returned.
"""
return post
class CommandPlugin(PluginMixin):
"""
Base class for Command :ref:`plugins`.
Command plugins add new commands to the :ref:`cmdline`. CommandPlugin subclasses must provide an implementation
for :meth:`~engineer.plugins.CommandPlugin.add_command`, and can optionally override
the :meth:`~engineer.plugins.CommandPlugin.active` classmethod to determine whether or not the plugin should
actually be loaded.
.. note::
Because Engineer uses :mod:`argparse` for parsing out its commands, you should be somewhat familiar with
it in order to implement a Command plugin.
.. seealso:: :ref:`command plugin examples`
"""
__metaclass__ = PluginMount
@classmethod
def active(cls):
"""
If this method returns ``False``, the plugin will not run and any commands added by the plugin will not
be available.
This method can be overridden to make commands available only if certain criteria are met (for example,
a custom :ref:`setting<settings>`).
:return: A boolean value indicating whether or not the plugin is active and should run. Default
implementation always returns ``True``.
"""
return True
@classmethod
def add_command(cls, subparser, main_parser, common_parser):
"""
This method is called by Engineer while it is building its :class:`~argparse.ArgumentParser`,
allowing one to add addition parsers and subparsers to supplement the core :ref:`Engineer commands<cmdline>`.
:param subparser:
Since Engineer's built-in commands are subparsers, :meth:`~argparse.ArgumentParser.add_subparsers` is
called to generate a subparser. :mod:`argparse` only supports
calling :meth:`~argparse.ArgumentParser.add_subparsers` once, so the subparser object itself (the result
of the initial :meth:`~argparse.ArgumentParser.add_subparsers` call Engineer made when building its
parser) is passed in this parameter. This allows you to add either another top-level command by calling
``add_parser()`` then adding arguments directly, or to create further nested commands by adding a parser
with additional subparsers within it.
:param main_parser:
The top level :class:`~argparse.ArgumentParser` used by Engineer. This is generally only useful if you're
using an :mod:`argparse` wrapper library such as `argh <http://packages.python.org/argh/index.html>`_ in
your plugin. Most wrapper libraries require the root :class:`~argparse.ArgumentParser` object to add their
subparsers to. If you're using :mod:`argparse` directly, you can ignore this parameter and work with
the ``subparser`` parameter exclusively.
:param common_parser:
Engineer provides several :ref:`common arguments<engineer>` for its commands. If you wish to makes these
arguments available for your custom commands, you should pass ``common_parser`` in
to ``add_parser()`` via the ``parents`` parameter.
"""
raise NotImplementedError()
class JinjaEnvironmentPlugin(PluginMixin):
"""
Base class for JinjaEnvironment :ref:`plugins`.
JinjaEnvironment plugins can supplement the Jinja 2 environment with things like filters and global
functions. These additions can then be used in your Jinja templates.
.. versionadded:: 0.5.0
"""
__metaclass__ = PluginMount
filters = {}
"""
A dict of filters to add to the Jinja environment. The key of each entry should be the name of the filter (as it
will be used inside templates), while the value should be the filter function. If you require more custom logic
to build the dict of filters, override the :meth:`~engineer.plugins.JinjaEnvironmentPlugin.get_filters` method.
"""
globals = {}
"""
A dict of functions to add to the Jinja environment globally. The key of each entry should be the name of the
function (as it will be used inside templates), while the value should be the function itself. If you require more
custom logic to build this dict, override the :meth:`~engineer.plugins.JinjaEnvironmentPlugin.get_globals` method.
"""
@classmethod
@classmethod
@classmethod
def update_environment(cls, jinja_env):
"""
For complete customization of the Jinja environment, subclasses can override this method.
Subclasses should ensure that the base implementation is called first in their overridden implementation. For
example:
.. code-block:: python
@classmethod
def update_environment(cls, jinja_env):
super(BundledFilters, cls).update_environment(jinja_env)
# some other code here...
:param jinja_env: The Jinja environment.
"""
cls._add_filters(jinja_env)
cls._add_globals(jinja_env)
@classmethod
def get_filters(cls):
"""
If required, subclasses can override this method to return a dict of filters to add to the Jinja environment.
The default implementation simply returns :attr:`~engineer.plugins.JinjaEnvironmentPlugin.filters`.
"""
return cls.filters
@classmethod
def get_globals(cls):
"""
If required, subclasses can override this method to return a dict of functions to add to the Jinja
environment globally. The default implementation simply
returns :attr:`~engineer.plugins.JinjaEnvironmentPlugin.globals`.
"""
return cls.globals
| 44.05414 | 120 | 0.683583 | # coding=utf-8
import logging
__author__ = 'Tyler Butler <tyler@tylerbutler.com>'
# Adapted from Marty Alchin: http://martyalchin.com/2008/jan/10/simple-plugin-framework/
def find_plugins(entrypoint):
    """Yield ``(name, loaded_object)`` for each plugin registered under the
    given setuptools entry-point group.

    Yields nothing when ``pkg_resources`` (setuptools) is not installed, so
    plugin discovery degrades gracefully instead of crashing.

    :param entrypoint: The entry-point group name, e.g. ``'engineer.plugins'``.
    """
    try:
        import pkg_resources
    except ImportError:
        pkg_resources = None
    if pkg_resources is None:
        # Without setuptools there is no entry-point registry to consult.
        return
    # Fix: the loop variable used to shadow the ``entrypoint`` parameter;
    # use a distinct name for each discovered entry point.
    for entry_point in pkg_resources.iter_entry_points(entrypoint):
        # load() imports the target, which registers plugins as a side effect.
        yield entry_point.name, entry_point.load()
#noinspection PyUnresolvedReferences,PyUnusedLocal
def load_plugins():
    """Load all plugins."""
    # Importing the bundled module is enough to load the built-in plugins:
    # plugin classes register themselves as they are defined.
    from engineer.plugins import bundled
    # Drain the generator; find_plugins() imports every registered plugin
    # module as a side effect of loading its entry point.
    for _name, _module in find_plugins('engineer.plugins'):
        pass
def get_all_plugin_types():
    """Return a tuple of every plugin base class exposed by this module."""
    return (
        ThemeProvider,
        PostProcessor,
        CommandPlugin,
        JinjaEnvironmentPlugin,
    )
#noinspection PyMissingConstructor,PyUnusedLocal
class PluginMount(type):
    """A metaclass used to identify :ref:`plugins`.

    The first class created with this metaclass becomes a "mount point" and
    receives an empty ``plugins`` list; every subsequently defined subclass
    is appended to that list automatically, so the mount point always knows
    all of its implementations.
    """
    def __init__(cls, name, bases, attrs):
        is_mount_point = not hasattr(cls, 'plugins')
        if is_mount_point:
            # We are processing the mount point itself, so it should not be
            # registered as a plugin; just create the (shared) registry.
            cls.plugins = []
        else:
            # A concrete plugin implementation: record it on the mount point.
            cls.plugins.append(cls)
class PluginMixin(object):
    """Shared helpers inherited by every plugin base class."""

    @classmethod
    def get_name(cls):
        """Return the plugin's fully qualified ``module.ClassName`` name."""
        return '%s.%s' % (cls.__module__, cls.__name__)

    @classmethod
    def get_logger(cls):
        """Return a logger named after the plugin's fully qualified name."""
        return logging.getLogger(cls.get_name())

    @classmethod
    def handle_settings(cls, config_dict, settings):
        """Process this plugin's custom settings while config files are read.

        Most plugins never need to override this: any setting unknown to
        Engineer is automatically added as an attribute on the
        :class:`~engineer.conf.EngineerConfiguration` object. Override only
        when a setting must be processed in some more complicated way before
        landing on the global configuration object. Implementations should
        look up their settings in *config_dict*, set appropriate
        attributes/properties on *settings*, and remove handled entries from
        *config_dict* so they are not processed again by other plugins or the
        default Engineer code.

        :param config_dict: The dict of as-yet unhandled settings in the
            current settings file.
        :param settings: The global
            :class:`~engineer.conf.EngineerConfiguration` object holding all
            settings for the current Engineer process; custom settings should
            be added to it.
        :returns: The (possibly modified) ``config_dict`` object.
        """
        return config_dict
class ThemeProvider(PluginMixin):
    """Base class for Theme :ref:`plugins`.

    Subclasses must supply a value for
    :attr:`~engineer.plugins.ThemeProvider.paths`.

    .. versionchanged:: 0.3.0
    """
    __metaclass__ = PluginMount

    # Subclasses override this with the locations of their theme manifests.
    paths = ()
    """An iterable of absolute paths containing one or more :ref:`theme manifests <theme manifest>`."""
class PostProcessor(PluginMixin):
    """Base class for Post Processor :ref:`plugins`.

    Subclasses hook into the post pipeline by overriding
    :meth:`~engineer.plugins.PostProcessor.preprocess` (runs during import,
    before metadata defaults are applied) and/or
    :meth:`~engineer.plugins.PostProcessor.postprocess` (runs after the post
    has been rendered to HTML and output), as appropriate.
    """
    __metaclass__ = PluginMount

    @classmethod
    def preprocess(cls, post, metadata):
        """Hook called during post import, before any metadata defaults are set.

        Implementations should get/modify the content of *post* through its
        ``content_preprocessed`` attribute so preprocessors from several
        plugins can be chained together. By default that preprocessed value is
        only used to generate the post HTML and is not written back to the
        source file. To make a permanent change that *is* written out, call
        :meth:`~engineer.models.Post.set_finalized_content` with the modified
        content; the :ref:`metadata finalization` plugin then writes it back.
        Writing back therefore requires the
        :attr:`~engineer.conf.EngineerConfiguration.FINALIZE_METADATA` setting
        to be enabled, and the plugin must be explicitly granted the
        ``MODIFY_RAW_POST`` permission (see :ref:`plugin permissions`).
        The method may also add/remove/update properties on the *post* object
        itself as needed.

        .. tip::
            Because :attr:`~engineer.conf.EngineerConfiguration.FINALIZE_METADATA`
            must be enabled for write-back, check that setting in addition to
            any others you use.

        :param post: The post being currently processed by Engineer.
        :param metadata: A dict of the metadata contained in the post source
            file, with no default values applied; it may be added to, updated,
            or otherwise manipulated before Engineer processes it.
        :return: The ``(post, metadata)`` pair.
        """
        return post, metadata

    @classmethod
    def postprocess(cls, post):
        """Hook called after the post is imported, converted to HTML, and output.

        :param post: The post being currently processed by Engineer.
        :return: The *post* parameter.
        """
        return post
class CommandPlugin(PluginMixin):
    """Base class for Command :ref:`plugins`.

    Command plugins extend the :ref:`cmdline` with new commands. Subclasses
    must implement :meth:`~engineer.plugins.CommandPlugin.add_command` and may
    override the :meth:`~engineer.plugins.CommandPlugin.active` classmethod to
    control whether the plugin should be loaded at all.

    .. note::
        Engineer parses its commands with :mod:`argparse`, so some familiarity
        with that module is needed to implement a Command plugin.

    .. seealso:: :ref:`command plugin examples`
    """
    __metaclass__ = PluginMount

    @classmethod
    def active(cls):
        """Return whether the plugin should run and expose its commands.

        Override this to make commands available only when certain criteria
        are met (for example, a custom :ref:`setting<settings>`). When it
        returns ``False``, the plugin does not run and none of its commands
        are available. The default implementation always returns ``True``.
        """
        return True

    @classmethod
    def add_command(cls, subparser, main_parser, common_parser):
        """Register this plugin's command(s) while Engineer builds its
        :class:`~argparse.ArgumentParser`.

        :param subparser: The object returned by Engineer's single
            :meth:`~argparse.ArgumentParser.add_subparsers` call (argparse only
            supports calling it once). Add a new top-level command with
            ``add_parser()`` and attach arguments directly, or create further
            nested commands by adding a parser with its own subparsers.
        :param main_parser: Engineer's top-level
            :class:`~argparse.ArgumentParser`. Mainly useful with argparse
            wrapper libraries such as `argh
            <http://packages.python.org/argh/index.html>`_ that need the root
            parser; when using :mod:`argparse` directly, work with
            ``subparser`` exclusively and ignore this.
        :param common_parser: Parser providing Engineer's
            :ref:`common arguments<engineer>`; pass it to ``add_parser()`` via
            the ``parents`` parameter to make those arguments available to
            your custom commands.
        """
        raise NotImplementedError()
class JinjaEnvironmentPlugin(PluginMixin):
    """
    Base class for JinjaEnvironment :ref:`plugins`.
    JinjaEnvironment plugins can supplement the Jinja 2 environment with things like filters and global
    functions. These additions can then be used in your Jinja templates.
    .. versionadded:: 0.5.0
    """
    __metaclass__ = PluginMount

    filters = {}
    """
    A dict of filters to add to the Jinja environment. The key of each entry should be the name of the filter (as it
    will be used inside templates), while the value should be the filter function. If you require more custom logic
    to build the dict of filters, override the :meth:`~engineer.plugins.JinjaEnvironmentPlugin.get_filters` method.
    """

    globals = {}
    """
    A dict of functions to add to the Jinja environment globally. The key of each entry should be the name of the
    function (as it will be used inside templates), while the value should be the function itself. If you require more
    custom logic to build this dict, override the :meth:`~engineer.plugins.JinjaEnvironmentPlugin.get_globals` method.
    """

    @classmethod
    def _add_filters(cls, jinja_env):
        """Register this plugin's filters on *jinja_env*, warning on name clashes."""
        logger = cls.get_logger()
        filters = cls.get_filters()
        # Fix: dict.iteritems() only exists on Python 2; items() behaves the
        # same here and works on both Python 2 and 3.
        for filter_name, filter_function in filters.items():
            if filter_name in jinja_env.filters:
                # Never silently overwrite a filter that already exists.
                logger.warning("Jinja filter name conflict. "
                               "A plugin is trying to add a filter with a name that conflicts with an existing filter. "
                               "Filter name: %s" % filter_name)
            else:
                jinja_env.filters[filter_name] = filter_function
                logger.debug("Registered Jinja filter: %s" % filter_name)

    @classmethod
    def _add_globals(cls, jinja_env):
        """Register this plugin's globals on *jinja_env*, warning on name clashes."""
        logger = cls.get_logger()
        global_list = cls.get_globals()
        # Fix: dict.iteritems() only exists on Python 2; items() works on both.
        for global_name, the_global in global_list.items():
            if global_name in jinja_env.globals:
                # Never silently overwrite an existing global.
                logger.warning("Jinja global name conflict. "
                               "A plugin is trying to add a global with a name that conflicts with an existing global. "
                               "Global name: %s" % global_name)
            else:
                jinja_env.globals[global_name] = the_global
                logger.debug("Registered Jinja global: %s" % global_name)

    @classmethod
    def update_environment(cls, jinja_env):
        """
        For complete customization of the Jinja environment, subclasses can override this method.
        Subclasses should ensure that the base implementation is called first in their overridden implementation. For
        example:
        .. code-block:: python
            @classmethod
            def update_environment(cls, jinja_env):
                super(BundledFilters, cls).update_environment(jinja_env)
                # some other code here...
        :param jinja_env: The Jinja environment.
        """
        cls._add_filters(jinja_env)
        cls._add_globals(jinja_env)

    @classmethod
    def get_filters(cls):
        """
        If required, subclasses can override this method to return a dict of filters to add to the Jinja environment.
        The default implementation simply returns :attr:`~engineer.plugins.JinjaEnvironmentPlugin.filters`.
        """
        return cls.filters

    @classmethod
    def get_globals(cls):
        """
        If required, subclasses can override this method to return a dict of functions to add to the Jinja
        environment globally. The default implementation simply
        returns :attr:`~engineer.plugins.JinjaEnvironmentPlugin.globals`.
        """
        return cls.globals
| 2,217 | 1,767 | 148 |
d843a9bfff16e3879512c66396513cbd209aeaee | 1,488 | py | Python | main.py | gaato/nkodice-discord | d693b0db37f1ca3480fd4de964434d9800b06a12 | [
"Apache-2.0"
] | null | null | null | main.py | gaato/nkodice-discord | d693b0db37f1ca3480fd4de964434d9800b06a12 | [
"Apache-2.0"
] | null | null | null | main.py | gaato/nkodice-discord | d693b0db37f1ca3480fd4de964434d9800b06a12 | [
"Apache-2.0"
] | null | null | null | import random
import discord
import asyncio
from config import DISCORD_TOKEN
client = discord.Client()
@client.event
@client.event
client.run(DISCORD_TOKEN)
| 24.8 | 102 | 0.584005 | import random
import discord
import asyncio
from config import DISCORD_TOKEN
client = discord.Client()
@client.event
async def on_ready():
    """Print a short startup message once the bot has connected."""
    print('It\'s activated')
async def nkodice(message: discord.Message):
    """Roll five kana dice, post each face one second apart, then post every
    matched word in bold (UNCHI, UNKO, OMANKO/MANKO, CHINKO,
    OCHINCHIN/CHINCHIN — the slash pairs are mutually exclusive).
    """
    rolls = random.choices(('う', 'ま', 'ち', 'ん', 'こ', 'お'), k=5)
    count = rolls.count  # counts are non-negative, so truthiness == ">= 1"
    roles = []
    if count('う') and count('ん') and count('ち'):
        roles.append('UNCHI')
    if count('う') and count('ん') and count('こ'):
        roles.append('UNKO')
    if count('お') and count('ま') and count('ん') and count('こ'):
        roles.append('OMANKO')
    elif count('ま') and count('ん') and count('こ'):
        roles.append('MANKO')
    if count('ち') and count('ん') and count('こ'):
        roles.append('CHINKO')
    if count('お') and count('ち') >= 2 and count('ん') >= 2:
        roles.append('OCHINCHIN')
    elif count('ち') >= 2 and count('ん') >= 2:
        roles.append('CHINCHIN')
    for face in rolls:
        await message.channel.send(face)
        await asyncio.sleep(1.0)
    for word in roles:
        await message.channel.send('***' + word + '***')
        await asyncio.sleep(1.0)
@client.event
async def on_message(message: discord.Message):
    """Dispatch incoming messages: ignore bots; roll on the !nkodice command."""
    if message.author.bot:
        return
    if message.content != '!nkodice':
        return
    await nkodice(message)
# Start the bot; blocks running the event loop until the process exits.
client.run(DISCORD_TOKEN)
| 1,309 | 0 | 67 |
2340b3e5ef8a55bff3aa10e3481dcf91615e104d | 913 | py | Python | lesson_fifth/models.py | erdyneevzt/courses_django | 8b8baf58f30a4cbf91a46ce3709b87abed70d230 | [
"MIT"
] | null | null | null | lesson_fifth/models.py | erdyneevzt/courses_django | 8b8baf58f30a4cbf91a46ce3709b87abed70d230 | [
"MIT"
] | null | null | null | lesson_fifth/models.py | erdyneevzt/courses_django | 8b8baf58f30a4cbf91a46ce3709b87abed70d230 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
| 32.607143 | 128 | 0.694414 | from django.db import models
# Create your models here.
class Author1(models.Model):
    """An article author: first name, surname, and a city from a fixed list."""
    # (stored value, human-readable label) pairs for the ``city`` field.
    # NOTE(review): the stored key 'Saint-Petersburgh' is misspelled, but it is
    # left unchanged because fixing it would invalidate already-saved rows.
    CHOISES_FOR_CITY = (
        ('Moscow', "Москва"),
        ('Saint-Petersburgh', "Санкт-Петербург"),
        ('Novgorod', "Новгород"),  # fixed label typo: was "Новогород"
        ('Tomsk', "Томск"),
    )
    name = models.CharField(max_length=200, verbose_name="Имя автора")
    surname = models.CharField(max_length=200, verbose_name="Фамилия автора")
    city = models.CharField(choices=CHOISES_FOR_CITY, max_length=200, verbose_name="Город", help_text="Выберите город со списка")

    def __str__(self):
        """Return a short label of the form 'Имя <name>'."""
        return 'Имя %s' % self.name
class Article(models.Model):
    """An article written by an :class:`Author1`; deleted when its author is."""
    author = models.ForeignKey(Author1, verbose_name="Автор статьи", on_delete=models.CASCADE)
    title = models.CharField(max_length=100,verbose_name="Заголовок")
    text = models.TextField(max_length=500,verbose_name="Текст статьи")
    def __str__(self):
        """Return the article title."""
        return self.title
32bb733d5b3608f348a3b6a17e4a5a8ff90017dc | 3,432 | py | Python | tests/test_cmip5.py | csiro-dcfp/intake-esm | eaaed45636aa8f831bdef9396ba4eb162ea1ca65 | [
"Apache-2.0"
] | null | null | null | tests/test_cmip5.py | csiro-dcfp/intake-esm | eaaed45636aa8f831bdef9396ba4eb162ea1ca65 | [
"Apache-2.0"
] | null | null | null | tests/test_cmip5.py | csiro-dcfp/intake-esm | eaaed45636aa8f831bdef9396ba4eb162ea1ca65 | [
"Apache-2.0"
] | null | null | null | import os
import intake
import pandas as pd
import pytest
import xarray as xr
from intake_esm import config
here = os.path.abspath(os.path.dirname(__file__))
@pytest.mark.parametrize(
'chunks, expected_chunks',
[
({'time': 1, 'lat': 2, 'lon': 2}, (1, 1, 2, 2)),
({'time': 2, 'lat': 1, 'lon': 1}, (1, 2, 1, 1)),
],
)
| 35.75 | 92 | 0.628205 | import os
import intake
import pandas as pd
import pytest
import xarray as xr
from intake_esm import config
here = os.path.abspath(os.path.dirname(__file__))
def test_build_collection_file():
    """Building a collection from a YAML input definition yields a DataFrame."""
    with config.set({'database-directory': './tests/test_collections'}):
        input_file = os.path.join(here, 'cmip5_collection_input_test.yml')
        collection = intake.open_esm_metadatastore(
            collection_input_definition=input_file, overwrite_existing=True
        )
        assert isinstance(collection.df, pd.DataFrame)
def test_build_collection_dict():
    """Building a collection from an in-memory definition dict yields a DataFrame."""
    with config.set({'database-directory': './tests/test_collections'}):
        # Minimal CMIP5 collection definition pointing at the bundled sample data.
        collection_definition = {
            'name': 'cmip5_test_collection',
            'collection_type': 'cmip5',
            'data_sources': {
                'root_dir': {
                    'name': 'GLADE',
                    'loc_type': 'posix',
                    'direct_access': True,
                    'urlpath': './tests/sample_data/cmip/cmip5',
                    'exclude_dirs': ['*/files/*', 'latest'],
                }
            },
        }
        col = intake.open_esm_metadatastore(
            collection_input_definition=collection_definition, overwrite_existing=True
        )
        assert isinstance(col.df, pd.DataFrame)
def test_search():
    """Searching by model names returns a non-empty query-results DataFrame."""
    with config.set({'database-directory': './tests/test_collections'}):
        collection = intake.open_esm_metadatastore(collection_name='cmip5_test_collection')
        result = collection.search(model=['CanESM2', 'CSIRO-Mk3-6-0'])
        assert isinstance(result.query_results, pd.DataFrame)
        assert not result.query_results.empty
def test_cat():
    """Opening a saved catalog entry exposes query results as a DataFrame."""
    with config.set({'database-directory': './tests/test_collections'}):
        catalog = intake.open_catalog(os.path.join(here, 'cmip5_catalog.yaml'))
        entry = catalog['cmip5_test_collection_b4cf52c3-4879-44c6-955e-f341b1f9b2d9']
        assert isinstance(entry.query_results, pd.DataFrame)
def test_to_xarray_cmip_empty():
    """A query matching no assets raises ValueError from to_xarray()."""
    with config.set({'database-directory': './tests/test_collections'}):
        collection = intake.open_esm_metadatastore(collection_name='cmip5_test_collection')
        empty_cat = collection.search(
            model='CanESM2',
            experiment='rcp85',
            frequency='mon',
            modeling_realm='atmos',
            ensemble_member='r2i1p1',
        )
        with pytest.raises(ValueError):
            empty_cat.to_xarray()
@pytest.mark.parametrize(
    'chunks, expected_chunks',
    [
        ({'time': 1, 'lat': 2, 'lon': 2}, (1, 1, 2, 2)),
        ({'time': 2, 'lat': 1, 'lon': 1}, (1, 2, 1, 1)),
    ],
)
def test_to_xarray_cmip(chunks, expected_chunks):
    """to_xarray() honors the requested chunking and groups data by institution key."""
    with config.set({'database-directory': './tests/test_collections'}):
        c = intake.open_esm_metadatastore(collection_name='cmip5_test_collection')
        # Single-model query: the returned dataset's dask chunks must match.
        cat = c.search(
            variable=['hfls'], frequency='mon', modeling_realm='atmos', model=['CNRM-CM5']
        )
        dset = cat.to_xarray(decode_times=True, chunks=chunks)
        ds = dset['CNRM-CERFACS.CNRM-CM5.historical.mon.atmos']
        assert ds['hfls'].data.chunksize == expected_chunks
        # Test for data from multiple institutions
        cat = c.search(variable=['hfls'], frequency='mon', modeling_realm='atmos')
        ds = cat.to_xarray(decode_times=False, chunks=chunks)
        assert isinstance(ds, dict)
        assert 'CCCma.CanCM4.historical.mon.atmos' in ds.keys()
| 2,941 | 0 | 137 |
6821010700887ff1ebd588f7645ccb249b198f69 | 46,905 | py | Python | venv/lib/python3.8/site-packages/azureml/data/tabular_dataset.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/azureml/data/tabular_dataset.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/azureml/data/tabular_dataset.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Contains functionality for representing data in a tabular format by parsing the provided file or list of files.
For more information, see the article [Add & register
datasets](https://docs.microsoft.com/azure/machine-learning/how-to-create-register-datasets).
To get started working with a tabular dataset, see https://aka.ms/tabulardataset-samplenotebook.
"""
import warnings
from datetime import datetime, timedelta
from azureml._common.exceptions import AzureMLException
from azureml.data.constants import _PUBLIC_API, _DATASET_PROP_TIMESTAMP_FINE, _DATASET_PROP_TIMESTAMP_COARSE, \
_DEPRECATED_TIMESTAMP_NAME, _DEPRECATED_PARTITION_TIMESTAMP_NAME, _ACTION_TYPE_PROFILE, \
_LEGACY_DATASET_ID, _TIMESERIES_WITH_TIMESTAMP_COLUMN_ACTIVITY, \
_TIMESERIES_BEFORE_ACTIVITY, _TIMESERIES_AFTER_ACTIVITY, _TIMESERIES_BETWEEN_ACTIVITY, \
_TIMESERIES_RECENT_ACTIVITY, _HALF_SECOND, _PATITION_BY_ACTIVITY
from azureml.data.dataset_error_handling import _validate_has_data, _validate_has_columns, _try_execute
from azureml.data.abstract_dataset import AbstractDataset
from azureml.data._dataprep_helper import dataprep, get_dataflow_for_execution, get_dataflow_with_meta_flags
from azureml.data._dataset_rest_helper import _restclient, _custom_headers
from azureml.data._loggerfactory import track, _LoggerFactory, collect_datasets_usage
from azureml._base_sdk_common._docstring_wrapper import experimental
from azureml.exceptions import UserErrorException, DatasetTimestampMissingError
_logger = None
class TabularDataset(AbstractDataset):
"""Represents a tabular dataset to use in Azure Machine Learning.
A TabularDataset defines a series of lazily-evaluated, immutable operations to load data from the
data source into tabular representation. Data is not loaded from the source until TabularDataset
is asked to deliver data.
TabularDataset is created using methods like
:func:`azureml.data.dataset_factory.TabularDatasetFactory.from_delimited_files` from the
:class:`azureml.data.dataset_factory.TabularDatasetFactory` class.
For more information, see the article `Add & register
datasets <https://docs.microsoft.com/azure/machine-learning/how-to-create-register-datasets>`_.
To get started working with a tabular dataset, see https://aka.ms/tabulardataset-samplenotebook.
.. remarks::
A TabularDataset can be created from CSV, TSV, Parquet files, or SQL query using the ``from_*``
methods of the :class:`azureml.data.dataset_factory.TabularDatasetFactory` class. You can
perform subsetting operations on a TabularDataset like splitting, skipping, and filtering records.
The result of subsetting is always one or more new TabularDataset objects.
You can also convert a TabularDataset into other formats like a pandas DataFrame.
The actual data loading happens when TabularDataset is asked to deliver the data into another
storage mechanism (e.g. a Pandas Dataframe, or a CSV file).
TabularDataset can be used as input of an experiment run. It can also be registered to workspace
with a specified name and be retrieved by that name later.
"""
    def __init__(self):
        """Initialize a TabularDataset object.
        This constructor is not supposed to be invoked directly. Dataset is intended to be created using
        :class:`azureml.data.dataset_factory.TabularDatasetFactory` class.
        """
        # All shared state lives on the AbstractDataset base class; nothing
        # tabular-specific is initialized here.
        super().__init__()
    @property
    @track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
    def timestamp_columns(self):
        """Return the timestamp columns.
        :return: The column names for timestamp (used to be referred as fine_grain_timestamp) and partition_timestamp
            (used to be referred as coarse grain timestamp) defined for the dataset. Either element is ``None`` when
            the corresponding column has not been assigned.
        :rtype: (str, str)
        """
        # Both names live in the dataset's property bag; a missing key simply
        # yields None rather than raising.
        timestamp = self._properties.get(_DATASET_PROP_TIMESTAMP_FINE, None)
        partition_timestamp = self._properties.get(_DATASET_PROP_TIMESTAMP_COARSE, None)
        return (timestamp, partition_timestamp)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def with_timestamp_columns(self, timestamp=None, partition_timestamp=None, validate=False, **kwargs):
"""Define timestamp columns for the dataset.
.. remarks::
The method defines columns to be used as timestamps. Timestamp columns on a dataset make it possible
to treat the data as time-series data and enable additional capabilities. When a dataset has
both ``timestamp (used to be referred as fine_grain_timestamp)`` and ``partition_timestamp (used to be
referred as coarse grain timestamp)`` specified, the two columns should represent the same timeline.
:param timestamp: The name of column as timestamp (used to be referred as fine_grain_timestamp) (optional).
The default is None(clear).
:type timestamp: str
:param partition_timestamp: The name of column partition_timestamp (used to be referred as coarse grain
timestamp) (optional). The default is None(clear).
:type partition_timestamp: str
:param validate: Indicates whether to validate if specified columns exist in dataset. The default is False.
Validation requires that the data source is accessible from the current compute.
:type validate: bool
:return: Returns a new TabularDataset with timestamp columns defined.
:rtype: azureml.data.TabularDataset
"""
fine_grain_timestamp = kwargs.get(_DEPRECATED_TIMESTAMP_NAME, None)
coarse_grain_timestamp = kwargs.get(_DEPRECATED_PARTITION_TIMESTAMP_NAME, None)
if fine_grain_timestamp:
warnings.warn("fine_grain_timestamp is deprecated, use timestamp.", DeprecationWarning)
if coarse_grain_timestamp:
warnings.warn("coarse_grain_timestamp is deprecated, use partition_timestamp.", DeprecationWarning)
if (timestamp or partition_timestamp) and (fine_grain_timestamp or coarse_grain_timestamp):
raise UserErrorException('fine_grain_timestamp and coarse_grain_timestamp have been replaced by '
'timestamp and partition_timestamp parameters and cannot be used together.')
if not timestamp and partition_timestamp:
raise UserErrorException('partition_timestamp can be assigned only if timestamp is assigned.')
if timestamp and timestamp == partition_timestamp:
raise UserErrorException('partition_timestamp cannot be the same as timestamp.')
if not fine_grain_timestamp and coarse_grain_timestamp:
raise UserErrorException('coarse_grain_timestamp can be assigned only if fine_grain_timestamp is '
'assigned.')
if fine_grain_timestamp and fine_grain_timestamp == coarse_grain_timestamp:
raise UserErrorException('coarse_grain_timestamp cannot be the same as fine_grain_timestamp.')
if validate:
self._validate_timestamp_columns([fine_grain_timestamp, coarse_grain_timestamp])
if timestamp:
fine_grain_timestamp = timestamp
coarse_grain_timestamp = partition_timestamp
if self._registration and self._registration.workspace:
collect_datasets_usage(_get_logger(), _TIMESERIES_WITH_TIMESTAMP_COLUMN_ACTIVITY,
[self], self._registration.workspace, "N/A")
dataset = TabularDataset._create(self._dataflow, self._properties, telemetry_info=self._telemetry_info)
if fine_grain_timestamp:
dataset._properties[_DATASET_PROP_TIMESTAMP_FINE] = fine_grain_timestamp
else:
if _DATASET_PROP_TIMESTAMP_FINE in self._properties:
del dataset._properties[_DATASET_PROP_TIMESTAMP_FINE]
if coarse_grain_timestamp:
dataset._properties[_DATASET_PROP_TIMESTAMP_COARSE] = coarse_grain_timestamp
else:
if _DATASET_PROP_TIMESTAMP_COARSE in self._properties:
del dataset._properties[_DATASET_PROP_TIMESTAMP_COARSE]
return dataset
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def to_pandas_dataframe(self, on_error='null', out_of_range_datetime='null'):
"""Load all records from the dataset into a pandas DataFrame.
:param on_error: How to handle any error values in the dataset, such as those produced by an error while
parsing values. Valid values are 'null' which replaces them with null; and 'fail' which will result in
an exception.
:param out_of_range_datetime: How to handle date-time values that are outside the range supported by Pandas.
Valid values are 'null' which replaces them with null; and 'fail' which will result in an exception.
:return: Returns a pandas DataFrame.
:rtype: pandas.DataFrame
"""
dataflow = get_dataflow_for_execution(self._dataflow, 'to_pandas_dataframe', 'TabularDataset')
df = _try_execute(lambda: dataflow.to_pandas_dataframe(on_error=on_error,
out_of_range_datetime=out_of_range_datetime),
'to_pandas_dataframe',
None if self.id is None else {'id': self.id, 'name': self.name, 'version': self.version})
fine_grain_timestamp = self._properties.get(_DATASET_PROP_TIMESTAMP_FINE, None)
if fine_grain_timestamp is not None and df.empty is False:
df.set_index(fine_grain_timestamp, drop=False, inplace=True)
df.index.rename(None, inplace=True)
return df
@experimental
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def to_dask_dataframe(self, sample_size=10000, dtypes=None, on_error='null', out_of_range_datetime='null'):
"""Return a Dask DataFrame that can lazily read the data in the dataset.
:param sample_size: The number of records to read to determine schema and types.
:param dtypes: An optional dict specifying the expected columns and their dtypes.
`sample_size` is ignored if this is provided.
:param on_error: How to handle any error values in the dataset,
such as those produced by an error while parsing values.
Valid values are 'null' which replaces them with null; and 'fail' which will result in an exception.
:param out_of_range_datetime: How to handle date-time values that are outside the range supported by Pandas.
Valid values are 'null' which replaces them with null; and 'fail' which will result in an exception.
:return: dask.dataframe.core.DataFrame
"""
dataflow = get_dataflow_for_execution(self._dataflow, 'to_dask_dataframe', 'TabularDataset')
dd = _try_execute(lambda: dataflow.to_dask_dataframe(sample_size=sample_size,
dtypes=dtypes,
on_error=on_error,
out_of_range_datetime=out_of_range_datetime),
'to_dask_dataframe',
None if self.id is None else {'id': self.id, 'name': self.name, 'version': self.version})
return dd
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def to_spark_dataframe(self):
"""Load all records from the dataset into a Spark DataFrame.
:return: Returns a Spark DataFrame.
:rtype: pyspark.sql.DataFrame
"""
dataflow = get_dataflow_for_execution(self._dataflow, 'to_spark_dataframe', 'TabularDataset')
return _try_execute(dataflow.to_spark_dataframe,
'to_spark_dataframe',
None if self.id is None else {'id': self.id, 'name': self.name, 'version': self.version})
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def skip(self, count):
"""Skip records from top of the dataset by the specified count.
:param count: The number of records to skip.
:type count: int
:return: Returns a new TabularDataset object representing a dataset with records skipped.
:rtype: azureml.data.TabularDataset
"""
return TabularDataset._create(
self._dataflow.skip(count), self._properties, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def take(self, count):
"""Take a sample of records from top of the dataset by the specified count.
:param count: The number of records to take.
:type count: int
:return: Returns a new TabularDataset object representing the sampled dataset.
:rtype: azureml.data.TabularDataset
"""
return TabularDataset._create(
self._dataflow.take(count), self._properties, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def take_sample(self, probability, seed=None):
"""Take a random sample of records in the dataset approximately by the probability specified.
:param probability: The probability of a record being included in the sample.
:type probability: float
:param seed: Optional seed to use for the random generator.
:type seed: int
:return: Returns a new TabularDataset object representing the sampled dataset.
:rtype: azureml.data.TabularDataset
"""
return TabularDataset._create(
self._dataflow.take_sample(probability, seed), self._properties, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def random_split(self, percentage, seed=None):
"""Split records in the dataset into two parts randomly and approximately by the percentage specified.
The first dataset contains approximately ``percentage`` of the total records and the second dataset the
remaining records.
:param percentage: The approximate percentage to split the dataset by. This must be a number between
0.0 and 1.0.
:type percentage: float
:param seed: Optional seed to use for the random generator.
:type seed: int
:return: Returns a tuple of new TabularDataset objects representing the two datasets after the split.
:rtype: (azureml.data.TabularDataset, azureml.data.TabularDataset)
"""
dataflow1, dataflow2 = self._dataflow.random_split(percentage, seed)
return (
TabularDataset._create(dataflow1, self._properties, telemetry_info=self._telemetry_info),
TabularDataset._create(dataflow2, self._properties, telemetry_info=self._telemetry_info)
)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def keep_columns(self, columns, validate=False):
"""Keep the specified columns and drops all others from the dataset.
If a timeseries column is dropped, the corresponding capabilities will be dropped for the
returned dataset as well.
:param columns: The name or a list of names for the columns to keep.
:type columns: typing.Union[str, builtin.list[str]]
:param validate: Indicates whether to validate if data can be loaded from the returned dataset.
The default is False. Validation requires that the data source is accessible from current compute.
:type validate: bool
:return: Returns a new TabularDataset object with only the specified columns kept.
:rtype: azureml.data.TabularDataset
"""
dataflow = self._dataflow.keep_columns(columns, validate_column_exists=False)
if validate:
_validate_has_data(dataflow,
('Cannot load any data from the dataset with only columns {} kept. Make sure the '
'specified columns exist in the current dataset.')
.format(columns if isinstance(columns, list) else [columns]))
dataset = TabularDataset._create(dataflow, self._properties, telemetry_info=self._telemetry_info)
if isinstance(columns, str):
columns = [columns]
ts_cols = self.timestamp_columns
trait_dropped = None
if ts_cols[0] is not None:
if ts_cols[0] not in columns:
dataset = dataset.with_timestamp_columns(None)
trait_dropped = 'fine_grain_timestamp, coarse_grain_timestamp'
elif ts_cols[1] is not None and ts_cols[1] not in columns:
dataset = dataset.with_timestamp_columns(ts_cols[0])
trait_dropped = 'coarse_grain_timestamp'
if trait_dropped is not None:
_get_logger().info('Dropping trait ({0}) on dataset (id={1}) during keep_columns.'
.format(trait_dropped, self.id))
return dataset
    @experimental
    @track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
    def partition_by(self, partition_keys, target, name=None, show_progress=True, partition_as_file_dataset=False):
        """Partitioned data will be copied and output to the destination specified by target.

        create the dataset from the outputted data path with partition format, register dataset if name is provided,
        return the dataset for the new data path with partitions

        .. code-block:: python

            ds = Dataset.get_by_name('test') # indexed by country, state, partition_date

            # #1: call partition_by locally
            new_ds = ds.partition_by(name="repartitioned_ds", partition_keys=['country'],
                        target=DataPath(datastore, "repartition"))
            partition_keys = newds.partition_keys # ['country']

            # new_ds can be passed to PRS as input dataset

        :param partition_keys: Required, partition keys
        :type partition_keys: builtin.list[str]
        :param target: Required, the datastore path where the dataframe parquet data will be uploaded to.
            A guid folder will be generated under the target path to avoid conflict.
        :type target: azureml.data.datapath.DataPath, azureml.core.datastore.Datastore
            or tuple(azureml.core.datastore.Datastore, str) object
        :param name: Optional, The registration name.
        :type name: str
        :param show_progress: Optional, indicates whether to show progress of the upload in the console.
            Defaults to be True.
        :type show_progress: bool
        :param partition_as_file_dataset: Optional, indicates whether returns a filedataset or not.
            Defaults to be False.
        :type partition_as_file_dataset: bool
        :return: The saved or registered dataset.
        :rtype: azureml.data.TabularDataset
        """
        # Imports are kept local so the heavy factory/dataprep modules load only when this API is used.
        from uuid import uuid4
        from azureml.exceptions import UserErrorException
        from azureml.core import Dataset
        from azureml.data.data_reference import DataReference
        from azureml.data._dataset_factory_helper import get_progress_logger, parse_target
        from azureml.dataprep import FieldType
        from azureml.data.dataset_factory import TabularDatasetFactory
        import time
        starting_time = time.process_time()
        # console() is a no-op when show_progress is False.
        console = get_progress_logger(show_progress)
        console("Validating arguments.")
        if len(partition_keys) == 0:
            raise UserErrorException("partition_keys cannot be empty")
        # Every partition key must be an existing column; collect all bad keys before raising.
        column_types = self._dataflow.dtypes
        invalid_keys = []
        for key in partition_keys:
            if key not in column_types:
                invalid_keys.append(key)
        if len(invalid_keys) != 0:
            raise UserErrorException("{0} are invalid partition keys".format(invalid_keys))
        if len(partition_keys) != len(set(partition_keys)):
            raise UserErrorException("partition_keys cannot have duplicates")
        console("Arguments validated.")
        # A fresh GUID folder under the target path avoids clobbering previous outputs.
        guid = uuid4()
        datastore, relative_path = parse_target(target)
        relative_path_with_guid = "/%s/%s/" % (relative_path, guid)
        # partition_format encodes key values in folder names for parsing them back on read;
        # partition_path is the matching glob used to locate the written files.
        partition_format = relative_path_with_guid
        partition_path = relative_path_with_guid
        saved_dataset_key_column_types = {}
        for key in partition_keys:
            if column_types[key] == FieldType.DATE:
                # Date keys are serialized into the folder name and re-parsed by the
                # partition format, so they are removed from the explicit type map below.
                partition_format = partition_format + '{' + key + ':yyyyMMddHHmmss}*/'
                del column_types[key]
            else:
                partition_format = partition_format + '{' + key + '}/'
            partition_path = partition_path + '*/'
            if key in column_types:
                # Non-date keys keep their original dtype so it can be re-applied to the new dataset.
                saved_dataset_key_column_types[key] = column_types[key]
        partition_format = partition_format + '*.parquet'
        partition_path = partition_path + '*.parquet'
        console("Uploading file to {}".format(relative_path_with_guid))
        # run_local() materializes the dataflow and performs the actual partitioned upload.
        self._dataflow.write_to_parquet(partition_keys=partition_keys,
                                        directory_path=DataReference(datastore=datastore).
                                        path(relative_path_with_guid)).run_local()
        console("Successfully uploaded file to datastore.")
        console("Creating a new dataset.")
        if partition_as_file_dataset:
            saved_dataset = Dataset.File.\
                from_files(path=(datastore, partition_path), partition_format=partition_format)
        else:
            saved_dataset = TabularDatasetFactory.\
                from_parquet_files(path=(datastore, partition_path), partition_format=partition_format)
            # Re-apply the recorded dtypes of the partition key columns to the tabular result.
            saved_dataset = TabularDataset._create(saved_dataset._dataflow.
                                                   set_column_types(saved_dataset_key_column_types),
                                                   self._properties, telemetry_info=self._telemetry_info)
        console("Successfully created a new dataset.")
        if self._registration and self._registration.workspace:
            collect_datasets_usage(_get_logger(), _PATITION_BY_ACTIVITY,
                                   [self], self._registration.workspace, "N/A",
                                   {"execution_time": time.process_time() - starting_time,
                                    "number_of_partition_keys": len(partition_keys)})
        if name is None:
            return saved_dataset
        console("registering a new dataset.")
        registered_dataset = saved_dataset.register(datastore.workspace, name, create_new_version=True)
        console("Successfully created and registered a new dataset.")
        return registered_dataset
@experimental
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def filter(self, expression):
"""
Filter the data, leaving only the records that match the specified expression.
.. remarks::
Expressions are started by indexing the Dataset with the name of a column. They support a variety of
functions and operators and can be combined using logical operators. The resulting expression will be
lazily evaluated for each record when a data pull occurs and not where it is defined.
.. code-block:: python
dataset['myColumn'] > dataset['columnToCompareAgainst']
dataset['myColumn'].starts_with('prefix')
:param expression: The expression to evaluate.
:type expression: any
:return: The modified dataset (unregistered).
:rtype: azureml.data.TabularDataset
"""
dataflow = self._dataflow
dataflow = dataflow.filter(expression)
return TabularDataset._create(dataflow, self._properties, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def drop_columns(self, columns):
"""Drop the specified columns from the dataset.
If a timeseries column is dropped, the corresponding capabilities will be dropped for the
returned dataset as well.
:param columns: The name or a list of names for the columns to drop.
:type columns: typing.Union[str, builtin.list[str]]
:return: Returns a new TabularDataset object with the specified columns dropped.
:rtype: azureml.data.TabularDataset
"""
dataset = TabularDataset._create(
self._dataflow.drop_columns(columns), self._properties, telemetry_info=self._telemetry_info)
if isinstance(columns, str):
columns = [columns]
ts_cols = self.timestamp_columns
trait_dropped = None
if ts_cols[0] is not None:
if ts_cols[0] in columns:
dataset = dataset.with_timestamp_columns(None)
trait_dropped = 'fine_grain_timestamp, coarse_grain_timestamp'
elif ts_cols[1] is not None and ts_cols[1] in columns:
dataset = dataset.with_timestamp_columns(ts_cols[0])
trait_dropped = 'coarse_grain_timestamp'
if trait_dropped is not None:
_get_logger().info('Dropping trait ({0}) on dataset (id={1}) during drop_columns.'
.format(trait_dropped, self.id))
return dataset
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def to_parquet_files(self):
"""Convert the current dataset into a FileDataset containing Parquet files.
The resulting dataset will contain one or more Parquet files, each corresponding to a partition of data
from the current dataset. These files are not materialized until they are downloaded or read from.
:return: Returns a new FileDataset object with a set of Parquet files containing the data in this dataset.
:rtype: azureml.data.FileDataset
"""
from azureml.data.file_dataset import FileDataset
parquet_dataflow = self._dataflow.to_parquet_streams()
parquet_dataflow = get_dataflow_with_meta_flags(parquet_dataflow, file_projection='parquet')
return FileDataset._create(parquet_dataflow, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def to_csv_files(self, separator=','):
"""Convert the current dataset into a FileDataset containing CSV files.
The resulting dataset will contain one or more CSV files, each corresponding to a partition of data
from the current dataset. These files are not materialized until they are downloaded or read from.
:param separator: The separator to use to separate values in the resulting file.
:type separator: str
:return: Returns a new FileDataset object with a set of CSV files containing the data in this dataset.
:rtype: azureml.data.FileDataset
"""
from azureml.data.file_dataset import FileDataset
csv_dataflow = self._dataflow.to_csv_streams(separator=separator)
csv_dataflow = get_dataflow_with_meta_flags(csv_dataflow, file_projection='csv')
return FileDataset._create(csv_dataflow, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def time_before(self, end_time, include_boundary=True, validate=True):
"""Filter TabularDataset with time stamp columns before a specified end time.
:param end_time: Upper bound for filtering data.
:type end_time: datetime.datetime
:param include_boundary: Indicate if the row associated with the boundary time (``end_time``) should be
included.
:type include_boundary: bool
:param validate: Indicates whether to validate if specified columns exist in dataset. The default is True.
Validation requires that the data source is accessible from the current compute.
:type validate: bool
:return: A TabularDataset with the new filtered dataset.
:rtype: azureml.data.TabularDataset
"""
if self._registration and self._registration.workspace:
collect_datasets_usage(_get_logger(), _TIMESERIES_BEFORE_ACTIVITY,
[self], self._registration.workspace, "N/A")
return self._time_filter(self.time_before.__name__,
upper_bound=end_time,
include_boundary=include_boundary,
validate=validate)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def time_after(self, start_time, include_boundary=True, validate=True):
"""Filter TabularDataset with time stamp columns after a specified start time.
:param start_time: The lower bound for filtering data.
:type start_time: datetime.datetime
:param include_boundary: Indicate if the row associated with the boundary time (``start_time``) should be
included.
:type include_boundary: bool
:param validate: Indicates whether to validate if specified columns exist in dataset. The default is True.
Validation requires that the data source is accessible from the current compute.
:type validate: bool
:return: A TabularDataset with the new filtered dataset.
:rtype: azureml.data.TabularDataset
"""
if self._registration and self._registration.workspace:
collect_datasets_usage(_get_logger(), _TIMESERIES_AFTER_ACTIVITY,
[self], self._registration.workspace, "N/A")
return self._time_filter(self.time_after.__name__,
lower_bound=start_time,
include_boundary=include_boundary,
validate=validate)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def time_recent(self, time_delta, include_boundary=True, validate=True):
"""Filter TabularDataset to contain only the specified duration (amount) of recent data.
:param time_delta: The duration (amount) of recent data to retrieve.
:type time_delta: datetime.timedelta
:param include_boundary: Indicate if the row associated with the boundary time (``time_delta``)
should be included.
:type include_boundary: bool
:param validate: Indicates whether to validate if specified columns exist in dataset. The default is True.
Validation requires that the data source is accessible from the current compute.
:type validate: bool
:return: A TabularDataset with the new filtered dataset.
:rtype: azureml.data.TabularDataset
"""
if self._registration and self._registration.workspace:
collect_datasets_usage(_get_logger(), _TIMESERIES_RECENT_ACTIVITY,
[self], self._registration.workspace, "N/A")
start_time = datetime.now() - time_delta
return self._time_filter(self.time_recent.__name__,
lower_bound=start_time,
include_boundary=include_boundary,
validate=validate)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def time_between(self, start_time, end_time, include_boundary=True, validate=True):
"""Filter TabularDataset between a specified start and end time.
:param start_time: The Lower bound for filtering data.
:type start_time: datetime.datetime
:param end_time: The upper bound for filtering data.
:type end_time: datetime.datetime
:param include_boundary: Indicate if the row associated with the boundary time (``start_end`` and
``end_time``) should be included.
:type include_boundary: bool
:param validate: Indicates whether to validate if specified columns exist in dataset. The default is True.
Validation requires that the data source is accessible from the current compute.
:type validate: bool
:return: A TabularDataset with the new filtered dataset.
:rtype: azureml.data.TabularDataset
"""
if self._registration and self._registration.workspace:
collect_datasets_usage(_get_logger(), _TIMESERIES_BETWEEN_ACTIVITY,
[self], self._registration.workspace, "N/A")
return self._time_filter(self.time_between.__name__,
lower_bound=start_time,
upper_bound=end_time,
include_boundary=include_boundary,
validate=validate)
@experimental
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'})
def submit_profile_run(self, compute_target, experiment):
"""Submit an experimentation run to calculate data profile.
A data profile can be very useful to understand the input data, identify anomalies and missing values
by providing useful information about the data like column type, missing values, etc.
:param compute_target: The compute target to run the
profile calculation experiment on. Specify 'local' to use local compute.
See https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.computetarget
for more information on compute targets.
:type compute_target: typing.Union[str, azureml.core.compute.ComputeTarget]
:param experiment: The experiment object.
See https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.experiment.experiment
for more information on experiments.
:type experiment: azureml.core.experiment.Experiment
:return: An object of type DatasetProfileRun class.
:rtype: azureml.data.dataset_profile_run.DatasetProfileRun
"""
from azureml.core import Experiment, ComputeTarget
if not (isinstance(compute_target, ComputeTarget) or isinstance(compute_target, str)):
raise UserErrorException('Invalid type. compute_target should be either of type ComputeTarget or string '
'but was found to be of type {0}.'.format(type(compute_target)))
if not isinstance(experiment, Experiment):
raise UserErrorException('Invalid type. experiment should be of type azureml.core.Experiment but '
'was found to be of type {0}.'.format(type(experiment)))
from azureml.data.dataset_profile_run_config import DatasetProfileRunConfig
dprc = DatasetProfileRunConfig(self, compute_target=compute_target)
profile_run = experiment.submit(dprc)
profile_run.run.wait_for_completion(raise_on_error=True, wait_post_processing=True)
return profile_run
    @experimental
    @track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'})
    def get_profile(self, workspace=None):
        """Get data profile from the latest profile run submitted for this or the same dataset in the workspace.

        :param workspace: The workspace where profile run was submitted. Defaults to the workspace of this dataset.
            Required if dataset is not associated to a workspace.
            See https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.workspace.workspace
            for more information on workspaces.
        :type workspace: azureml.core.Workspace
        :return: Profile result from the latest profile run of type DatasetProfile.
        :rtype: azureml.data.dataset_profile.DatasetProfile
        :raises azureml._common.exceptions.AzureMLException: If no profile result exists for the
            dataset or the stored result artifact cannot be parsed.
        """
        workspace = self._ensure_workspace(workspace)
        saved_dataset_id = self._ensure_saved(workspace)
        # arguments [{'generate_preview': 'True', 'row_count': '1000'}] are added to ensure
        # that requestHash is same. The GenerateProfileWithPreview API add these arguments on service side.
        # If any changes are made there, this should also be changed.
        from azureml._restclient.models import ActionRequestDto
        request_dto = ActionRequestDto(
            action_type=_ACTION_TYPE_PROFILE,
            saved_dataset_id=saved_dataset_id,
            arguments={'generate_preview': 'True', 'row_count': '1000'})
        # Look up the most recent profile action result for this saved dataset.
        action_result_dto = _restclient(workspace).dataset.get_action_result(
            workspace.subscription_id,
            workspace.resource_group,
            workspace.name,
            dataset_id=_LEGACY_DATASET_ID,
            request=request_dto,
            custom_headers=_custom_headers)
        result_artifact_ids = action_result_dto.result_artifact_ids
        if result_artifact_ids is None or len(result_artifact_ids) == 0:
            raise AzureMLException('Unable to fetch profile results. Please submit a new profile run.')
        # Only the first artifact carries the serialized profile; its id has the form
        # "<origin>/<container>/<path>", hence the 2-way split below.
        result_artifact = result_artifact_ids[0]
        from azureml._restclient.artifacts_client import ArtifactsClient
        content = ArtifactsClient(workspace.service_context).download_artifact_contents_to_string(
            *result_artifact.split("/", 2))
        try:
            from azureml.data.dataset_profile import DatasetProfile
            profile = DatasetProfile(saved_dataset_id, action_result_dto.run_id, action_result_dto.experiment_name,
                                     workspace, dataprep().DataProfile._from_json(content))
        except Exception:
            # Any deserialization failure is reported as a corrupted profile; the original
            # exception is logged out-of-band rather than chained.
            errormsg = 'Unable to fetch profile since profile result is corrupted. Please submit a new profile run.'
            _get_logger().error(errormsg)
            raise AzureMLException(errormsg)
        return profile
@experimental
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'})
def get_profile_runs(self, workspace=None):
"""Return previous profile runs associated with this or same dataset in the workspace.
:param workspace: The workspace where profile run was submitted. Defaults to the workspace of this dataset.
Required if dataset is not associated to a workspace.
See https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.workspace.workspace
for more information on workspaces.
:type workspace: azureml.core.Workspace
:return: iterator object of type azureml.core.Run.
:rtype: iter(azureml.core.Run)
"""
workspace = self._ensure_workspace(workspace)
from azureml._restclient.models import ActionRequestDto
request_dto = ActionRequestDto(
action_type=_ACTION_TYPE_PROFILE,
saved_dataset_id=self._ensure_saved(workspace),
arguments={'generate_preview': 'True', 'row_count': '1000'})
continuation_token = None
paginated_action_dto_list = []
index = 0
while index == 0 or continuation_token is not None:
paginated_action_dto = _restclient(workspace).dataset.list_actions_from_request(
workspace.subscription_id,
workspace.resource_group,
workspace.name,
dataset_id=_LEGACY_DATASET_ID,
request=request_dto,
count=1000,
custom_headers=_custom_headers,
continuation_token=continuation_token)
index = index + 1
for item in paginated_action_dto.value:
paginated_action_dto_list.append(item)
continuation_token = paginated_action_dto.continuation_token
if not paginated_action_dto_list:
raise AzureMLException('Unable to find any run information. Please submit a new profile run.')
run_list = []
for item in paginated_action_dto_list:
flag = True
# This is done to ensure backward compatibility. Earlier we do not persist
# run_id for local runs. Hence for older runs run_id is empty.
if item.run_id is None:
continue
from azureml.core import Experiment, get_run
experiment = Experiment(workspace, item.experiment_name)
try:
run = get_run(experiment, item.run_id)
except Exception:
flag = False
if flag:
run_list.append(run)
return iter(run_list)
| 54.540698 | 120 | 0.666965 | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Contains functionality for representing data in a tabular format by parsing the provided file or list of files.
For more information, see the article [Add & register
datasets](https://docs.microsoft.com/azure/machine-learning/how-to-create-register-datasets).
To get started working with a tabular dataset, see https://aka.ms/tabulardataset-samplenotebook.
"""
import warnings
from datetime import datetime, timedelta
from azureml._common.exceptions import AzureMLException
from azureml.data.constants import _PUBLIC_API, _DATASET_PROP_TIMESTAMP_FINE, _DATASET_PROP_TIMESTAMP_COARSE, \
_DEPRECATED_TIMESTAMP_NAME, _DEPRECATED_PARTITION_TIMESTAMP_NAME, _ACTION_TYPE_PROFILE, \
_LEGACY_DATASET_ID, _TIMESERIES_WITH_TIMESTAMP_COLUMN_ACTIVITY, \
_TIMESERIES_BEFORE_ACTIVITY, _TIMESERIES_AFTER_ACTIVITY, _TIMESERIES_BETWEEN_ACTIVITY, \
_TIMESERIES_RECENT_ACTIVITY, _HALF_SECOND, _PATITION_BY_ACTIVITY
from azureml.data.dataset_error_handling import _validate_has_data, _validate_has_columns, _try_execute
from azureml.data.abstract_dataset import AbstractDataset
from azureml.data._dataprep_helper import dataprep, get_dataflow_for_execution, get_dataflow_with_meta_flags
from azureml.data._dataset_rest_helper import _restclient, _custom_headers
from azureml.data._loggerfactory import track, _LoggerFactory, collect_datasets_usage
from azureml._base_sdk_common._docstring_wrapper import experimental
from azureml.exceptions import UserErrorException, DatasetTimestampMissingError
_logger = None
def _get_logger():
    """Return the module-level logger, creating it lazily on first use."""
    global _logger
    if _logger is not None:
        return _logger
    _logger = _LoggerFactory.get_logger(__name__)
    return _logger
class TabularDataset(AbstractDataset):
"""Represents a tabular dataset to use in Azure Machine Learning.
A TabularDataset defines a series of lazily-evaluated, immutable operations to load data from the
data source into tabular representation. Data is not loaded from the source until TabularDataset
is asked to deliver data.
TabularDataset is created using methods like
:func:`azureml.data.dataset_factory.TabularDatasetFactory.from_delimited_files` from the
:class:`azureml.data.dataset_factory.TabularDatasetFactory` class.
For more information, see the article `Add & register
datasets <https://docs.microsoft.com/azure/machine-learning/how-to-create-register-datasets>`_.
To get started working with a tabular dataset, see https://aka.ms/tabulardataset-samplenotebook.
.. remarks::
A TabularDataset can be created from CSV, TSV, Parquet files, or SQL query using the ``from_*``
methods of the :class:`azureml.data.dataset_factory.TabularDatasetFactory` class. You can
perform subsetting operations on a TabularDataset like splitting, skipping, and filtering records.
The result of subsetting is always one or more new TabularDataset objects.
You can also convert a TabularDataset into other formats like a pandas DataFrame.
The actual data loading happens when TabularDataset is asked to deliver the data into another
storage mechanism (e.g. a Pandas Dataframe, or a CSV file).
TabularDataset can be used as input of an experiment run. It can also be registered to workspace
with a specified name and be retrieved by that name later.
"""
    def __init__(self):
        """Initialize a TabularDataset object.

        This constructor is not supposed to be invoked directly. Dataset is intended to be created using
        :class:`azureml.data.dataset_factory.TabularDatasetFactory` class.
        """
        # All state lives on AbstractDataset; this subclass adds no instance fields of its own.
        super().__init__()
@property
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def timestamp_columns(self):
"""Return the timestamp columns.
:return: The column names for timestamp (used to be referred as fine_grain_timestamp) and partition_timestamp
(used to be referred as coarse grain timestamp) defined for the dataset.
:rtype: (str, str)
"""
timestamp = self._properties.get(_DATASET_PROP_TIMESTAMP_FINE, None)
partition_timestamp = self._properties.get(_DATASET_PROP_TIMESTAMP_COARSE, None)
return (timestamp, partition_timestamp)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def with_timestamp_columns(self, timestamp=None, partition_timestamp=None, validate=False, **kwargs):
"""Define timestamp columns for the dataset.
.. remarks::
The method defines columns to be used as timestamps. Timestamp columns on a dataset make it possible
to treat the data as time-series data and enable additional capabilities. When a dataset has
both ``timestamp (used to be referred as fine_grain_timestamp)`` and ``partition_timestamp (used to be
referred as coarse grain timestamp)`` specified, the two columns should represent the same timeline.
:param timestamp: The name of column as timestamp (used to be referred as fine_grain_timestamp) (optional).
The default is None(clear).
:type timestamp: str
:param partition_timestamp: The name of column partition_timestamp (used to be referred as coarse grain
timestamp) (optional). The default is None(clear).
:type partition_timestamp: str
:param validate: Indicates whether to validate if specified columns exist in dataset. The default is False.
Validation requires that the data source is accessible from the current compute.
:type validate: bool
:return: Returns a new TabularDataset with timestamp columns defined.
:rtype: azureml.data.TabularDataset
"""
fine_grain_timestamp = kwargs.get(_DEPRECATED_TIMESTAMP_NAME, None)
coarse_grain_timestamp = kwargs.get(_DEPRECATED_PARTITION_TIMESTAMP_NAME, None)
if fine_grain_timestamp:
warnings.warn("fine_grain_timestamp is deprecated, use timestamp.", DeprecationWarning)
if coarse_grain_timestamp:
warnings.warn("coarse_grain_timestamp is deprecated, use partition_timestamp.", DeprecationWarning)
if (timestamp or partition_timestamp) and (fine_grain_timestamp or coarse_grain_timestamp):
raise UserErrorException('fine_grain_timestamp and coarse_grain_timestamp have been replaced by '
'timestamp and partition_timestamp parameters and cannot be used together.')
if not timestamp and partition_timestamp:
raise UserErrorException('partition_timestamp can be assigned only if timestamp is assigned.')
if timestamp and timestamp == partition_timestamp:
raise UserErrorException('partition_timestamp cannot be the same as timestamp.')
if not fine_grain_timestamp and coarse_grain_timestamp:
raise UserErrorException('coarse_grain_timestamp can be assigned only if fine_grain_timestamp is '
'assigned.')
if fine_grain_timestamp and fine_grain_timestamp == coarse_grain_timestamp:
raise UserErrorException('coarse_grain_timestamp cannot be the same as fine_grain_timestamp.')
if validate:
self._validate_timestamp_columns([fine_grain_timestamp, coarse_grain_timestamp])
if timestamp:
fine_grain_timestamp = timestamp
coarse_grain_timestamp = partition_timestamp
if self._registration and self._registration.workspace:
collect_datasets_usage(_get_logger(), _TIMESERIES_WITH_TIMESTAMP_COLUMN_ACTIVITY,
[self], self._registration.workspace, "N/A")
dataset = TabularDataset._create(self._dataflow, self._properties, telemetry_info=self._telemetry_info)
if fine_grain_timestamp:
dataset._properties[_DATASET_PROP_TIMESTAMP_FINE] = fine_grain_timestamp
else:
if _DATASET_PROP_TIMESTAMP_FINE in self._properties:
del dataset._properties[_DATASET_PROP_TIMESTAMP_FINE]
if coarse_grain_timestamp:
dataset._properties[_DATASET_PROP_TIMESTAMP_COARSE] = coarse_grain_timestamp
else:
if _DATASET_PROP_TIMESTAMP_COARSE in self._properties:
del dataset._properties[_DATASET_PROP_TIMESTAMP_COARSE]
return dataset
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def to_pandas_dataframe(self, on_error='null', out_of_range_datetime='null'):
"""Load all records from the dataset into a pandas DataFrame.
:param on_error: How to handle any error values in the dataset, such as those produced by an error while
parsing values. Valid values are 'null' which replaces them with null; and 'fail' which will result in
an exception.
:param out_of_range_datetime: How to handle date-time values that are outside the range supported by Pandas.
Valid values are 'null' which replaces them with null; and 'fail' which will result in an exception.
:return: Returns a pandas DataFrame.
:rtype: pandas.DataFrame
"""
dataflow = get_dataflow_for_execution(self._dataflow, 'to_pandas_dataframe', 'TabularDataset')
df = _try_execute(lambda: dataflow.to_pandas_dataframe(on_error=on_error,
out_of_range_datetime=out_of_range_datetime),
'to_pandas_dataframe',
None if self.id is None else {'id': self.id, 'name': self.name, 'version': self.version})
fine_grain_timestamp = self._properties.get(_DATASET_PROP_TIMESTAMP_FINE, None)
if fine_grain_timestamp is not None and df.empty is False:
df.set_index(fine_grain_timestamp, drop=False, inplace=True)
df.index.rename(None, inplace=True)
return df
@experimental
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def to_dask_dataframe(self, sample_size=10000, dtypes=None, on_error='null', out_of_range_datetime='null'):
"""Return a Dask DataFrame that can lazily read the data in the dataset.
:param sample_size: The number of records to read to determine schema and types.
:param dtypes: An optional dict specifying the expected columns and their dtypes.
`sample_size` is ignored if this is provided.
:param on_error: How to handle any error values in the dataset,
such as those produced by an error while parsing values.
Valid values are 'null' which replaces them with null; and 'fail' which will result in an exception.
:param out_of_range_datetime: How to handle date-time values that are outside the range supported by Pandas.
Valid values are 'null' which replaces them with null; and 'fail' which will result in an exception.
:return: dask.dataframe.core.DataFrame
"""
dataflow = get_dataflow_for_execution(self._dataflow, 'to_dask_dataframe', 'TabularDataset')
dd = _try_execute(lambda: dataflow.to_dask_dataframe(sample_size=sample_size,
dtypes=dtypes,
on_error=on_error,
out_of_range_datetime=out_of_range_datetime),
'to_dask_dataframe',
None if self.id is None else {'id': self.id, 'name': self.name, 'version': self.version})
return dd
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def to_spark_dataframe(self):
"""Load all records from the dataset into a Spark DataFrame.
:return: Returns a Spark DataFrame.
:rtype: pyspark.sql.DataFrame
"""
dataflow = get_dataflow_for_execution(self._dataflow, 'to_spark_dataframe', 'TabularDataset')
return _try_execute(dataflow.to_spark_dataframe,
'to_spark_dataframe',
None if self.id is None else {'id': self.id, 'name': self.name, 'version': self.version})
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def skip(self, count):
"""Skip records from top of the dataset by the specified count.
:param count: The number of records to skip.
:type count: int
:return: Returns a new TabularDataset object representing a dataset with records skipped.
:rtype: azureml.data.TabularDataset
"""
return TabularDataset._create(
self._dataflow.skip(count), self._properties, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def take(self, count):
"""Take a sample of records from top of the dataset by the specified count.
:param count: The number of records to take.
:type count: int
:return: Returns a new TabularDataset object representing the sampled dataset.
:rtype: azureml.data.TabularDataset
"""
return TabularDataset._create(
self._dataflow.take(count), self._properties, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def take_sample(self, probability, seed=None):
"""Take a random sample of records in the dataset approximately by the probability specified.
:param probability: The probability of a record being included in the sample.
:type probability: float
:param seed: Optional seed to use for the random generator.
:type seed: int
:return: Returns a new TabularDataset object representing the sampled dataset.
:rtype: azureml.data.TabularDataset
"""
return TabularDataset._create(
self._dataflow.take_sample(probability, seed), self._properties, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def random_split(self, percentage, seed=None):
"""Split records in the dataset into two parts randomly and approximately by the percentage specified.
The first dataset contains approximately ``percentage`` of the total records and the second dataset the
remaining records.
:param percentage: The approximate percentage to split the dataset by. This must be a number between
0.0 and 1.0.
:type percentage: float
:param seed: Optional seed to use for the random generator.
:type seed: int
:return: Returns a tuple of new TabularDataset objects representing the two datasets after the split.
:rtype: (azureml.data.TabularDataset, azureml.data.TabularDataset)
"""
dataflow1, dataflow2 = self._dataflow.random_split(percentage, seed)
return (
TabularDataset._create(dataflow1, self._properties, telemetry_info=self._telemetry_info),
TabularDataset._create(dataflow2, self._properties, telemetry_info=self._telemetry_info)
)
    @track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
    def keep_columns(self, columns, validate=False):
        """Keep the specified columns and drops all others from the dataset.

        If a timeseries column is dropped, the corresponding capabilities will be dropped for the
        returned dataset as well.

        :param columns: The name or a list of names for the columns to keep.
        :type columns: typing.Union[str, builtin.list[str]]
        :param validate: Indicates whether to validate if data can be loaded from the returned dataset.
            The default is False. Validation requires that the data source is accessible from current compute.
        :type validate: bool
        :return: Returns a new TabularDataset object with only the specified columns kept.
        :rtype: azureml.data.TabularDataset
        """
        # Column existence is deliberately not checked here; when `validate` is set, a data
        # pull below confirms the projection still yields rows.
        dataflow = self._dataflow.keep_columns(columns, validate_column_exists=False)
        if validate:
            _validate_has_data(dataflow,
                               ('Cannot load any data from the dataset with only columns {} kept. Make sure the '
                                'specified columns exist in the current dataset.')
                               .format(columns if isinstance(columns, list) else [columns]))
        dataset = TabularDataset._create(dataflow, self._properties, telemetry_info=self._telemetry_info)
        # Normalize to a list so the membership checks below also work for a single column name.
        if isinstance(columns, str):
            columns = [columns]
        ts_cols = self.timestamp_columns
        trait_dropped = None
        # Timestamp traits only survive if their columns are kept: losing the fine-grain
        # timestamp clears both traits; losing only the coarse-grain one keeps the fine-grain.
        if ts_cols[0] is not None:
            if ts_cols[0] not in columns:
                dataset = dataset.with_timestamp_columns(None)
                trait_dropped = 'fine_grain_timestamp, coarse_grain_timestamp'
            elif ts_cols[1] is not None and ts_cols[1] not in columns:
                dataset = dataset.with_timestamp_columns(ts_cols[0])
                trait_dropped = 'coarse_grain_timestamp'
        if trait_dropped is not None:
            _get_logger().info('Dropping trait ({0}) on dataset (id={1}) during keep_columns.'
                               .format(trait_dropped, self.id))
        return dataset
@experimental
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def partition_by(self, partition_keys, target, name=None, show_progress=True, partition_as_file_dataset=False):
"""Partitioned data will be copied and output to the destination specified by target.
create the dataset from the outputted data path with partition format, register dataset if name is provided,
return the dataset for the new data path with partitions
.. code-block:: python
ds = Dataset.get_by_name('test') # indexed by country, state, partition_date
# #1: call partition_by locally
new_ds = ds.partition_by(name="repartitioned_ds", partition_keys=['country'],
target=DataPath(datastore, "repartition"))
partition_keys = newds.partition_keys # ['country']
# new_ds can be passed to PRS as input dataset
:param partition_keys: Required, partition keys
:type partition_keys: builtin.list[str]
:param target: Required, the datastore path where the dataframe parquet data will be uploaded to.
A guid folder will be generated under the target path to avoid conflict.
:type target: azureml.data.datapath.DataPath, azureml.core.datastore.Datastore
or tuple(azureml.core.datastore.Datastore, str) object
:param name: Optional, The registration name.
:type name: str
:param show_progress: Optional, indicates whether to show progress of the upload in the console.
Defaults to be True.
:type show_progress: bool
:param partition_as_file_dataset: Optional, indicates whether returns a filedataset or not.
Defaults to be False.
:type show_progress: bool
:return: The saved or registered dataset.
:rtype: azureml.data.TabularDataset
"""
from uuid import uuid4
from azureml.exceptions import UserErrorException
from azureml.core import Dataset
from azureml.data.data_reference import DataReference
from azureml.data._dataset_factory_helper import get_progress_logger, parse_target
from azureml.dataprep import FieldType
from azureml.data.dataset_factory import TabularDatasetFactory
import time
starting_time = time.process_time()
console = get_progress_logger(show_progress)
console("Validating arguments.")
if len(partition_keys) == 0:
raise UserErrorException("partition_keys cannot be empty")
column_types = self._dataflow.dtypes
invalid_keys = []
for key in partition_keys:
if key not in column_types:
invalid_keys.append(key)
if len(invalid_keys) != 0:
raise UserErrorException("{0} are invalid partition keys".format(invalid_keys))
if len(partition_keys) != len(set(partition_keys)):
raise UserErrorException("partition_keys cannot have duplicates")
console("Arguments validated.")
guid = uuid4()
datastore, relative_path = parse_target(target)
relative_path_with_guid = "/%s/%s/" % (relative_path, guid)
partition_format = relative_path_with_guid
partition_path = relative_path_with_guid
saved_dataset_key_column_types = {}
for key in partition_keys:
if column_types[key] == FieldType.DATE:
partition_format = partition_format + '{' + key + ':yyyyMMddHHmmss}*/'
del column_types[key]
else:
partition_format = partition_format + '{' + key + '}/'
partition_path = partition_path + '*/'
if key in column_types:
saved_dataset_key_column_types[key] = column_types[key]
partition_format = partition_format + '*.parquet'
partition_path = partition_path + '*.parquet'
console("Uploading file to {}".format(relative_path_with_guid))
self._dataflow.write_to_parquet(partition_keys=partition_keys,
directory_path=DataReference(datastore=datastore).
path(relative_path_with_guid)).run_local()
console("Successfully uploaded file to datastore.")
console("Creating a new dataset.")
if partition_as_file_dataset:
saved_dataset = Dataset.File.\
from_files(path=(datastore, partition_path), partition_format=partition_format)
else:
saved_dataset = TabularDatasetFactory.\
from_parquet_files(path=(datastore, partition_path), partition_format=partition_format)
saved_dataset = TabularDataset._create(saved_dataset._dataflow.
set_column_types(saved_dataset_key_column_types),
self._properties, telemetry_info=self._telemetry_info)
console("Successfully created a new dataset.")
if self._registration and self._registration.workspace:
collect_datasets_usage(_get_logger(), _PATITION_BY_ACTIVITY,
[self], self._registration.workspace, "N/A",
{"execution_time": time.process_time() - starting_time,
"number_of_partition_keys": len(partition_keys)})
if name is None:
return saved_dataset
console("registering a new dataset.")
registered_dataset = saved_dataset.register(datastore.workspace, name, create_new_version=True)
console("Successfully created and registered a new dataset.")
return registered_dataset
@experimental
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def filter(self, expression):
"""
Filter the data, leaving only the records that match the specified expression.
.. remarks::
Expressions are started by indexing the Dataset with the name of a column. They support a variety of
functions and operators and can be combined using logical operators. The resulting expression will be
lazily evaluated for each record when a data pull occurs and not where it is defined.
.. code-block:: python
dataset['myColumn'] > dataset['columnToCompareAgainst']
dataset['myColumn'].starts_with('prefix')
:param expression: The expression to evaluate.
:type expression: any
:return: The modified dataset (unregistered).
:rtype: azureml.data.TabularDataset
"""
dataflow = self._dataflow
dataflow = dataflow.filter(expression)
return TabularDataset._create(dataflow, self._properties, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def drop_columns(self, columns):
"""Drop the specified columns from the dataset.
If a timeseries column is dropped, the corresponding capabilities will be dropped for the
returned dataset as well.
:param columns: The name or a list of names for the columns to drop.
:type columns: typing.Union[str, builtin.list[str]]
:return: Returns a new TabularDataset object with the specified columns dropped.
:rtype: azureml.data.TabularDataset
"""
dataset = TabularDataset._create(
self._dataflow.drop_columns(columns), self._properties, telemetry_info=self._telemetry_info)
if isinstance(columns, str):
columns = [columns]
ts_cols = self.timestamp_columns
trait_dropped = None
if ts_cols[0] is not None:
if ts_cols[0] in columns:
dataset = dataset.with_timestamp_columns(None)
trait_dropped = 'fine_grain_timestamp, coarse_grain_timestamp'
elif ts_cols[1] is not None and ts_cols[1] in columns:
dataset = dataset.with_timestamp_columns(ts_cols[0])
trait_dropped = 'coarse_grain_timestamp'
if trait_dropped is not None:
_get_logger().info('Dropping trait ({0}) on dataset (id={1}) during drop_columns.'
.format(trait_dropped, self.id))
return dataset
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def to_parquet_files(self):
"""Convert the current dataset into a FileDataset containing Parquet files.
The resulting dataset will contain one or more Parquet files, each corresponding to a partition of data
from the current dataset. These files are not materialized until they are downloaded or read from.
:return: Returns a new FileDataset object with a set of Parquet files containing the data in this dataset.
:rtype: azureml.data.FileDataset
"""
from azureml.data.file_dataset import FileDataset
parquet_dataflow = self._dataflow.to_parquet_streams()
parquet_dataflow = get_dataflow_with_meta_flags(parquet_dataflow, file_projection='parquet')
return FileDataset._create(parquet_dataflow, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def to_csv_files(self, separator=','):
"""Convert the current dataset into a FileDataset containing CSV files.
The resulting dataset will contain one or more CSV files, each corresponding to a partition of data
from the current dataset. These files are not materialized until they are downloaded or read from.
:param separator: The separator to use to separate values in the resulting file.
:type separator: str
:return: Returns a new FileDataset object with a set of CSV files containing the data in this dataset.
:rtype: azureml.data.FileDataset
"""
from azureml.data.file_dataset import FileDataset
csv_dataflow = self._dataflow.to_csv_streams(separator=separator)
csv_dataflow = get_dataflow_with_meta_flags(csv_dataflow, file_projection='csv')
return FileDataset._create(csv_dataflow, telemetry_info=self._telemetry_info)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def time_before(self, end_time, include_boundary=True, validate=True):
"""Filter TabularDataset with time stamp columns before a specified end time.
:param end_time: Upper bound for filtering data.
:type end_time: datetime.datetime
:param include_boundary: Indicate if the row associated with the boundary time (``end_time``) should be
included.
:type include_boundary: bool
:param validate: Indicates whether to validate if specified columns exist in dataset. The default is True.
Validation requires that the data source is accessible from the current compute.
:type validate: bool
:return: A TabularDataset with the new filtered dataset.
:rtype: azureml.data.TabularDataset
"""
if self._registration and self._registration.workspace:
collect_datasets_usage(_get_logger(), _TIMESERIES_BEFORE_ACTIVITY,
[self], self._registration.workspace, "N/A")
return self._time_filter(self.time_before.__name__,
upper_bound=end_time,
include_boundary=include_boundary,
validate=validate)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def time_after(self, start_time, include_boundary=True, validate=True):
"""Filter TabularDataset with time stamp columns after a specified start time.
:param start_time: The lower bound for filtering data.
:type start_time: datetime.datetime
:param include_boundary: Indicate if the row associated with the boundary time (``start_time``) should be
included.
:type include_boundary: bool
:param validate: Indicates whether to validate if specified columns exist in dataset. The default is True.
Validation requires that the data source is accessible from the current compute.
:type validate: bool
:return: A TabularDataset with the new filtered dataset.
:rtype: azureml.data.TabularDataset
"""
if self._registration and self._registration.workspace:
collect_datasets_usage(_get_logger(), _TIMESERIES_AFTER_ACTIVITY,
[self], self._registration.workspace, "N/A")
return self._time_filter(self.time_after.__name__,
lower_bound=start_time,
include_boundary=include_boundary,
validate=validate)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def time_recent(self, time_delta, include_boundary=True, validate=True):
"""Filter TabularDataset to contain only the specified duration (amount) of recent data.
:param time_delta: The duration (amount) of recent data to retrieve.
:type time_delta: datetime.timedelta
:param include_boundary: Indicate if the row associated with the boundary time (``time_delta``)
should be included.
:type include_boundary: bool
:param validate: Indicates whether to validate if specified columns exist in dataset. The default is True.
Validation requires that the data source is accessible from the current compute.
:type validate: bool
:return: A TabularDataset with the new filtered dataset.
:rtype: azureml.data.TabularDataset
"""
if self._registration and self._registration.workspace:
collect_datasets_usage(_get_logger(), _TIMESERIES_RECENT_ACTIVITY,
[self], self._registration.workspace, "N/A")
start_time = datetime.now() - time_delta
return self._time_filter(self.time_recent.__name__,
lower_bound=start_time,
include_boundary=include_boundary,
validate=validate)
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'}, activity_type=_PUBLIC_API)
def time_between(self, start_time, end_time, include_boundary=True, validate=True):
"""Filter TabularDataset between a specified start and end time.
:param start_time: The Lower bound for filtering data.
:type start_time: datetime.datetime
:param end_time: The upper bound for filtering data.
:type end_time: datetime.datetime
:param include_boundary: Indicate if the row associated with the boundary time (``start_end`` and
``end_time``) should be included.
:type include_boundary: bool
:param validate: Indicates whether to validate if specified columns exist in dataset. The default is True.
Validation requires that the data source is accessible from the current compute.
:type validate: bool
:return: A TabularDataset with the new filtered dataset.
:rtype: azureml.data.TabularDataset
"""
if self._registration and self._registration.workspace:
collect_datasets_usage(_get_logger(), _TIMESERIES_BETWEEN_ACTIVITY,
[self], self._registration.workspace, "N/A")
return self._time_filter(self.time_between.__name__,
lower_bound=start_time,
upper_bound=end_time,
include_boundary=include_boundary,
validate=validate)
@experimental
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'})
def submit_profile_run(self, compute_target, experiment):
"""Submit an experimentation run to calculate data profile.
A data profile can be very useful to understand the input data, identify anomalies and missing values
by providing useful information about the data like column type, missing values, etc.
:param compute_target: The compute target to run the
profile calculation experiment on. Specify 'local' to use local compute.
See https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.computetarget
for more information on compute targets.
:type compute_target: typing.Union[str, azureml.core.compute.ComputeTarget]
:param experiment: The experiment object.
See https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.experiment.experiment
for more information on experiments.
:type experiment: azureml.core.experiment.Experiment
:return: An object of type DatasetProfileRun class.
:rtype: azureml.data.dataset_profile_run.DatasetProfileRun
"""
from azureml.core import Experiment, ComputeTarget
if not (isinstance(compute_target, ComputeTarget) or isinstance(compute_target, str)):
raise UserErrorException('Invalid type. compute_target should be either of type ComputeTarget or string '
'but was found to be of type {0}.'.format(type(compute_target)))
if not isinstance(experiment, Experiment):
raise UserErrorException('Invalid type. experiment should be of type azureml.core.Experiment but '
'was found to be of type {0}.'.format(type(experiment)))
from azureml.data.dataset_profile_run_config import DatasetProfileRunConfig
dprc = DatasetProfileRunConfig(self, compute_target=compute_target)
profile_run = experiment.submit(dprc)
profile_run.run.wait_for_completion(raise_on_error=True, wait_post_processing=True)
return profile_run
    @experimental
    @track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'})
    def get_profile(self, workspace=None):
        """Get data profile from the latest profile run submitted for this or the same dataset in the workspace.

        :param workspace: The workspace where profile run was submitted. Defaults to the workspace of this dataset.
            Required if dataset is not associated to a workspace.
            See https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.workspace.workspace
            for more information on workspaces.
        :type workspace: azureml.core.Workspace
        :return: Profile result from the latest profile run of type DatasetProfile.
        :rtype: azureml.data.dataset_profile.DatasetProfile
        :raises azureml._common.exceptions.AzureMLException: If no profile result exists or the stored
            profile artifact cannot be parsed.
        """
        workspace = self._ensure_workspace(workspace)
        saved_dataset_id = self._ensure_saved(workspace)
        # arguments [{'generate_preview': 'True', 'row_count': '1000'}] are added to ensure
        # that requestHash is same. The GenerateProfileWithPreview API add these arguments on service side.
        # If any changes are made there, this should also be changed.
        from azureml._restclient.models import ActionRequestDto
        request_dto = ActionRequestDto(
            action_type=_ACTION_TYPE_PROFILE,
            saved_dataset_id=saved_dataset_id,
            arguments={'generate_preview': 'True', 'row_count': '1000'})
        # Ask the service for the latest profile action matching this request.
        action_result_dto = _restclient(workspace).dataset.get_action_result(
            workspace.subscription_id,
            workspace.resource_group,
            workspace.name,
            dataset_id=_LEGACY_DATASET_ID,
            request=request_dto,
            custom_headers=_custom_headers)
        result_artifact_ids = action_result_dto.result_artifact_ids
        if result_artifact_ids is None or len(result_artifact_ids) == 0:
            raise AzureMLException('Unable to fetch profile results. Please submit a new profile run.')
        # The profile is stored as a run artifact; download its JSON content as a string.
        result_artifact = result_artifact_ids[0]
        from azureml._restclient.artifacts_client import ArtifactsClient
        content = ArtifactsClient(workspace.service_context).download_artifact_contents_to_string(
            *result_artifact.split("/", 2))
        try:
            from azureml.data.dataset_profile import DatasetProfile
            profile = DatasetProfile(saved_dataset_id, action_result_dto.run_id, action_result_dto.experiment_name,
                                     workspace, dataprep().DataProfile._from_json(content))
        except Exception:
            # A parse failure means the stored artifact is corrupted; surface an actionable error.
            errormsg = 'Unable to fetch profile since profile result is corrupted. Please submit a new profile run.'
            _get_logger().error(errormsg)
            raise AzureMLException(errormsg)
        return profile
@experimental
@track(_get_logger, custom_dimensions={'app_name': 'TabularDataset'})
def get_profile_runs(self, workspace=None):
    """Return previous profile runs associated with this or same dataset in the workspace.

    :param workspace: The workspace where profile run was submitted. Defaults to the workspace of this dataset.
        Required if dataset is not associated to a workspace.
        See https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.workspace.workspace
        for more information on workspaces.
    :type workspace: azureml.core.Workspace
    :return: iterator object of type azureml.core.Run.
    :rtype: iter(azureml.core.Run)
    """
    workspace = self._ensure_workspace(workspace)
    from azureml._restclient.models import ActionRequestDto
    request_dto = ActionRequestDto(
        action_type=_ACTION_TYPE_PROFILE,
        saved_dataset_id=self._ensure_saved(workspace),
        arguments={'generate_preview': 'True', 'row_count': '1000'})

    # Page through every profile action recorded for this dataset.
    action_dtos = []
    continuation_token = None
    while True:
        page = _restclient(workspace).dataset.list_actions_from_request(
            workspace.subscription_id,
            workspace.resource_group,
            workspace.name,
            dataset_id=_LEGACY_DATASET_ID,
            request=request_dto,
            count=1000,
            custom_headers=_custom_headers,
            continuation_token=continuation_token)
        action_dtos.extend(page.value)
        continuation_token = page.continuation_token
        if continuation_token is None:
            break

    if not action_dtos:
        raise AzureMLException('Unable to find any run information. Please submit a new profile run.')

    from azureml.core import Experiment, get_run
    run_list = []
    for action in action_dtos:
        # Backward compatibility: older local runs were persisted without a
        # run_id, so those entries are skipped.
        if action.run_id is None:
            continue
        experiment = Experiment(workspace, action.experiment_name)
        try:
            run = get_run(experiment, action.run_id)
        except Exception:
            # Run could not be rehydrated; drop it, matching the original
            # flag-based behavior.
            continue
        run_list.append(run)
    return iter(run_list)
def _time_filter(self, method, lower_bound=None, upper_bound=None, include_boundary=True, validate=True):
    """Filter dataset rows to a time window on the configured timestamp columns.

    :param method: name of the public API invoking this helper (used in the error message only).
    :param lower_bound: optional inclusive/exclusive lower time bound (floored to whole seconds).
    :param upper_bound: optional inclusive/exclusive upper time bound (rounded to whole seconds).
    :param include_boundary: when True use <=/>= comparisons, otherwise strict </>.
    :param validate: when True verify the timestamp columns exist and are datetime-typed.
    :return: a new TabularDataset wrapping the filtered dataflow.
    :raises DatasetTimestampMissingError: when no fine timestamp column is configured.
    """
    exception_message = 'Cannot perform time-series filter `{}` on dataset without timestamp columns defined.' \
        '\nPlease use `with_timestamp_columns` to enable time-series capabilities'.format(method)
    # A fine-grained timestamp column is mandatory for any time filter.
    if self._properties is None \
            or _DATASET_PROP_TIMESTAMP_FINE not in self._properties \
            or self._properties[_DATASET_PROP_TIMESTAMP_FINE] is None:
        raise DatasetTimestampMissingError(exception_message)
    col_fine_timestamp = self._properties[_DATASET_PROP_TIMESTAMP_FINE]
    # The coarse (e.g. partition-level) timestamp column is optional.
    col_coarse_timestamp = None
    if _DATASET_PROP_TIMESTAMP_COARSE in self._properties \
            and self._properties[_DATASET_PROP_TIMESTAMP_COARSE] is not None:
        col_coarse_timestamp = self._properties[_DATASET_PROP_TIMESTAMP_COARSE]
    # validate column type are datetime
    if validate:
        self._validate_timestamp_columns([col_fine_timestamp, col_coarse_timestamp])
    dataflow = self._dataflow
    # base filter, will enrich filters in following steps.
    from azureml.dataprep import Expression
    col_coarse_result = None
    # coarse timestamp may not be assigned.
    if col_coarse_timestamp:
        # NOTE(review): `dataflow[col] is not None` is a Python identity test on the
        # column object, so it evaluates to the constant True rather than a per-row
        # null check -- presumably intended as an always-true base filter; confirm
        # against azureml.dataprep Expression semantics.
        col_coarse_filters = Expression(dataflow[col_coarse_timestamp] is not None)
        if lower_bound:
            # Floor the lower boundary to the second
            lower_bound = lower_bound.replace(microsecond=0)
            filter_coarse_lower = dataflow[col_coarse_timestamp] >= lower_bound if include_boundary \
                else dataflow[col_coarse_timestamp] > lower_bound
            col_coarse_filters &= filter_coarse_lower
        if upper_bound:
            # Strip milliseconds and microseconds from the timestamp
            microsecond = upper_bound.microsecond
            if microsecond > _HALF_SECOND:
                # Round to nearest second
                upper_bound = upper_bound + timedelta(seconds=1)
            upper_bound = upper_bound.replace(microsecond=0)
            filter_coarse_upper = dataflow[col_coarse_timestamp] <= upper_bound if include_boundary \
                else dataflow[col_coarse_timestamp] < upper_bound
            col_coarse_filters &= filter_coarse_upper
        # Apply the coarse filter first to narrow the data before fine filtering.
        col_coarse_result = dataflow.filter(col_coarse_filters)
    col_fine_filters = Expression(dataflow[col_fine_timestamp] is not None)
    # fine timestamp is guaranteed to be there.
    if lower_bound:
        # Floor the lower boundary to the second
        lower_bound = lower_bound.replace(microsecond=0)
        filter_fine_lower = dataflow[col_fine_timestamp] >= lower_bound if include_boundary \
            else dataflow[col_fine_timestamp] > lower_bound
        col_fine_filters &= filter_fine_lower
    if upper_bound:
        # Strip milliseconds and microseconds from the timestamp
        microsecond = upper_bound.microsecond
        if microsecond > _HALF_SECOND:
            # Round to nearest second
            upper_bound = upper_bound + timedelta(seconds=1)
        upper_bound = upper_bound.replace(microsecond=0)
        filter_fine_upper = dataflow[col_fine_timestamp] <= upper_bound if include_boundary \
            else dataflow[col_fine_timestamp] < upper_bound
        col_fine_filters &= filter_fine_upper
    # Chain the fine filter onto the coarse-filtered flow when one was built.
    result = col_coarse_result.filter(col_fine_filters) if col_coarse_result else dataflow.filter(col_fine_filters)
    return TabularDataset._create(result, self._properties, telemetry_info=self._telemetry_info)
def _validate_timestamp_columns(self, columns_list):
    """Validate that every non-None column in ``columns_list`` exists in the
    dataflow and has DATE field type.

    :param columns_list: candidate timestamp column names; ``None`` entries
        (e.g. an unset coarse timestamp) are ignored.
    """
    FieldType = dataprep().api.engineapi.typedefinitions.FieldType
    # Comprehension instead of list(filter(lambda ...)); the expected-type list
    # is one DATE entry per surviving column (the loop variable was unused).
    columns = [col for col in columns_list if col is not None]
    _validate_has_columns(self._dataflow, columns, [FieldType.DATE] * len(columns))
def _ensure_workspace(self, workspace):
    """Resolve which workspace to use.

    An explicitly passed workspace wins; otherwise fall back to the workspace
    this dataset was registered to. Raises when neither is available.
    """
    if workspace is not None:
        return workspace
    registration = self._registration
    if registration is not None and registration.workspace is not None:
        return registration.workspace
    raise UserErrorException('The dataset does not belong to a workspace. Please pass in the workspace '
                             'from argument.')
| 4,596 | 0 | 112 |
3b96b87f169a53e2dc6907332c684b3e1522f75e | 3,958 | py | Python | importer.py | Popkornium18/telegram-losungen | 51e81ac7feca0bcebbc63f6160d3adf96358973d | [
"MIT"
] | 2 | 2021-07-20T20:10:20.000Z | 2021-12-17T15:38:50.000Z | importer.py | Popkornium18/telegram-losungen | 51e81ac7feca0bcebbc63f6160d3adf96358973d | [
"MIT"
] | 10 | 2021-07-27T17:19:12.000Z | 2022-03-31T15:23:05.000Z | importer.py | Popkornium18/telegram-losungen | 51e81ac7feca0bcebbc63f6160d3adf96358973d | [
"MIT"
] | null | null | null | """Functions for importing Losungen from the official download page"""
from typing import List
from zipfile import ZipFile
import datetime
import xml.etree.ElementTree as ET
import os
import re
import requests
import logging
from sqlalchemy.orm import Session
from losungen import SessionMaker
from losungen.models import TagesLosung
from losungen.repositories import TagesLosungRepository
LOSUNGEN_URL = "https://www.losungen.de/fileadmin/media-losungen/download"
LOSUNGEN_XML = "losungen.xml"
logger = logging.getLogger("telegram-losungen.importer")
def download_zip(year: int) -> bool:
    """Downloads the zipped XML file containing the Losungen of the given year.

    :param year: year whose archive should be fetched.
    :return: True when the archive was downloaded and written to disk,
        False when the archive does not exist (404) or the request failed.
    """
    url = f"{LOSUNGEN_URL}/Losung_{year}_XML.zip"
    try:
        response = requests.get(url, allow_redirects=True)
        if response.status_code == 404:
            # No archive published for this year (yet).
            return False
        logger.info("Successfully downloaded %s", url)
    except requests.exceptions.RequestException:
        logger.exception("Unable to download %s", url)
        return False
    # Context manager closes the handle deterministically; the original
    # `open(...).write(...)` leaked the file object.
    with open(f"{LOSUNGEN_XML}.zip", "wb") as zip_file:
        zip_file.write(response.content)
    return True
def extract_zip(filename: str = f"{LOSUNGEN_XML}.zip") -> None:
    """Extracts the first XML member of a Losungen zip file to LOSUNGEN_XML.

    The archive is deleted after extraction.

    :param filename: path of the zip archive to extract.
    """
    with ZipFile(filename) as zip_archive:
        # Select by name only; the original decompressed *every* XML member
        # into memory just to keep element [0]. (Also avoids shadowing the
        # stdlib name `zipfile`.)
        xml_names = [name for name in zip_archive.namelist() if name.endswith(".xml")]
        with open(LOSUNGEN_XML, "wb") as xmlfile:
            xmlfile.write(zip_archive.read(xml_names[0]))
    os.remove(filename)
    logger.info("Successfully extracted %s", filename)
def import_xml(filename: str = LOSUNGEN_XML) -> None:
    """Imports all Losungen contained in the given XML file"""
    db_session: Session = SessionMaker()
    losung_repo = TagesLosungRepository(db_session)
    for tageslosung in _load_xml(filename):
        losung_repo.add(tageslosung)
    db_session.commit()
def import_year(year: int = None) -> bool:
    """Downloads, extracts and imports the Losungen of a given year.
    The year defaults to the next year."""
    if year is None:
        year = datetime.date.today().year + 1
    db_session: Session = SessionMaker()
    already_present = TagesLosungRepository(db_session).get_by_year(year)
    db_session.close()
    if already_present:
        return True  # Already imported
    if not download_zip(year):
        logger.warning("Failed to download zip archive for %i", year)
        return False
    extract_zip()
    import_xml()
    logger.info("Successfully imported Losungen for %i", year)
    return True
def initial_import() -> None:
    """Imports all available zip archives from the Losungen download page"""
    current_year = datetime.date.today().year
    # Walk backwards from the current year until a download fails ...
    candidate = current_year
    while import_year(candidate):
        candidate -= 1
    # ... then forwards from next year until one fails.
    candidate = current_year + 1
    while import_year(candidate):
        candidate += 1
| 31.664 | 81 | 0.650076 | """Functions for importing Losungen from the official download page"""
from typing import List
from zipfile import ZipFile
import datetime
import xml.etree.ElementTree as ET
import os
import re
import requests
import logging
from sqlalchemy.orm import Session
from losungen import SessionMaker
from losungen.models import TagesLosung
from losungen.repositories import TagesLosungRepository
LOSUNGEN_URL = "https://www.losungen.de/fileadmin/media-losungen/download"
LOSUNGEN_XML = "losungen.xml"
logger = logging.getLogger("telegram-losungen.importer")
def download_zip(year: int) -> bool:
    """Downloads the zipped XML file containing the Losungen of the given year.

    :param year: year whose archive should be fetched.
    :return: True when the archive was downloaded and written to disk,
        False when the archive does not exist (404) or the request failed.
    """
    url = f"{LOSUNGEN_URL}/Losung_{year}_XML.zip"
    try:
        response = requests.get(url, allow_redirects=True)
        if response.status_code == 404:
            # No archive published for this year (yet).
            return False
        logger.info("Successfully downloaded %s", url)
    except requests.exceptions.RequestException:
        logger.exception("Unable to download %s", url)
        return False
    # Context manager closes the handle deterministically; the original
    # `open(...).write(...)` leaked the file object.
    with open(f"{LOSUNGEN_XML}.zip", "wb") as zip_file:
        zip_file.write(response.content)
    return True
def extract_zip(filename: str = f"{LOSUNGEN_XML}.zip") -> None:
    """Extracts the first XML member of a Losungen zip file to LOSUNGEN_XML.

    The archive is deleted after extraction.

    :param filename: path of the zip archive to extract.
    """
    with ZipFile(filename) as zip_archive:
        # Select by name only; the original decompressed *every* XML member
        # into memory just to keep element [0]. (Also avoids shadowing the
        # stdlib name `zipfile`.)
        xml_names = [name for name in zip_archive.namelist() if name.endswith(".xml")]
        with open(LOSUNGEN_XML, "wb") as xmlfile:
            xmlfile.write(zip_archive.read(xml_names[0]))
    os.remove(filename)
    logger.info("Successfully extracted %s", filename)
def _load_xml(filename: str) -> List[TagesLosung]:
    """Parses the given Losungen XML file into TagesLosung models.

    The XML file is deleted after it has been parsed.

    :param filename: path of the XML file to parse.
    :return: list of TagesLosung objects, one per day element.
    """
    # Honor the parameter; the original parsed the module constant
    # LOSUNGEN_XML regardless of `filename` (while still deleting `filename`).
    tree = ET.parse(filename)
    os.remove(filename)
    root = tree.getroot()
    losungen: List[TagesLosung] = []
    for day in root:
        date_str = day.find("Datum").text
        date = datetime.datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%S").date()
        special_date = day.find("Sonntag").text
        # Strip unnecessary text from special_date
        special_date_short = (
            special_date
            if special_date is None
            else re.sub(r" \(.*\)", "", str(special_date))
        )
        losung = day.find("Losungstext").text
        losung_verse = day.find("Losungsvers").text
        lehrtext_verse = day.find("Lehrtextvers").text
        lehrtext = day.find("Lehrtext").text
        tageslosung = TagesLosung(
            date=date,
            special_date=special_date_short,
            losung=losung,
            losung_verse=losung_verse,
            lehrtext=lehrtext,
            lehrtext_verse=lehrtext_verse,
        )
        losungen.append(tageslosung)
    return losungen
def import_xml(filename: str = LOSUNGEN_XML) -> None:
    """Imports all Losungen contained in the given XML file"""
    db_session: Session = SessionMaker()
    losung_repo = TagesLosungRepository(db_session)
    for tageslosung in _load_xml(filename):
        losung_repo.add(tageslosung)
    db_session.commit()
def import_year(year: int = None) -> bool:
    """Downloads, extracts and imports the Losungen of a given year.
    The year defaults to the next year."""
    if year is None:
        year = datetime.date.today().year + 1
    db_session: Session = SessionMaker()
    already_present = TagesLosungRepository(db_session).get_by_year(year)
    db_session.close()
    if already_present:
        return True  # Already imported
    if not download_zip(year):
        logger.warning("Failed to download zip archive for %i", year)
        return False
    extract_zip()
    import_xml()
    logger.info("Successfully imported Losungen for %i", year)
    return True
def initial_import() -> None:
    """Imports all available zip archives from the Losungen download page"""
    current_year = datetime.date.today().year
    # Walk backwards from the current year until a download fails ...
    candidate = current_year
    while import_year(candidate):
        candidate -= 1
    # ... then forwards from next year until one fails.
    candidate = current_year + 1
    while import_year(candidate):
        candidate += 1
| 1,049 | 0 | 23 |
3c1623a59de3e86e1eeaf18cc199ea8f5630d9e0 | 12,801 | py | Python | src/chemftr/thc/utils/thc_objectives.py | ncrubin/chemftr | 5cbbec7138d4c07f4b7facbe5052d0cbe00ecbda | [
"Apache-2.0"
] | null | null | null | src/chemftr/thc/utils/thc_objectives.py | ncrubin/chemftr | 5cbbec7138d4c07f4b7facbe5052d0cbe00ecbda | [
"Apache-2.0"
] | null | null | null | src/chemftr/thc/utils/thc_objectives.py | ncrubin/chemftr | 5cbbec7138d4c07f4b7facbe5052d0cbe00ecbda | [
"Apache-2.0"
] | null | null | null | import os
# set mkl thread count for numpy einsum/tensordot calls
# leave one CPU un used so we can still access this computer
import scipy.optimize
os.environ["MKL_NUM_THREADS"] = "{}".format(os.cpu_count() - 1)
# os.environ["MKL_NUM_THREADS"] = "40" # "{}".format(os.cpu_count() - 1)
import jax.numpy as jnp
from jax.config import config
config.update("jax_enable_x64", True)
# from jax.experimental import optimizers
from jax import jit, grad
from .adagrad import adagrad
import h5py
import numpy
import numpy.random
import numpy.linalg
from scipy.optimize import minimize
from uuid import uuid4
def thc_objective_jax(xcur, norb, nthc, eri):
    """
    Least-squares THC loss evaluated with jax.numpy:
    0.5 \sum_{pqrs}(eri(pqrs) - G(pqrs))^{2}
    G(pqrs) = \sum_{uv}X_{u,p}X_{u,q}Z_{uv}X_{v,r}X_{v,s}
    :param xcur: flattened leaf (eta) and central (Z) tensors
    :param norb: number of orbitals
    :param nthc: thc-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :return: scalar loss value
    """
    n_leaf = norb * nthc
    leaf = xcur[:n_leaf].reshape(nthc, norb)
    core = xcur[n_leaf:n_leaf + nthc * nthc].reshape(nthc, nthc)
    # pair[p, r, P] = leaf[P, p] * leaf[P, r]
    pair = jnp.einsum("Pp,Pr->prP", leaf, leaf)
    reconstruction = jnp.einsum('pqU,UV,rsV->pqrs', pair, core, pair, optimize=[(0, 1), (0, 1)])
    residual = eri - reconstruction
    return 0.5 * jnp.sum(residual ** 2)
def thc_objective_grad_jax(xcur, norb, nthc, eri):
    """
    Analytic gradient for the THC least-squares objective (jax compatible).

    :param xcur: Current parameters for eta and Z
    :param norb: number of orbitals
    :param nthc: thc-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :return: gradient flattened as hstack((dL/dX, dL/dZ))
    """
    etaPp = xcur[:norb * nthc].reshape(nthc, norb)  # leaf tensor nthc x norb
    MPQ = xcur[norb * nthc:norb * nthc + nthc * nthc].reshape(nthc, nthc)  # central tensor
    # m indexes the nthc and p,q,r,s are orbital indices
    CprP = jnp.einsum("Pp,Pr->prP", etaPp, etaPp)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    Iapprox = jnp.einsum('pqU,UV,rsV->pqrs', CprP, MPQ, CprP, optimize=[(0, 1), (0, 1)])
    deri = eri - Iapprox
    # (The original also computed the residual norm here; it was dead code and
    # has been removed.)
    # O(norb^5)
    dL_dZab = -jnp.einsum('pqrs,pqA,rsB->AB', deri, CprP, CprP, optimize=[(0, 1), (0, 1)])
    # O(norb^5); the sibling leaf terms are folded into factors of 2.
    # NOTE(review): that folding assumes `deri` is symmetric under p<->q and
    # r<->s (true when `eri` carries those index symmetries) -- TODO confirm
    # for all callers.
    dL_dX_GT = -2 * jnp.einsum('Tqrs,Gq,Gv,rsv->GT', deri, etaPp, MPQ, CprP,
                               optimize=[(0, 3), (1, 2), (0, 1)])
    dL_dX_GT -= 2 * jnp.einsum('pqTs,pqu,uG,Gs->GT', deri, CprP, MPQ, etaPp,
                               optimize=[(0, 1), (0, 2), (0, 1)])
    return jnp.hstack((dL_dX_GT.ravel(), dL_dZab.ravel()))
def thc_objective(xcur, norb, nthc, eri, verbose=False):
    """
    Least-squares loss for the THC factorization (numpy implementation):
    0.5 \sum_{pqrs}(eri(pqrs) - G(pqrs))^{2}
    G(pqrs) = \sum_{uv}X_{u,p}X_{u,q}Z_{uv}X_{v,r}X_{v,s}
    :param xcur: Current parameters for eta and Z
    :param norb: number of orbitals
    :param nthc: thc-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :param verbose: optional (False) to print the loss and max residual entry
    :return: scalar loss value
    """
    n_leaf = norb * nthc
    leaf = xcur[:n_leaf].reshape(nthc, norb)
    core = xcur[n_leaf:n_leaf + nthc * nthc].reshape(nthc, nthc)
    pair = numpy.einsum("Pp,Pr->prP", leaf, leaf)
    reconstruction = numpy.einsum('pqU,UV,rsV->pqrs', pair, core, pair,
                                  optimize=['einsum_path', (0, 1), (0, 1)])
    residual = eri - reconstruction
    res = 0.5 * numpy.sum(residual ** 2)
    if verbose:
        print("res, max, lambda = {}, {}".format(res, numpy.max(numpy.abs(residual))))
    return res
def thc_objective_regularized(xcur, norb, nthc, eri, penalty_param, verbose=False):
    """
    THC least-squares loss plus a squared lambda (one-norm) regularizer:
    0.5 \sum_{pqrs}(eri(pqrs) - G(pqrs))^{2} + penalty_param * lambda_z**2
    G(pqrs) = \sum_{uv}X_{u,p}X_{u,q}Z_{uv}X_{v,r}X_{v,s}
    :param xcur: Current parameters for eta and Z
    :param norb: number of orbitals
    :param nthc: thc-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :param penalty_param: weight applied to lambda_z**2
    :param verbose: optional (False) to print the loss and lambda**2
    :return: scalar regularized loss value
    """
    n_leaf = norb * nthc
    leaf = xcur[:n_leaf].reshape(nthc, norb)
    core = xcur[n_leaf:n_leaf + nthc * nthc].reshape(nthc, nthc)
    pair = jnp.einsum("Pp,Pr->prP", leaf, leaf)
    reconstruction = jnp.einsum('pqU,UV,rsV->pqrs', pair, core, pair, optimize=[(0, 1), (0, 1)])
    residual = eri - reconstruction
    # Normalize the central tensor by the diagonal of the leaf overlap metric
    # before taking the one-norm (Eq. 11 & 12 of the reference).
    overlap = leaf.dot(leaf.T)  # (nthc x nthc) metric
    diag_scale = jnp.diag(jnp.diag(overlap))
    normalized_core = diag_scale.dot(core).dot(diag_scale)
    lambda_z = 0.5 * jnp.sum(jnp.abs(normalized_core))
    res = 0.5 * jnp.sum(residual ** 2) + penalty_param * (lambda_z ** 2)
    if verbose:
        print("res, max, lambda**2 = {}, {}".format(res, lambda_z ** 2))
    return res
def thc_objective_grad(xcur, norb, nthc, eri, verbose=False):
    """
    Gradient for THC least-squares objective

    :param xcur: Current parameters for eta and Z
    :param norb: number of orbitals
    :param nthc: thc-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :param verbose: optional (False) for print iteration residual and infinity norm
    :return: gradient flattened as hstack((dL/dX, dL/dZ))
    """
    etaPp = numpy.array(xcur[:norb*nthc]).reshape(nthc,norb)  # leaf tensor nthc x norb
    MPQ = numpy.array(xcur[norb*nthc:norb*nthc+nthc*nthc]).reshape(nthc,nthc)  # central tensor
    # m indexes the nthc and p,q,r,s are orbital indices
    CprP = numpy.einsum("Pp,Pr->prP", etaPp, etaPp)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    # Residual between the exact integrals and the THC reconstruction.
    Iapprox = numpy.einsum('pqU,UV,rsV->pqrs', CprP, MPQ, CprP, optimize=['einsum_path', (0, 1), (0, 1)])
    deri = eri - Iapprox
    res = 0.5 * numpy.sum((deri)**2)
    if verbose:
        print("res, max, lambda = {}, {}".format(res, numpy.max(numpy.abs(deri))))
    # O(norb^5)
    dL_dZab = -numpy.einsum('pqrs,pqA,rsB->AB', deri, CprP, CprP, optimize=['einsum_path', (0, 1), (0, 1)])
    # O(norb^5)
    # The commented-out sibling terms below are folded into factors of 2.
    # NOTE(review): that folding assumes `deri` is symmetric under p<->q and
    # r<->s (true when `eri` carries those index symmetries) -- confirm for
    # all callers.
    dL_dX_GT = -2 * numpy.einsum('Tqrs,Gq,Gv,rsv->GT', deri, etaPp, MPQ, CprP, optimize=['einsum_path',(0, 3), (1, 2), (0, 1)])
    # dL_dX_GT -= numpy.einsum('pTrs,Gp,Gv,rsv->GT', deri, etaPp, MPQ, CprP, optimize=['einsum_path',(0, 3), (1, 2), (0, 1)])
    dL_dX_GT -= 2 * numpy.einsum('pqTs,pqu,uG,Gs->GT', deri, CprP, MPQ, etaPp, optimize=['einsum_path',(0, 1), (0, 2), (0, 1)])
    # dL_dX_GT -= numpy.einsum('pqrT,pqu,uG,Gr->GT', deri, CprP, MPQ, etaPp, optimize=['einsum_path',(0, 1), (0, 2), (0, 1)])
    return numpy.hstack((dL_dX_GT.ravel(), dL_dZab.ravel()))
def thc_objective_and_grad(xcur, norb, nthc, eri, verbose=False):
    """
    Loss function and analytic gradient for THC factorization
    0.5 \sum_{pqrs}(eri(pqrs) - G(pqrs))^{2}
    G(pqrs) = \sum_{uv}X_{u,p}X_{u,q}Z_{uv}X_{v,r}X_{v,s}
    :param xcur: Current parameters for eta and Z
    :param norb: number of orbitals
    :param nthc: thc-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :param verbose: optional (False); currently unused in this variant
    :return: tuple (loss value, gradient flattened as hstack((dL/dX, dL/dZ)))
    """
    etaPp = xcur[:norb*nthc].reshape(nthc,norb)  # leaf tensor nthc x norb
    MPQ = xcur[norb*nthc:norb*nthc+nthc*nthc].reshape(nthc,nthc)  # central tensor
    CprP = numpy.einsum("Pp,Pr->prP", etaPp, etaPp)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    # path = numpy.einsum_path('pqU,UV,rsV->pqrs', CprP, MPQ, CprP, optimize='optimal')
    Iapprox = numpy.einsum('pqU,UV,rsV->pqrs', CprP, MPQ, CprP, optimize=['einsum_path', (0, 1), (0, 1)])
    deri = eri - Iapprox
    res = 0.5 * numpy.sum((deri)**2)
    # O(norb^5)
    dL_dZab = -numpy.einsum('pqrs,pqA,rsB->AB', deri, CprP, CprP, optimize=['einsum_path', (0, 1), (0, 1)])
    # O(norb^4 * nthc)
    # leaving the commented out code for documentation purposes
    # NOTE(review): the factors of 2 fold in the commented-out sibling terms;
    # valid only when `deri` is symmetric under p<->q and r<->s -- confirm.
    dL_dX_GT = -2 * numpy.einsum('Tqrs,Gq,Gv,rsv->GT', deri, etaPp, MPQ, CprP, optimize=['einsum_path',(0, 3), (1, 2), (0, 1)])
    # dL_dX_GT -= numpy.einsum('pTrs,Gp,Gv,rsv->GT', deri, etaPp, MPQ, CprP, optimize=['einsum_path',(0, 3), (1, 2), (0, 1)])
    dL_dX_GT -= 2 * numpy.einsum('pqTs,pqu,uG,Gs->GT', deri, CprP, MPQ, etaPp, optimize=['einsum_path',(0, 1), (0, 2), (0, 1)])
    # dL_dX_GT -= numpy.einsum('pqrT,pqu,uG,Gr->GT', deri, CprP, MPQ, etaPp, optimize=['einsum_path',(0, 1), (0, 2), (0, 1)])
    return res, numpy.hstack((dL_dX_GT.ravel(), dL_dZab.ravel()))
def cp_ls_cholesky_factor_objective(beta_gamma, norb, nthc, cholesky_factor, calcgrad=False):
    """Least-squares CP fit of a Cholesky factor B_{ab,x} (shape (norb, norb, num_cholesky)):

    ||B_{ab,x} - \sum_{r} beta_{a,r} beta_{b,r} gamma_{x,r}||

    Returns the objective value, and additionally its gradient with respect to
    beta and gamma (flattened, beta first) when ``calcgrad`` is True.
    """
    nchol = cholesky_factor.shape[-1]
    beta = beta_gamma[:norb * nthc].reshape((norb, nthc))
    gamma = beta_gamma[norb * nthc:norb * nthc + nthc * nchol].reshape((nchol, nthc))
    # residual = B - sum_R beta[a,R] beta[b,R] gamma[X,R]
    outer = numpy.einsum('aR,bR->abR', beta, beta)
    residual = cholesky_factor - numpy.einsum('abR,XR->abX', outer, gamma)
    fval = 0.5 * numpy.sum(residual ** 2)
    if not calcgrad:
        return fval
    # dO/dbeta and dO/dgamma
    dbeta = -2 * numpy.einsum('Cbx,bS,xS->CS', residual, beta, gamma,
                              optimize=['einsum_path', (0, 2), (0, 1)])
    dgamma = -numpy.einsum('abY,aS,bS->YS', residual, beta, beta,
                           optimize=['einsum_path', (1, 2), (0, 1)])
    return fval, numpy.hstack((dbeta.ravel(), dgamma.ravel()))
if __name__ == "__main__":
    # Smoke test: build an exactly THC-representable ERI tensor, verify the
    # numpy/jax reconstructions and parameter round-trip agree, then run an
    # L-BFGS-B fit of the regularized objective and report the final residual.
    numpy.random.seed(25)
    norb = 2
    nthc = 10
    penalty_param = 1.0E-6
    etaPp = numpy.random.randn(norb * nthc).reshape((nthc, norb))
    MPQ = numpy.random.randn(nthc**2).reshape((nthc, nthc))
    # symmetrize the central tensor
    MPQ = MPQ + MPQ.T
    CprP = numpy.einsum("Pp,Pr->prP", etaPp, etaPp)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    CprP_jax = jnp.einsum("Pp,Pr->prP", etaPp, etaPp)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    eri = numpy.einsum('pqU,UV,rsV->pqrs', CprP, MPQ, CprP, optimize=['einsum_path', (0, 1), (0, 1)])
    eri_jax = jnp.einsum('pqU,UV,rsV->pqrs', CprP, MPQ, CprP, optimize=[(0, 1), (0, 1)])
    assert numpy.allclose(eri_jax, eri)
    # pack parameters and check the unpacking round-trip
    xcur = numpy.hstack((etaPp.ravel(), MPQ.ravel()))
    etaPp2 = xcur[:norb*nthc].reshape(nthc,norb)  # leaf tensor nthc x norb
    assert numpy.allclose(etaPp2, etaPp)
    MPQ2 = xcur[norb*nthc:norb*nthc+nthc*nthc].reshape(nthc,nthc)  # central tensor
    assert numpy.allclose(MPQ2, MPQ)
    CprP2 = numpy.einsum("Pp,Pr->prP", etaPp2, etaPp2)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    assert numpy.allclose(CprP2, CprP)
    Iapprox = numpy.einsum('pqU,UV,rsV->pqrs', CprP2, MPQ2, CprP2, optimize=['einsum_path', (0, 1), (0, 1)])
    assert numpy.allclose(Iapprox, eri)
    deri = eri - Iapprox
    res = thc_objective_regularized(xcur, norb, nthc, eri, penalty_param, verbose=True)
    print(res)
    # autodiff gradient of the regularized objective w.r.t. the parameters
    thc_grad = grad(thc_objective_regularized, argnums=[0])
    print(thc_grad(jnp.array(xcur), norb, nthc, jnp.array(eri), penalty_param))
    res = scipy.optimize.minimize(thc_objective_regularized, jnp.array(xcur), args=(norb, nthc, jnp.array(eri), penalty_param), method='L-BFGS-B',
                                  jac=thc_grad, options={'disp': None, 'iprint': 98})
    print(res)
    # rebuild the approximation from the optimized parameters
    xcur = numpy.array(res.x)
    etaPp2 = xcur[:norb*nthc].reshape(nthc,norb)  # leaf tensor nthc x norb
    MPQ2 = xcur[norb*nthc:norb*nthc+nthc*nthc].reshape(nthc,nthc)  # central tensor
    CprP2 = numpy.einsum("Pp,Pr->prP", etaPp2, etaPp2)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    Iapprox = numpy.einsum('pqU,UV,rsV->pqrs', CprP2, MPQ2, CprP2, optimize=['einsum_path', (0, 1), (0, 1)])
    deri = eri - Iapprox
    print(jnp.linalg.norm(deri))
| 43.989691 | 146 | 0.636435 | import os
# set mkl thread count for numpy einsum/tensordot calls
# leave one CPU un used so we can still access this computer
import scipy.optimize
os.environ["MKL_NUM_THREADS"] = "{}".format(os.cpu_count() - 1)
# os.environ["MKL_NUM_THREADS"] = "40" # "{}".format(os.cpu_count() - 1)
import jax.numpy as jnp
from jax.config import config
config.update("jax_enable_x64", True)
# from jax.experimental import optimizers
from jax import jit, grad
from .adagrad import adagrad
import h5py
import numpy
import numpy.random
import numpy.linalg
from scipy.optimize import minimize
from uuid import uuid4
def thc_objective_jax(xcur, norb, nthc, eri):
    """
    Least-squares THC loss evaluated with jax.numpy:
    0.5 \sum_{pqrs}(eri(pqrs) - G(pqrs))^{2}
    G(pqrs) = \sum_{uv}X_{u,p}X_{u,q}Z_{uv}X_{v,r}X_{v,s}
    :param xcur: flattened leaf (eta) and central (Z) tensors
    :param norb: number of orbitals
    :param nthc: thc-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :return: scalar loss value
    """
    n_leaf = norb * nthc
    leaf = xcur[:n_leaf].reshape(nthc, norb)
    core = xcur[n_leaf:n_leaf + nthc * nthc].reshape(nthc, nthc)
    # pair[p, r, P] = leaf[P, p] * leaf[P, r]
    pair = jnp.einsum("Pp,Pr->prP", leaf, leaf)
    reconstruction = jnp.einsum('pqU,UV,rsV->pqrs', pair, core, pair, optimize=[(0, 1), (0, 1)])
    residual = eri - reconstruction
    return 0.5 * jnp.sum(residual ** 2)
def thc_objective_grad_jax(xcur, norb, nthc, eri):
    """
    Analytic gradient for the THC least-squares objective (jax compatible).

    :param xcur: Current parameters for eta and Z
    :param norb: number of orbitals
    :param nthc: thc-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :return: gradient flattened as hstack((dL/dX, dL/dZ))
    """
    etaPp = xcur[:norb * nthc].reshape(nthc, norb)  # leaf tensor nthc x norb
    MPQ = xcur[norb * nthc:norb * nthc + nthc * nthc].reshape(nthc, nthc)  # central tensor
    # m indexes the nthc and p,q,r,s are orbital indices
    CprP = jnp.einsum("Pp,Pr->prP", etaPp, etaPp)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    Iapprox = jnp.einsum('pqU,UV,rsV->pqrs', CprP, MPQ, CprP, optimize=[(0, 1), (0, 1)])
    deri = eri - Iapprox
    # (The original also computed the residual norm here; it was dead code and
    # has been removed.)
    # O(norb^5)
    dL_dZab = -jnp.einsum('pqrs,pqA,rsB->AB', deri, CprP, CprP, optimize=[(0, 1), (0, 1)])
    # O(norb^5); the sibling leaf terms are folded into factors of 2.
    # NOTE(review): that folding assumes `deri` is symmetric under p<->q and
    # r<->s (true when `eri` carries those index symmetries) -- TODO confirm
    # for all callers.
    dL_dX_GT = -2 * jnp.einsum('Tqrs,Gq,Gv,rsv->GT', deri, etaPp, MPQ, CprP,
                               optimize=[(0, 3), (1, 2), (0, 1)])
    dL_dX_GT -= 2 * jnp.einsum('pqTs,pqu,uG,Gs->GT', deri, CprP, MPQ, etaPp,
                               optimize=[(0, 1), (0, 2), (0, 1)])
    return jnp.hstack((dL_dX_GT.ravel(), dL_dZab.ravel()))
def thc_objective(xcur, norb, nthc, eri, verbose=False):
    """
    Least-squares loss for the THC factorization (numpy implementation):
    0.5 \sum_{pqrs}(eri(pqrs) - G(pqrs))^{2}
    G(pqrs) = \sum_{uv}X_{u,p}X_{u,q}Z_{uv}X_{v,r}X_{v,s}
    :param xcur: Current parameters for eta and Z
    :param norb: number of orbitals
    :param nthc: thc-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :param verbose: optional (False) to print the loss and max residual entry
    :return: scalar loss value
    """
    n_leaf = norb * nthc
    leaf = xcur[:n_leaf].reshape(nthc, norb)
    core = xcur[n_leaf:n_leaf + nthc * nthc].reshape(nthc, nthc)
    pair = numpy.einsum("Pp,Pr->prP", leaf, leaf)
    reconstruction = numpy.einsum('pqU,UV,rsV->pqrs', pair, core, pair,
                                  optimize=['einsum_path', (0, 1), (0, 1)])
    residual = eri - reconstruction
    res = 0.5 * numpy.sum(residual ** 2)
    if verbose:
        print("res, max, lambda = {}, {}".format(res, numpy.max(numpy.abs(residual))))
    return res
def thc_objective_regularized(xcur, norb, nthc, eri, penalty_param, verbose=False):
    """
    THC least-squares loss plus a squared lambda (one-norm) regularizer:
    0.5 \sum_{pqrs}(eri(pqrs) - G(pqrs))^{2} + penalty_param * lambda_z**2
    G(pqrs) = \sum_{uv}X_{u,p}X_{u,q}Z_{uv}X_{v,r}X_{v,s}
    :param xcur: Current parameters for eta and Z
    :param norb: number of orbitals
    :param nthc: thc-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :param penalty_param: weight applied to lambda_z**2
    :param verbose: optional (False) to print the loss and lambda**2
    :return: scalar regularized loss value
    """
    n_leaf = norb * nthc
    leaf = xcur[:n_leaf].reshape(nthc, norb)
    core = xcur[n_leaf:n_leaf + nthc * nthc].reshape(nthc, nthc)
    pair = jnp.einsum("Pp,Pr->prP", leaf, leaf)
    reconstruction = jnp.einsum('pqU,UV,rsV->pqrs', pair, core, pair, optimize=[(0, 1), (0, 1)])
    residual = eri - reconstruction
    # Normalize the central tensor by the diagonal of the leaf overlap metric
    # before taking the one-norm (Eq. 11 & 12 of the reference).
    overlap = leaf.dot(leaf.T)  # (nthc x nthc) metric
    diag_scale = jnp.diag(jnp.diag(overlap))
    normalized_core = diag_scale.dot(core).dot(diag_scale)
    lambda_z = 0.5 * jnp.sum(jnp.abs(normalized_core))
    res = 0.5 * jnp.sum(residual ** 2) + penalty_param * (lambda_z ** 2)
    if verbose:
        print("res, max, lambda**2 = {}, {}".format(res, lambda_z ** 2))
    return res
def thc_objective_grad(xcur, norb, nthc, eri, verbose=False):
    """
    Gradient for THC least-squares objective

    :param xcur: Current parameters for eta and Z
    :param norb: number of orbitals
    :param nthc: thc-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :param verbose: optional (False) for print iteration residual and infinity norm
    :return: gradient flattened as hstack((dL/dX, dL/dZ))
    """
    etaPp = numpy.array(xcur[:norb*nthc]).reshape(nthc,norb)  # leaf tensor nthc x norb
    MPQ = numpy.array(xcur[norb*nthc:norb*nthc+nthc*nthc]).reshape(nthc,nthc)  # central tensor
    # m indexes the nthc and p,q,r,s are orbital indices
    CprP = numpy.einsum("Pp,Pr->prP", etaPp, etaPp)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    # Residual between the exact integrals and the THC reconstruction.
    Iapprox = numpy.einsum('pqU,UV,rsV->pqrs', CprP, MPQ, CprP, optimize=['einsum_path', (0, 1), (0, 1)])
    deri = eri - Iapprox
    res = 0.5 * numpy.sum((deri)**2)
    if verbose:
        print("res, max, lambda = {}, {}".format(res, numpy.max(numpy.abs(deri))))
    # O(norb^5)
    dL_dZab = -numpy.einsum('pqrs,pqA,rsB->AB', deri, CprP, CprP, optimize=['einsum_path', (0, 1), (0, 1)])
    # O(norb^5)
    # The commented-out sibling terms below are folded into factors of 2.
    # NOTE(review): that folding assumes `deri` is symmetric under p<->q and
    # r<->s (true when `eri` carries those index symmetries) -- confirm for
    # all callers.
    dL_dX_GT = -2 * numpy.einsum('Tqrs,Gq,Gv,rsv->GT', deri, etaPp, MPQ, CprP, optimize=['einsum_path',(0, 3), (1, 2), (0, 1)])
    # dL_dX_GT -= numpy.einsum('pTrs,Gp,Gv,rsv->GT', deri, etaPp, MPQ, CprP, optimize=['einsum_path',(0, 3), (1, 2), (0, 1)])
    dL_dX_GT -= 2 * numpy.einsum('pqTs,pqu,uG,Gs->GT', deri, CprP, MPQ, etaPp, optimize=['einsum_path',(0, 1), (0, 2), (0, 1)])
    # dL_dX_GT -= numpy.einsum('pqrT,pqu,uG,Gr->GT', deri, CprP, MPQ, etaPp, optimize=['einsum_path',(0, 1), (0, 2), (0, 1)])
    return numpy.hstack((dL_dX_GT.ravel(), dL_dZab.ravel()))
def thc_objective_and_grad(xcur, norb, nthc, eri, verbose=False):
    """
    Loss function and analytic gradient for THC factorization
    0.5 \sum_{pqrs}(eri(pqrs) - G(pqrs))^{2}
    G(pqrs) = \sum_{uv}X_{u,p}X_{u,q}Z_{uv}X_{v,r}X_{v,s}
    :param xcur: Current parameters for eta and Z
    :param norb: number of orbitals
    :param nthc: thc-basis dimension
    :param eri: two-electron repulsion integrals in chemist notation
    :param verbose: optional (False); currently unused in this variant
    :return: tuple (loss value, gradient flattened as hstack((dL/dX, dL/dZ)))
    """
    etaPp = xcur[:norb*nthc].reshape(nthc,norb)  # leaf tensor nthc x norb
    MPQ = xcur[norb*nthc:norb*nthc+nthc*nthc].reshape(nthc,nthc)  # central tensor
    CprP = numpy.einsum("Pp,Pr->prP", etaPp, etaPp)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    # path = numpy.einsum_path('pqU,UV,rsV->pqrs', CprP, MPQ, CprP, optimize='optimal')
    Iapprox = numpy.einsum('pqU,UV,rsV->pqrs', CprP, MPQ, CprP, optimize=['einsum_path', (0, 1), (0, 1)])
    deri = eri - Iapprox
    res = 0.5 * numpy.sum((deri)**2)
    # O(norb^5)
    dL_dZab = -numpy.einsum('pqrs,pqA,rsB->AB', deri, CprP, CprP, optimize=['einsum_path', (0, 1), (0, 1)])
    # O(norb^4 * nthc)
    # leaving the commented out code for documentation purposes
    # NOTE(review): the factors of 2 fold in the commented-out sibling terms;
    # valid only when `deri` is symmetric under p<->q and r<->s -- confirm.
    dL_dX_GT = -2 * numpy.einsum('Tqrs,Gq,Gv,rsv->GT', deri, etaPp, MPQ, CprP, optimize=['einsum_path',(0, 3), (1, 2), (0, 1)])
    # dL_dX_GT -= numpy.einsum('pTrs,Gp,Gv,rsv->GT', deri, etaPp, MPQ, CprP, optimize=['einsum_path',(0, 3), (1, 2), (0, 1)])
    dL_dX_GT -= 2 * numpy.einsum('pqTs,pqu,uG,Gs->GT', deri, CprP, MPQ, etaPp, optimize=['einsum_path',(0, 1), (0, 2), (0, 1)])
    # dL_dX_GT -= numpy.einsum('pqrT,pqu,uG,Gr->GT', deri, CprP, MPQ, etaPp, optimize=['einsum_path',(0, 1), (0, 2), (0, 1)])
    return res, numpy.hstack((dL_dX_GT.ravel(), dL_dZab.ravel()))
def cp_ls_cholesky_factor_objective(beta_gamma, norb, nthc, cholesky_factor, calcgrad=False):
    r"""Least-squares objective for a CP fit of a Cholesky factor.

    ``cholesky_factor`` has shape (norb, norb, num_cholesky); the fit is
    ||B_{ab,x} - \sum_{r} beta_{a,x} beta_{b,x} gamma_{ab,x}||.

    :param beta_gamma: flat parameters: beta (norb * nthc) then gamma
        (nthc * num_cholesky)
    :param calcgrad: when True, also return the gradient with respect to
        beta and gamma as one stacked vector
    :return: ``fval`` or ``(fval, grad)``
    """
    nchol = cholesky_factor.shape[-1]
    beta = beta_gamma[:norb * nthc].reshape((norb, nthc))
    gamma = beta_gamma[norb * nthc:norb * nthc + nthc * nchol].reshape((nchol, nthc))
    # outer[a, b, R] = beta[a, R] * beta[b, R]
    outer = numpy.einsum('aR,bR->abR', beta, beta)
    fitted = numpy.einsum('abR,XR->abX', outer, gamma)
    residual = cholesky_factor - fitted
    fval = 0.5 * numpy.sum(residual**2)
    if not calcgrad:
        return fval
    # dO/d beta_{c,s} and dO/d gamma_{y,s}
    dbeta = -2 * numpy.einsum('Cbx,bS,xS->CS', residual, beta, gamma,
                              optimize=['einsum_path', (0, 2), (0, 1)])
    dgamma = -numpy.einsum('abY,aS,bS->YS', residual, beta, beta,
                           optimize=['einsum_path', (1, 2), (0, 1)])
    return fval, numpy.hstack((dbeta.ravel(), dgamma.ravel()))
if __name__ == "__main__":
    # Smoke test / demo: build an ERI tensor that is exactly representable
    # by the THC factors, cross-check the jax vs numpy contractions and the
    # parameter flatten/round-trip, then run an L-BFGS-B fit of
    # `thc_objective_regularized` (defined elsewhere in this module) using
    # jax autodiff (`grad`) for the jacobian.
    numpy.random.seed(25)
    norb = 2
    nthc = 10
    penalty_param = 1.0E-6
    etaPp = numpy.random.randn(norb * nthc).reshape((nthc, norb))
    MPQ = numpy.random.randn(nthc**2).reshape((nthc, nthc))
    MPQ = MPQ + MPQ.T
    CprP = numpy.einsum("Pp,Pr->prP", etaPp, etaPp)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    CprP_jax = jnp.einsum("Pp,Pr->prP", etaPp, etaPp)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    eri = numpy.einsum('pqU,UV,rsV->pqrs', CprP, MPQ, CprP, optimize=['einsum_path', (0, 1), (0, 1)])
    eri_jax = jnp.einsum('pqU,UV,rsV->pqrs', CprP, MPQ, CprP, optimize=[(0, 1), (0, 1)])
    assert numpy.allclose(eri_jax, eri)
    # Round-trip the flattened parameter vector back into its two tensors.
    xcur = numpy.hstack((etaPp.ravel(), MPQ.ravel()))
    etaPp2 = xcur[:norb*nthc].reshape(nthc,norb)  # leaf tensor nthc x norb
    assert numpy.allclose(etaPp2, etaPp)
    MPQ2 = xcur[norb*nthc:norb*nthc+nthc*nthc].reshape(nthc,nthc)  # central tensor
    assert numpy.allclose(MPQ2, MPQ)
    CprP2 = numpy.einsum("Pp,Pr->prP", etaPp2, etaPp2)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    assert numpy.allclose(CprP2, CprP)
    Iapprox = numpy.einsum('pqU,UV,rsV->pqrs', CprP2, MPQ2, CprP2, optimize=['einsum_path', (0, 1), (0, 1)])
    assert numpy.allclose(Iapprox, eri)
    deri = eri - Iapprox
    res = thc_objective_regularized(xcur, norb, nthc, eri, penalty_param, verbose=True)
    print(res)
    thc_grad = grad(thc_objective_regularized, argnums=[0])
    print(thc_grad(jnp.array(xcur), norb, nthc, jnp.array(eri), penalty_param))
    res = scipy.optimize.minimize(thc_objective_regularized, jnp.array(xcur), args=(norb, nthc, jnp.array(eri), penalty_param), method='L-BFGS-B',
                                  jac=thc_grad, options={'disp': None, 'iprint': 98})
    print(res)
    # Reconstruct the tensors from the optimized parameters and report the
    # remaining factorization error.
    xcur = numpy.array(res.x)
    etaPp2 = xcur[:norb*nthc].reshape(nthc,norb)  # leaf tensor nthc x norb
    MPQ2 = xcur[norb*nthc:norb*nthc+nthc*nthc].reshape(nthc,nthc)  # central tensor
    CprP2 = numpy.einsum("Pp,Pr->prP", etaPp2, etaPp2)  # this is einsum('mp,mq->pqm', etaPp, etaPp)
    Iapprox = numpy.einsum('pqU,UV,rsV->pqrs', CprP2, MPQ2, CprP2, optimize=['einsum_path', (0, 1), (0, 1)])
    deri = eri - Iapprox
    print(jnp.linalg.norm(deri))
| 0 | 0 | 0 |
999ced6d71016ca04ed15fc9b48b58f28429a858 | 93 | py | Python | test/example_test.py | tomaskala/python-template | 0b4ea38425fa432092d388530224240dc9c46854 | [
"Unlicense"
] | null | null | null | test/example_test.py | tomaskala/python-template | 0b4ea38425fa432092d388530224240dc9c46854 | [
"Unlicense"
] | null | null | null | test/example_test.py | tomaskala/python-template | 0b4ea38425fa432092d388530224240dc9c46854 | [
"Unlicense"
] | null | null | null | from project_template.main import return_true
| 15.5 | 45 | 0.784946 | from project_template.main import return_true
def test_example():
    """Smoke test: `return_true` should report success."""
    result = return_true()
    assert result
| 23 | 0 | 23 |
ec3443d021e3dcecbf81a5eaadc6d0a5332c17eb | 3,111 | py | Python | glaive/glaive.py | peixian/Ultralisk | 7de450ac5986212cf13fc5b861a34d35257a48f9 | [
"MIT"
] | null | null | null | glaive/glaive.py | peixian/Ultralisk | 7de450ac5986212cf13fc5b861a34d35257a48f9 | [
"MIT"
] | null | null | null | glaive/glaive.py | peixian/Ultralisk | 7de450ac5986212cf13fc5b861a34d35257a48f9 | [
"MIT"
] | null | null | null | import sklearn.cross_validation as cv
from sklearn import tree
import pandas as pd
import numpy as np
import os.path
import pprint
import matplotlib.pyplot as plt
import seaborn as sns
import runExperiments
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure import TanhLayer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.xml.networkwriter import NetworkWriter
from pybrain.tools.xml.networkreader import NetworkReader
def lossCalculation(model):
    """Evaluates the total loss on the dataset

    NOTE(review): this function looks unfinished/dead code -- ``X`` is not
    defined in this scope, ``z1 = X.dot`` only binds the method without
    calling it, and ``w2``/``b2`` are never used afterwards. Confirm whether
    it should be completed or removed.
    """
    w1, b1, w2, b2 = model["w1"], model['b1'], model["w2"], model["b2"]
    z1 = X.dot
def normalize(lst):
    """Normalize screen coordinates to the unit square.

    Each (x, y) pair is divided by the 640 x 720 map resolution.

    :param lst: iterable of (x, y) pixel-coordinate pairs.
    :return: list of (x / 640.0, y / 720.0) tuples.
    """
    # Return a concrete list rather than a lazy one-shot `map` object so the
    # result can be printed, indexed and iterated more than once (this is
    # what callers already rely on; under Python 2 `map` returned a list).
    return [(p[0] / 640.0, p[1] / 720.0) for p in lst]
def createNet():
    """Create and seed the initial neural network.

    Builds a 6-30-6 feed-forward net (tanh hidden layer) on normalized
    enemy->ally position pairs from runExperiments.makeTrainingDataset(),
    trains it until convergence, and writes it to "net.xml" (side effect;
    read back later by startTrials). Also prints a prediction for a fresh
    test layout as a smoke check.

    :return: the assembled SupervisedDataSet, so callers can keep
        extending it with new samples.
    """
    #CONSTANTS
    nn_input_dim = 6 #[x_enemy1, y_enemy1, x_enemy2, y_enemy2, x_enemy3, y_enemy3]
    nn_output_dim = 6 #[x_ally1, y_ally1, x_ally2, y_ally2, x_ally3, y_ally3]
    allyTrainingPos, enemyTrainingPos = runExperiments.makeTrainingDataset()
    ds = SupervisedDataSet(nn_input_dim, nn_output_dim)
    # Normalize each (x, y) pair and flatten it into a 6-value sample.
    for i in range(0, len(allyTrainingPos)):
        x = normalize(enemyTrainingPos[i])
        y = normalize(allyTrainingPos[i])
        x = [val for pair in x for val in pair]
        y = [val for pair in y for val in pair]
        ds.addSample(x, y)
    # Debug dump of the assembled dataset (Python 2 print statement).
    for inpt, target in ds:
        print inpt, target
    net = buildNetwork(nn_input_dim, 30, nn_output_dim, bias=True, hiddenclass=TanhLayer)
    trainer = BackpropTrainer(net, ds)
    trainer.trainUntilConvergence()
    NetworkWriter.writeToFile(net, "net.xml")
    enemyTestPos = runExperiments.makeTestDataset()
    print(net.activate([val for pair in normalize(enemyTestPos) for val in pair]))
    return ds
def startTrials(ds, maxTrials = 2, maxExperiments = 2):
    """Run simulated trials with the trained net, retraining between runs.

    For each of maxExperiments x maxTrials runs: reload "net.xml", predict
    ally positions for a fresh enemy layout, hand them to runExperiments,
    run the simulation, and -- when "Zerg_Zergling" appears in the results
    file -- add the sample back into `ds` and record a field parsed from
    that line (presumably remaining HP; confirm against the format of
    exp_results_raw.txt).

    NOTE(review): relies on Python 2 list-returning `zip`/`map`; under
    Python 3 `print(allyTestPos)` would not show values and the later
    re-iteration would see an exhausted iterator.

    :param ds: PyBrain SupervisedDataSet, extended in place.
    :return: list of the recorded HP values (as strings).
    """
    hpCount = []
    for i in range(0, maxExperiments):
        for j in range(0, maxTrials):
            enemyTestPos = runExperiments.makeTestDataset()
            net = NetworkReader.readFrom("net.xml")
            netResults = net.activate([val for pair in normalize(enemyTestPos) for val in pair])
            # Pair up the flat output: zipping the same iterator with itself
            # yields ((x1, y1), (x2, y2), ...).
            netIter = iter(netResults)
            allyTestPos = zip(netIter, netIter)
            # Undo the normalization back to 640x720 map pixels.
            allyTestPos = map(lambda p: (abs(p[0]*640), abs(p[1]*720)), allyTestPos)
            print(allyTestPos)
            runExperiments.writeTestData(allyTestPos)
            runExperiments.run()
            with open("exp_results_raw.txt", "r") as resultsFile:
                lines = resultsFile.readlines()
                if "Zerg_Zergling" in lines[1]:
                    x = normalize(enemyTestPos)
                    y = normalize(allyTestPos)
                    x = [val for pair in x for val in pair]
                    y = [val for pair in y for val in pair]
                    ds.addSample(x, y)
                    lineSplit = lines[1].split("Zerg_Zergling")[-1]
                    hpCount.append(lineSplit.split(" ")[2])
            trainer = BackpropTrainer(net, ds)
            trainer.trainUntilConvergence()
    return hpCount
# Script entry: build and train the initial net, run 10 experiments of 30
# trials each, then plot the recorded HP values over trial index.
ds = createNet()
hpCount = startTrials(ds, 30, 10)
print(hpCount)
fig, ax = plt.subplots()
ax.plot(range(0, len(hpCount)),hpCount)
plt.show() | 33.451613 | 87 | 0.728705 | import sklearn.cross_validation as cv
from sklearn import tree
import pandas as pd
import numpy as np
import os.path
import pprint
import matplotlib.pyplot as plt
import seaborn as sns
import runExperiments
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure import TanhLayer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.xml.networkwriter import NetworkWriter
from pybrain.tools.xml.networkreader import NetworkReader
def lossCalculation(model):
"""Evaluates the total loss on the dataset"""
w1, b1, w2, b2 = model["w1"], model['b1'], model["w2"], model["b2"]
z1 = X.dot
def normalize(lst):
    """Normalize screen coordinates to the unit square.

    Each (x, y) pair is divided by the 640 x 720 map resolution.

    :param lst: iterable of (x, y) pixel-coordinate pairs.
    :return: list of (x / 640.0, y / 720.0) tuples.
    """
    # Return a concrete list rather than a lazy one-shot `map` object so the
    # result can be printed, indexed and iterated more than once (this is
    # what callers already rely on; under Python 2 `map` returned a list).
    return [(p[0] / 640.0, p[1] / 720.0) for p in lst]
def createNet():
"""Create and seed the intial neural network"""
#CONSTANTS
nn_input_dim = 6 #[x_enemy1, y_enemy1, x_enemy2, y_enemy2, x_enemy3, y_enemy3]
nn_output_dim = 6 #[x_ally1, y_ally1, x_ally2, y_ally2, x_ally3, y_ally3]
allyTrainingPos, enemyTrainingPos = runExperiments.makeTrainingDataset()
ds = SupervisedDataSet(nn_input_dim, nn_output_dim)
#normalizes and adds it to the dataset
for i in range(0, len(allyTrainingPos)):
x = normalize(enemyTrainingPos[i])
y = normalize(allyTrainingPos[i])
x = [val for pair in x for val in pair]
y = [val for pair in y for val in pair]
ds.addSample(x, y)
for inpt, target in ds:
print inpt, target
net = buildNetwork(nn_input_dim, 30, nn_output_dim, bias=True, hiddenclass=TanhLayer)
trainer = BackpropTrainer(net, ds)
trainer.trainUntilConvergence()
NetworkWriter.writeToFile(net, "net.xml")
enemyTestPos = runExperiments.makeTestDataset()
print(net.activate([val for pair in normalize(enemyTestPos) for val in pair]))
return ds
def startTrials(ds, maxTrials = 2, maxExperiments = 2):
"""start and run the trials"""
hpCount = []
for i in range(0, maxExperiments):
for j in range(0, maxTrials):
enemyTestPos = runExperiments.makeTestDataset()
net = NetworkReader.readFrom("net.xml")
netResults = net.activate([val for pair in normalize(enemyTestPos) for val in pair])
netIter = iter(netResults)
allyTestPos = zip(netIter, netIter)
#undo normalization
allyTestPos = map(lambda p: (abs(p[0]*640), abs(p[1]*720)), allyTestPos)
print(allyTestPos)
runExperiments.writeTestData(allyTestPos)
runExperiments.run()
with open("exp_results_raw.txt", "r") as resultsFile:
lines = resultsFile.readlines()
if "Zerg_Zergling" in lines[1]:
x = normalize(enemyTestPos)
y = normalize(allyTestPos)
x = [val for pair in x for val in pair]
y = [val for pair in y for val in pair]
ds.addSample(x, y)
lineSplit = lines[1].split("Zerg_Zergling")[-1]
hpCount.append(lineSplit.split(" ")[2])
trainer = BackpropTrainer(net, ds)
trainer.trainUntilConvergence()
return hpCount
ds = createNet()
hpCount = startTrials(ds, 30, 10)
print(hpCount)
fig, ax = plt.subplots()
ax.plot(range(0, len(hpCount)),hpCount)
plt.show() | 0 | 0 | 0 |
f70c77dce55c5afe74d3faf888bccf4b6e67116c | 6,274 | py | Python | A2C/utils.py | CommanderCero/RL_Algorithms | fd8172e0075247b682a1dca752306147fa2ed3ba | [
"Apache-2.0"
] | 1 | 2021-10-06T14:45:41.000Z | 2021-10-06T14:45:41.000Z | A2C/utils.py | CommanderCero/RL_Algorithms | fd8172e0075247b682a1dca752306147fa2ed3ba | [
"Apache-2.0"
] | null | null | null | A2C/utils.py | CommanderCero/RL_Algorithms | fd8172e0075247b682a1dca752306147fa2ed3ba | [
"Apache-2.0"
] | null | null | null | import scipy.signal as signal
import torch
import torch.nn as nn
import numpy as np
import models
import gym
import wandb
def discounted_cumsum(rewards, reward_decay):
    """Reversed discounted cumulative sums of ``rewards``.

    out[t] = rewards[t] + reward_decay * out[t + 1], vectorized as an IIR
    filter over the reversed sequence. Adapted from
    https://stackoverflow.com/questions/47970683/vectorize-a-numpy-discount-calculation
    """
    reversed_rewards = rewards[::-1]
    discounted = signal.lfilter([1], [1, -reward_decay], x=reversed_rewards)
    return discounted[::-1]
| 36.265896 | 104 | 0.603762 | import scipy.signal as signal
import torch
import torch.nn as nn
import numpy as np
import models
import gym
import wandb
def create_feedforward(sizes, activation=nn.ReLU):
    """Build a fully-connected MLP as an ``nn.Sequential``.

    :param sizes: layer widths, e.g. ``[in_dim, hidden..., out_dim]``
    :param activation: activation-module class inserted between linear
        layers (default ``nn.ReLU``)
    :return: ``nn.Sequential`` of Linear layers with activations between
        them; no activation after the final (output) layer.
    """
    layers = []
    for i in range(len(sizes) - 1):
        layers.append(nn.Linear(sizes[i], sizes[i + 1]))
        if i < len(sizes) - 2:
            # Bug fix: instantiate the requested `activation`; previously
            # nn.ReLU() was hard-coded and the parameter was ignored.
            layers.append(activation())
    return nn.Sequential(*layers)
def get_shape(shape):
    """Normalize an optional shape: ``None`` becomes the empty tuple."""
    return () if shape is None else shape
def discounted_cumsum(rewards, reward_decay):
    """Reversed discounted cumulative sums of ``rewards``.

    out[t] = rewards[t] + reward_decay * out[t + 1], vectorized as an IIR
    filter over the reversed sequence. Adapted from
    https://stackoverflow.com/questions/47970683/vectorize-a-numpy-discount-calculation
    """
    reversed_rewards = rewards[::-1]
    discounted = signal.lfilter([1], [1, -reward_decay], x=reversed_rewards)
    return discounted[::-1]
class TrajectoryBuffer:
    """Flat experience buffer for a single environment.

    Stores transitions sequentially; when a trajectory is closed via
    `end_trajectory`, the discounted returns for that slice are computed
    with `discounted_cumsum` and written into `self.returns`.
    """
    def __init__(self, observation_shape, action_shape, size, reward_decay=0.99):
        """Pre-allocate storage for at most `size` transitions."""
        self.max_size = size
        # Index where the currently-open trajectory begins.
        self.trajectory_start = 0
        # Next free slot.
        self.pos = 0
        self.reward_decay = reward_decay
        self.observations = np.empty((size, *observation_shape), dtype=np.float32)
        self.actions = np.empty((size, *get_shape(action_shape)), dtype=np.float32)
        self.rewards = np.empty((size,), dtype=np.float32)
        self.returns = np.empty((size,), dtype=np.float32)
        self.dones = np.empty((size,), dtype=np.float32)
    def store(self, observation, action, reward, done):
        """Append one transition; asserts there is room left."""
        assert self.pos < self.max_size, "Buffer Overflow"
        self.observations[self.pos] = observation
        self.actions[self.pos] = action
        self.rewards[self.pos] = reward
        self.dones[self.pos] = done
        self.pos += 1
    def end_trajectory(self, value=0):
        """Close the open trajectory and fill in its discounted returns.

        `value` is appended as a bootstrap term (presumably a value estimate
        for a cut-off trajectory; 0 for a terminal state -- confirm with
        callers) and dropped again after the cumulative sum.
        """
        # Compute return
        sl = slice(self.trajectory_start, self.pos)
        rewards = self.rewards[sl]
        rewards = np.append(rewards, value)
        self.returns[sl] = discounted_cumsum(rewards, self.reward_decay)[:-1]
        self.trajectory_start = self.pos
    def get_data(self):
        """Return the filled region as a dict of torch tensors."""
        sl = slice(0, self.pos)
        data = dict(
            observations=self.observations[sl],
            actions=self.actions[sl],
            rewards=self.rewards[sl],
            returns=self.returns[sl],
            dones=self.dones[sl]
        )
        return {key : torch.from_numpy(value) for key, value in data.items()}
    def clear(self):
        """Reset the buffer indices; storage is reused, not zeroed."""
        self.pos = 0
        self.trajectory_start = 0
class VecTrajectoryBuffer:
    """Experience buffer for `num_envs` vectorized environments.

    Tracks a separate open trajectory per environment (`traj_starts`) and
    computes discounted returns whenever an environment reports `done`, or
    when `end_trajectory` is called explicitly with bootstrap values.
    """
    def __init__(self, observation_shape, action_shape, num_envs, size, reward_decay=0.99):
        """Pre-allocate (size, num_envs, ...) storage for all arrays."""
        self.max_size = size
        # Next free time step (shared across all environments).
        self.pos = 0
        self.reward_decay = reward_decay
        # Per-environment index where the open trajectory begins.
        self.traj_starts = np.zeros((num_envs,), dtype=int)
        self.observations = np.empty((size, num_envs, *observation_shape), dtype=np.float32)
        self.actions = np.empty((size, num_envs, *get_shape(action_shape)), dtype=np.float32)
        self.rewards = np.empty((size, num_envs), dtype=np.float32)
        self.returns = np.empty((size, num_envs), dtype=np.float32)
        self.dones = np.empty((size, num_envs), dtype=np.float32)
    def store(self, observations, actions, rewards, dones):
        """Append one transition per environment; closes finished trajectories."""
        assert self.pos < self.max_size, "Buffer Overflow"
        self.observations[self.pos] = observations
        self.actions[self.pos] = actions
        self.rewards[self.pos] = rewards
        self.dones[self.pos] = dones
        self.pos += 1
        # Compute returns
        for env_index, done in enumerate(dones):
            if done:
                self._end_trajectory(env_index)
    def end_trajectory(self, values):
        """Close every environment's open trajectory, one bootstrap value each."""
        for env_index, value in enumerate(values):
            self._end_trajectory(env_index, value)
    def _end_trajectory(self, env_index, value=0):
        """Fill discounted returns for one environment's open trajectory."""
        # Compute return
        sl = slice(self.traj_starts[env_index], self.pos)
        rewards = self.rewards[sl, env_index]
        rewards = np.append(rewards, value)
        self.returns[sl, env_index] = discounted_cumsum(rewards, self.reward_decay)[:-1]
        # Update trajectory start
        self.traj_starts[env_index] = self.pos
    def get_data(self, device=torch.device('cpu')):
        """Return the filled region, env axis flattened away, as torch tensors."""
        sl = slice(0, self.pos)
        data = dict(
            observations=self._remove_env_axis(self.observations[sl]),
            actions=self._remove_env_axis(self.actions[sl]),
            rewards=self._remove_env_axis(self.rewards[sl]),
            returns=self._remove_env_axis(self.returns[sl]),
            dones=self._remove_env_axis(self.dones[sl])
        )
        return {key : torch.from_numpy(value).to(device) for key, value in data.items()}
    def clear(self):
        """Reset all indices; storage is reused, not zeroed."""
        self.pos = 0
        self.traj_starts.fill(0)
    def _remove_env_axis(self, array):
        """Collapse (size, num_envs, ...) into (size * num_envs, ...)."""
        # array.shape = (size, num_envs, ???)
        shape = array.shape
        # Swap size with num_envs to ensure reshaping won't mix trajectories
        array = array.swapaxes(0, 1)
        # Flatten
        new_shape = (shape[0] * shape[1], *shape[2:])
        array = array.reshape(new_shape)
        return array
def play(model: models.Policy, env: gym.Env, repeats=10, device=torch.device('cpu')):
    """Run `repeats` episodes in `env` using `model`'s actions, rendering each step."""
    for _episode in range(repeats):
        observation = env.reset()
        finished = False
        while not finished:
            batch = torch.FloatTensor([observation]).to(device)
            chosen_action = model.get_actions(batch)[0]
            observation, _reward, finished, _info = env.step(chosen_action)
            env.render()
    env.close()
def capture_video(model: models.Policy, env: gym.Env, fps=30, device=torch.device('cpu')):
    """Record one episode and return it as a `wandb.Video` (channels-first frames)."""
    captured = []
    total_reward = 0
    steps = 0
    observation = env.reset()
    finished = False
    while not finished:
        batch = torch.FloatTensor([observation]).to(device)
        chosen_action = model.get_actions(batch)[0]
        observation, step_reward, finished, _info = env.step(chosen_action)
        captured.append(np.array(env.render("rgb_array")))
        total_reward += step_reward
        steps += 1
    # Stack to (time, width, height, channels), then move channels to axis 1
    # as wandb.Video expects.
    video = np.moveaxis(np.array(captured), 3, 1)
    summary = f"RewardSum={total_reward}; EpisodeLength={steps}"
    return wandb.Video(video, caption=summary, fps=fps)
69621724e7bf731c95055584838600b07febc5a3 | 524 | py | Python | busqueda_binaria.py | SebaB29/Python | 8fe7b375e200d2a629e3ef83a2356002621267a6 | [
"MIT"
] | null | null | null | busqueda_binaria.py | SebaB29/Python | 8fe7b375e200d2a629e3ef83a2356002621267a6 | [
"MIT"
] | null | null | null | busqueda_binaria.py | SebaB29/Python | 8fe7b375e200d2a629e3ef83a2356002621267a6 | [
"MIT"
] | null | null | null |
lista = [1,2,3,4,5,6,7,89,9,8,5,5,2,1]
print(busqueda_binaria(lista, 9)) | 32.75 | 67 | 0.664122 | def _busqueda_binaria(lista, elemento, desde, hasta):
if desde > hasta:
return -1
medio = (hasta + desde) // 2
if lista[medio] == elemento:
return medio
if elemento < lista[medio]:
return _busqueda_binaria(lista, elemento, desde, medio - 1)
return _busqueda_binaria(lista, elemento, medio + 1, hasta)
def busqueda_binaria(lista, elemento):
    """Return the index of `elemento` in the sorted `lista`, or -1 if missing."""
    ultimo_indice = len(lista) - 1
    return _busqueda_binaria(lista, elemento, 0, ultimo_indice)
lista = [1,2,3,4,5,6,7,89,9,8,5,5,2,1]
print(busqueda_binaria(lista, 9)) | 405 | 0 | 45 |
73c88ffa050bfb56ce4452497b71a7e5d65513ba | 3,126 | py | Python | 24. Exam Prep/exam_10apr/tests/test_survivor.py | elenaborisova/Python-OOP | 584882c08f84045b12322917f0716c7c7bd9befc | [
"MIT"
] | 1 | 2021-03-27T16:56:30.000Z | 2021-03-27T16:56:30.000Z | 24. Exam Prep/exam_10apr/tests/test_survivor.py | elenaborisova/Python-OOP | 584882c08f84045b12322917f0716c7c7bd9befc | [
"MIT"
] | null | null | null | 24. Exam Prep/exam_10apr/tests/test_survivor.py | elenaborisova/Python-OOP | 584882c08f84045b12322917f0716c7c7bd9befc | [
"MIT"
] | 1 | 2021-03-15T14:50:39.000Z | 2021-03-15T14:50:39.000Z | import unittest
from exam_10apr.project.survivor import Survivor
if __name__ == '__main__':
unittest.main()
| 32.905263 | 87 | 0.710493 | import unittest
from exam_10apr.project.survivor import Survivor
class SurvivorTests(unittest.TestCase):
    """Unit tests for `Survivor`: constructor validation of name/age, the
    clamp-at-100 behavior and validation of the `health` and `needs`
    properties, and the derived `needs_sustenance` / `needs_healing` flags.
    """
    # --- name validation ---
    def test_survivorInit_whenNameIsValid_shouldAssignIt(self):
        survivor = Survivor('test', 23)
        self.assertEqual('test', survivor.name)
    def test_survivorInit_whenNameIsInvalid_shouldRaise(self):
        with self.assertRaises(ValueError) as context:
            Survivor('', 23)
        self.assertIsNotNone(context.exception)
        self.assertEqual(str(context.exception), 'Name not valid!')
    # --- age validation ---
    def test_survivorInit_whenAgeIsValid_shouldAssignIt(self):
        survivor = Survivor('test', 23)
        self.assertEqual(23, survivor.age)
    def test_survivorInit_whenAgeIsInvalid_shouldRaise(self):
        with self.assertRaises(ValueError) as context:
            Survivor('test', -23)
        self.assertIsNotNone(context.exception)
        self.assertEqual(str(context.exception), 'Age not valid!')
    # --- health property: assignment, clamping to 100, validation ---
    def test_survivorInit_whenHealthIsValid_shouldAssignIt(self):
        survivor = Survivor('test', 23)
        survivor.health = 50
        self.assertEqual(50, survivor.health)
    def test_survivorInit_whenHealthIsGreaterThan100_shouldSetTo100(self):
        survivor = Survivor('test', 23)
        survivor.health = 150
        self.assertEqual(100, survivor.health)
    def test_survivorInit_whenHealthIsInvalid_shouldRaise(self):
        survivor = Survivor('test', 23)
        with self.assertRaises(ValueError) as context:
            survivor.health = -100
        self.assertIsNotNone(context.exception)
        self.assertEqual(str(context.exception), 'Health not valid!')
    # --- needs property: assignment, clamping to 100, validation ---
    def test_survivorInit_whenNeedsIsValid_shouldAssignIt(self):
        survivor = Survivor('test', 23)
        survivor.needs = 50
        self.assertEqual(50, survivor.needs)
    def test_survivorInit_whenNeedsIsGreaterThan100_shouldSetTo100(self):
        survivor = Survivor('test', 23)
        survivor.needs = 150
        self.assertEqual(100, survivor.needs)
    def test_survivorInit_whenNeedsIsInvalid_shouldRaise(self):
        survivor = Survivor('test', 23)
        with self.assertRaises(ValueError) as context:
            survivor.needs = -100
        self.assertIsNotNone(context.exception)
        self.assertEqual(str(context.exception), 'Needs not valid!')
    # --- derived flags (a fresh Survivor apparently starts at 100/100) ---
    def test_survivorNeedsSustenance_whenNeedsLessThan100_shouldReturnTrue(self):
        survivor = Survivor('test', 23)
        survivor.needs = 50
        self.assertTrue(survivor.needs_sustenance)
    def test_survivorNeedsSustenance_whenNeedsMoreOrEqualTo100_shouldReturnFalse(self):
        survivor = Survivor('test', 23)
        self.assertFalse(survivor.needs_sustenance)
    def test_survivorNeedsHealing_whenHealthLessThan100_shouldReturnTrue(self):
        survivor = Survivor('test', 23)
        survivor.health = 50
        self.assertTrue(survivor.needs_healing)
    def test_survivorNeedsHealing_whenHealthMoreOrEqualTo100_shouldReturnFalse(self):
        survivor = Survivor('test', 23)
        self.assertFalse(survivor.needs_healing)
if __name__ == '__main__':
unittest.main()
| 2,593 | 18 | 400 |
86be5e7da58e9d936e27b9f8b0e218fff4eb3310 | 3,433 | py | Python | tests/integration_tests/security/api_tests.py | delorenzosoftware/superset | 5403f1ec163a52623f34f459d89f20e4e190371d | [
"Apache-2.0"
] | 1 | 2022-02-18T10:21:55.000Z | 2022-02-18T10:21:55.000Z | tests/integration_tests/security/api_tests.py | changeiot/superset | 299b5dc64448d04abe6b35ee85fbd2b938c781bc | [
"Apache-2.0"
] | 11 | 2021-12-06T10:46:10.000Z | 2022-02-23T11:18:03.000Z | tests/integration_tests/security/api_tests.py | changeiot/superset | 299b5dc64448d04abe6b35ee85fbd2b938c781bc | [
"Apache-2.0"
] | 1 | 2022-03-09T02:57:17.000Z | 2022-03-09T02:57:17.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
"""Tests for security api methods"""
import json
import jwt
from tests.integration_tests.base_tests import SupersetTestCase
from flask_wtf.csrf import generate_csrf
from superset.utils.urls import get_url_host
| 34.676768 | 85 | 0.669968 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
"""Tests for security api methods"""
import json
import jwt
from tests.integration_tests.base_tests import SupersetTestCase
from flask_wtf.csrf import generate_csrf
from superset.utils.urls import get_url_host
class TestSecurityCsrfApi(SupersetTestCase):
    """Tests for the CSRF-token endpoint: any logged-in user (admin or
    gamma) can fetch the token; anonymous requests get a 401.
    """
    # REST resource name under /api/v1/ that these tests target.
    resource_name = "security"
    def _assert_get_csrf_token(self):
        """Fetch the CSRF token and check it matches the session's token."""
        uri = f"api/v1/{self.resource_name}/csrf_token/"
        response = self.client.get(uri)
        self.assert200(response)
        data = json.loads(response.data.decode("utf-8"))
        self.assertEqual(generate_csrf(), data["result"])
    def test_get_csrf_token(self):
        """
        Security API: Test get CSRF token
        """
        self.login(username="admin")
        self._assert_get_csrf_token()
    def test_get_csrf_token_gamma(self):
        """
        Security API: Test get CSRF token by gamma
        """
        self.login(username="gamma")
        self._assert_get_csrf_token()
    def test_get_csrf_unauthorized(self):
        """
        Security API: Test get CSRF no login
        """
        self.logout()
        uri = f"api/v1/{self.resource_name}/csrf_token/"
        response = self.client.get(uri)
        self.assert401(response)
class TestSecurityGuestTokenApi(SupersetTestCase):
    """Tests for the guest-token creation endpoint: authentication and
    authorization are enforced, and a valid request returns a decodable JWT.
    """

    # Plain string: no placeholders, so the f-prefix was unnecessary (F541).
    uri = "api/v1/security/guest_token/"

    def test_post_guest_token_unauthenticated(self):
        """
        Security API: Cannot create a guest token without authentication
        """
        self.logout()
        response = self.client.post(self.uri)
        self.assert401(response)

    def test_post_guest_token_unauthorized(self):
        """
        Security API: Cannot create a guest token without authorization
        """
        self.login(username="gamma")
        response = self.client.post(self.uri)
        self.assert403(response)

    def test_post_guest_token_authorized(self):
        """
        Security API: An admin can create a guest token whose decoded JWT
        carries back the requested user and resources
        """
        self.login(username="admin")
        user = {"username": "bob", "first_name": "Bob", "last_name": "Also Bob"}
        resource = {"type": "dashboard", "id": "blah"}
        rls_rule = {"dataset": 1, "clause": "1=1"}
        params = {"user": user, "resources": [resource], "rls": [rls_rule]}
        response = self.client.post(
            self.uri, data=json.dumps(params), content_type="application/json"
        )
        self.assert200(response)
        token = json.loads(response.data)["token"]
        # Decoding with the configured secret and audience also verifies the
        # token's signature.
        decoded_token = jwt.decode(
            token, self.app.config["GUEST_TOKEN_JWT_SECRET"], audience=get_url_host()
        )
        self.assertEqual(user, decoded_token["user"])
        self.assertEqual(resource, decoded_token["resources"][0])
| 1,043 | 1,327 | 46 |
7d5c15e9d80b93ed503999c654aa06483ce59032 | 3,137 | py | Python | src/day-8/main.py | tpssim/advent-of-code-2021 | 14f534d63b56d23a1641bdc252bc6be29e32e3e3 | [
"MIT"
] | null | null | null | src/day-8/main.py | tpssim/advent-of-code-2021 | 14f534d63b56d23a1641bdc252bc6be29e32e3e3 | [
"MIT"
] | null | null | null | src/day-8/main.py | tpssim/advent-of-code-2021 | 14f534d63b56d23a1641bdc252bc6be29e32e3e3 | [
"MIT"
] | null | null | null |
# Advent of Code day 8: seven-segment display decoding.
# Fix: the original shadowed the builtin `input` twice, with two different
# meanings (the open file handle and the per-line pattern list); both uses
# are renamed below.

# Parse the input from the input file
input_file = 'example_input.txt'
inputs = []
outputs = []
with open(input_file) as puzzle_file:
    for line in puzzle_file:
        split_line = line.split(' | ')
        # Sort every string alphabetically to make further analysis easier
        inputs.append([''.join(sorted(digit)) for digit in split_line[0].split()])
        outputs.append([''.join(sorted(digit)) for digit in split_line[1].split()])

# Task 1
# Count the digits 1, 4, 7 and 8 in all outputs
# (They use 2, 4, 3 and 7 segments respectively)
count = 0
for output in outputs:
    for digit in output:
        if len(digit) in {2, 4, 3, 7}:
            count += 1
print(f'Digits 1, 4, 7 and 8 appear {count} times in the output values.')
print()

# Task 2
# Decode every output into a 4 digit number
decoded_outputs = [0] * len(outputs)
for i, patterns in enumerate(inputs):
    # Index of a sequence in decode_key will tell the number that sequence represents
    decode_key = [''] * 10
    # First the obvious ones (1, 4, 7, 8): unique segment counts
    decode_key[1] = [digit for digit in patterns if len(digit) == 2][0]
    decode_key[4] = [digit for digit in patterns if len(digit) == 4][0]
    decode_key[7] = [digit for digit in patterns if len(digit) == 3][0]
    decode_key[8] = [digit for digit in patterns if len(digit) == 7][0]
    # 3 is the only one with length of 5 containing both segments of 1
    decode_key[3] = ([digit for digit in patterns if len(digit) == 5 and
                     all([decode_key[1][0] in digit, decode_key[1][1] in digit])][0])
    # 6 is the only one with length of 6 not containing all segments of 7
    decode_key[6] = ([digit for digit in patterns if len(digit) == 6 and
                     not all([decode_key[7][0] in digit,
                              decode_key[7][1] in digit,
                              decode_key[7][2] in digit])
                     ][0])
    # 9 is the only one with length of 6 containing all segments of 3
    decode_key[9] = ([digit for digit in patterns if len(digit) == 6 and
                     all([decode_key[3][0] in digit,
                          decode_key[3][1] in digit,
                          decode_key[3][2] in digit,
                          decode_key[3][3] in digit,
                          decode_key[3][4] in digit])
                     ][0])
    # 5 is the only length-5 one that shares all its segments with 6
    decode_key[5] = ([digit for digit in patterns if len(digit) == 5 and
                     all([digit[0] in decode_key[6],
                          digit[1] in decode_key[6],
                          digit[2] in decode_key[6],
                          digit[3] in decode_key[6],
                          digit[4] in decode_key[6]])
                     ][0])
    # 2 is the remaining one with length 5
    decode_key[2] = [digit for digit in patterns if len(digit) == 5 and digit not in decode_key][0]
    # 0 is the remaining one
    decode_key[0] = [digit for digit in patterns if digit not in decode_key][0]
    # Use the decode key to decode the outputs
    decoded_outputs[i] = int(''.join([str(decode_key.index(digit)) for digit in outputs[i]]))

# Sum of all decoded outputs
output_sum = sum(decoded_outputs)
print(f'Sum of all decoded output values is {output_sum}.') | 35.247191 | 94 | 0.602486 |
# Parse the input from the input file
input_file = 'example_input.txt'
inputs = []
outputs = []
with open(input_file) as input:
for line in input.readlines():
split_line = line.split(' | ')
# Sort every string alphabetically to make further analysis easier
inputs.append([''.join(sorted(digit)) for digit in split_line[0].split()])
outputs.append([''.join(sorted(digit)) for digit in split_line[1].split()])
# Task 1
# Count the digits 1, 4, 7 and 8 in all outputs
# (They use 2, 4, 3 and 7 segments respectively)
count = 0
for output in outputs:
for digit in output:
if len(digit) in {2, 4, 3, 7}:
count += 1
print(f'Digits 1, 4, 7 and 8 appear {count} times in the output values.')
print()
# Task 2
# Decode every output into a 4 digit number
decoded_outputs = [0] * len(outputs)
for i, input in enumerate(inputs):
# Index of a sequence in decode_key will tell the number that sequence represents
decode_key = [''] * 10
# First the obivous ones (1, 4, 7, 8)
decode_key[1] = [digit for digit in input if len(digit) == 2][0]
decode_key[4] = [digit for digit in input if len(digit) == 4][0]
decode_key[7] = [digit for digit in input if len(digit) == 3][0]
decode_key[8] = [digit for digit in input if len(digit) == 7][0]
# 3 is only one with length of 5 and containing both segments of 1
decode_key[3] = ([digit for digit in input if len(digit) == 5 and
all([decode_key[1][0] in digit, decode_key[1][1] in digit])][0])
# 6 is only one with length of 6 and not containing all segments of 7
decode_key[6] = ([digit for digit in input if len(digit) == 6 and
not all([decode_key[7][0] in digit,
decode_key[7][1] in digit,
decode_key[7][2] in digit])
][0])
# 9 is only one with length of 6 and containing all segments of 3
decode_key[9] = ([digit for digit in input if len(digit) == 6 and
all([decode_key[3][0] in digit,
decode_key[3][1] in digit,
decode_key[3][2] in digit,
decode_key[3][3] in digit,
decode_key[3][4] in digit])
][0])
# 5 is only one that shares all its segments with 6
decode_key[5] = ([digit for digit in input if len(digit) == 5 and
all([digit[0] in decode_key[6],
digit[1] in decode_key[6],
digit[2] in decode_key[6],
digit[3] in decode_key[6],
digit[4] in decode_key[6]])
][0])
# 2 is the remaining one with length 5
decode_key[2] = [digit for digit in input if len(digit) == 5 and digit not in decode_key][0]
# 0 is the remaining one
decode_key[0] = [digit for digit in input if digit not in decode_key][0]
# Use the decode key to decode the outputs
decoded_outputs[i] = int(''.join([str(decode_key.index(digit)) for digit in outputs[i]]))
# Sum of all decoded outputs
output_sum = sum(decoded_outputs)
print(f'Sum of all decoded output values is {output_sum}.') | 0 | 0 | 0 |
b0b95eedc79611355c612ba38534a5218d9a9835 | 4,351 | py | Python | tests/test_nullmodels.py | sztal/pathcensus | 0246b1450e5d7fa0421e283f980c367100fcdd6a | [
"MIT"
] | null | null | null | tests/test_nullmodels.py | sztal/pathcensus | 0246b1450e5d7fa0421e283f980c367100fcdd6a | [
"MIT"
] | null | null | null | tests/test_nullmodels.py | sztal/pathcensus | 0246b1450e5d7fa0421e283f980c367100fcdd6a | [
"MIT"
] | null | null | null | """Unit tests for :mod:`pathcensus.nullmodels`."""
# pylint: disable=redefined-outer-name
import random
from itertools import product
import pytest
import numpy as np
from pathcensus.nullmodels import UBCM, UECM
from pathcensus.utils import rowsums, set_numba_seed
from pathcensus.utils import relclose
from tests.utils import make_er_graph, make_rgg, add_random_weights
from tests.utils import get_largest_component
# Graph families and RNG seeds used to parametrize the `small_graph` fixture.
FAMILY = ("erdos_renyi", "geometric")
SEEDS = (20, 40)
_params = list(product(FAMILY, SEEDS))
# Solver methods and model-variant names for the configuration-model fixtures.
_methods = ("newton", "fixed-point")
_ubcm_params = list(product(["cm_exp", "cm"], _methods))
_uecm_params = list(product(["ecm_exp", "ecm"], _methods))
@pytest.fixture(scope="session", params=_params)
def small_graph(request):
    """Build a small connected test graph (ER or RGG) per (family, seed) param."""
    family, seed = request.param
    random.seed(seed)
    maker = make_rgg if family == "geometric" else make_er_graph
    graph = get_largest_component(maker(50, 5))
    return graph, seed
@pytest.fixture(scope="session", params=_ubcm_params)
def small_graph_ubcm(request, small_graph):
    """Fit a UBCM null model to the small test graph per (model, method) param."""
    graph, seed = small_graph
    model_name, fit_method = request.param
    fitted = UBCM(graph)
    fitted.fit(model_name, fit_method)
    return fitted, seed, graph
@pytest.fixture(scope="session", params=_uecm_params)
class TestUBCM:
    """Unit tests for the Unweighted Binary Configuration Model (UBCM)."""
    def test_ubcm(self, small_graph_ubcm):
        """Expected degree sequence of the fitted UBCM should match the
        observed degree sequence.
        """
        model, *_ = small_graph_ubcm
        # Newton converges much tighter than fixed-point iteration.
        tol = 1e-6 if model.fit_args["method"] == "newton" else 1e-3
        assert model.is_fitted()
        assert model.is_valid(tol)
        pmat = model.get_P(dense=True)
        assert relclose(pmat.sum(axis=1), model.D, rtol=tol)
    def test_ubcm_sampling(self, small_graph_ubcm):
        """Mean degree sequence over many sampled graphs should converge
        to the observed degree sequence.
        """
        model, seed, _ = small_graph_ubcm
        tol = 1e-1  # same tolerance for both solvers
        observed = model.D
        n_samples = 1000
        set_numba_seed(seed)
        total = sum(rowsums(rand) for rand in model.sample(n_samples))
        assert relclose(observed, total / n_samples, rtol=tol)
    def test_ubcm_seed(self, small_graph_ubcm):
        """Sampling twice with the same seed must yield identical graphs."""
        model, seed, _ = small_graph_ubcm
        samples = []
        for _ in range(2):
            set_numba_seed(seed)
            samples.append(model.sample_one())
        assert (samples[0] != samples[1]).count_nonzero() == 0
class TestUECM:
    """Unit tests for the Unweighted Enhanced Configuration Model (UECM)."""
    def test_uecm(self, small_graph_uecm):
        """Expected degree and strength sequences of the fitted UECM should
        match the observed sequences.
        """
        model, *_ = small_graph_uecm
        tol = 1e-1 if model.fit_args["method"] == "newton" else 2e-1
        assert model.is_fitted()
        assert model.is_valid(tol)
        pmat = model.get_P(dense=True)
        wmat = model.get_W(dense=True)
        assert relclose(pmat.sum(axis=1), model.D, rtol=tol)
        assert relclose(wmat.sum(axis=1), model.S, rtol=tol)
    def test_uecm_sampling(self, small_graph_uecm):
        """Mean degree and strength sequences over many sampled graphs
        should converge to the observed sequences.
        """
        model, seed, _ = small_graph_uecm
        tol = 1e-1 if model.fit_args["method"] == "newton" else 2e-1
        obs_degree, obs_strength = model.D, model.S
        exp_degree = np.zeros_like(obs_degree, dtype=float)
        exp_strength = np.zeros_like(obs_strength, dtype=float)
        n_samples = 1000
        set_numba_seed(seed)
        for rand in model.sample(n_samples):
            # Weighted row sums give strengths; binarized row sums give degrees.
            exp_strength += rowsums(rand)
            rand.data[:] = 1
            exp_degree += rowsums(rand)
        assert relclose(obs_degree, exp_degree / n_samples, rtol=tol)
        assert relclose(obs_strength, exp_strength / n_samples, rtol=tol)
| 31.078571 | 74 | 0.643071 | """Unit tests for :mod:`pathcensus.nullmodels`."""
# pylint: disable=redefined-outer-name
import random
from itertools import product
import pytest
import numpy as np
from pathcensus.nullmodels import UBCM, UECM
from pathcensus.utils import rowsums, set_numba_seed
from pathcensus.utils import relclose
from tests.utils import make_er_graph, make_rgg, add_random_weights
from tests.utils import get_largest_component
FAMILY = ("erdos_renyi", "geometric")
SEEDS = (20, 40)
_params = list(product(FAMILY, SEEDS))
_methods = ("newton", "fixed-point")
_ubcm_params = list(product(["cm_exp", "cm"], _methods))
_uecm_params = list(product(["ecm_exp", "ecm"], _methods))
@pytest.fixture(scope="session", params=_params)
def small_graph(request):
    """Build a small connected test graph (ER or RGG) per (family, seed) param."""
    family, seed = request.param
    random.seed(seed)
    maker = make_rgg if family == "geometric" else make_er_graph
    graph = get_largest_component(maker(50, 5))
    return graph, seed
@pytest.fixture(scope="session", params=_ubcm_params)
def small_graph_ubcm(request, small_graph):
    """Fit a UBCM null model to the small test graph per (model, method) param."""
    graph, seed = small_graph
    model_name, fit_method = request.param
    fitted = UBCM(graph)
    fitted.fit(model_name, fit_method)
    return fitted, seed, graph
@pytest.fixture(scope="session", params=_uecm_params)
def small_graph_uecm(request, small_graph):
    """Fit a UECM null model to a randomly weighted version of the small
    test graph per (model, method) param.
    """
    graph, seed = small_graph
    model_name, fit_method = request.param
    np.random.seed(seed)
    weighted = add_random_weights(graph)
    fitted = UECM(weighted)
    fitted.fit(model_name, fit_method)
    return fitted, seed, weighted
class TestUBCM:
    """Unit tests for the Unweighted Binary Configuration Model (UBCM)."""
    def test_ubcm(self, small_graph_ubcm):
        """Expected degree sequence of the fitted UBCM should match the
        observed degree sequence.
        """
        model, *_ = small_graph_ubcm
        # Newton converges much tighter than fixed-point iteration.
        tol = 1e-6 if model.fit_args["method"] == "newton" else 1e-3
        assert model.is_fitted()
        assert model.is_valid(tol)
        pmat = model.get_P(dense=True)
        assert relclose(pmat.sum(axis=1), model.D, rtol=tol)
    def test_ubcm_sampling(self, small_graph_ubcm):
        """Mean degree sequence over many sampled graphs should converge
        to the observed degree sequence.
        """
        model, seed, _ = small_graph_ubcm
        tol = 1e-1  # same tolerance for both solvers
        observed = model.D
        n_samples = 1000
        set_numba_seed(seed)
        total = sum(rowsums(rand) for rand in model.sample(n_samples))
        assert relclose(observed, total / n_samples, rtol=tol)
    def test_ubcm_seed(self, small_graph_ubcm):
        """Sampling twice with the same seed must yield identical graphs."""
        model, seed, _ = small_graph_ubcm
        samples = []
        for _ in range(2):
            set_numba_seed(seed)
            samples.append(model.sample_one())
        assert (samples[0] != samples[1]).count_nonzero() == 0
class TestUECM:
    """Unit tests for the Unweighted Enhanced Configuration Model (UECM)."""
    def test_uecm(self, small_graph_uecm):
        """Expected degree and strength sequences of the fitted UECM should
        match the observed sequences.
        """
        model, *_ = small_graph_uecm
        tol = 1e-1 if model.fit_args["method"] == "newton" else 2e-1
        assert model.is_fitted()
        assert model.is_valid(tol)
        pmat = model.get_P(dense=True)
        wmat = model.get_W(dense=True)
        assert relclose(pmat.sum(axis=1), model.D, rtol=tol)
        assert relclose(wmat.sum(axis=1), model.S, rtol=tol)
    def test_uecm_sampling(self, small_graph_uecm):
        """Mean degree and strength sequences over many sampled graphs
        should converge to the observed sequences.
        """
        model, seed, _ = small_graph_uecm
        tol = 1e-1 if model.fit_args["method"] == "newton" else 2e-1
        obs_degree, obs_strength = model.D, model.S
        exp_degree = np.zeros_like(obs_degree, dtype=float)
        exp_strength = np.zeros_like(obs_strength, dtype=float)
        n_samples = 1000
        set_numba_seed(seed)
        for rand in model.sample(n_samples):
            # Weighted row sums give strengths; binarized row sums give degrees.
            exp_strength += rowsums(rand)
            rand.data[:] = 1
            exp_degree += rowsums(rand)
        assert relclose(obs_degree, exp_degree / n_samples, rtol=tol)
        assert relclose(obs_strength, exp_strength / n_samples, rtol=tol)
| 229 | 0 | 22 |
9b37adbc6017848e838f3819b3a328f6adc4fde6 | 2,733 | py | Python | rta/models/rolling_median.py | MatteoLacki/rta | 93944d6fc934126e0bb4d076c8b4213cadbe49a1 | [
"BSD-2-Clause"
] | 1 | 2018-05-31T14:31:18.000Z | 2018-05-31T14:31:18.000Z | rta/models/rolling_median.py | MatteoLacki/rta | 93944d6fc934126e0bb4d076c8b4213cadbe49a1 | [
"BSD-2-Clause"
] | null | null | null | rta/models/rolling_median.py | MatteoLacki/rta | 93944d6fc934126e0bb4d076c8b4213cadbe49a1 | [
"BSD-2-Clause"
] | null | null | null | try:
import matplotlib.pyplot as plt
except ModuleNotFoundError:
plt = None
import numpy as np
from scipy.interpolate import interp1d
from scipy.signal import medfilt
from rta.models.interpolant import Interpolant
from rta.models.spline import Spline
from rta.array_operations.dedupy import dedup_np
from rta.math.splines import beta as beta_spline
class RollingMedian(Interpolant):
    """Interpolator based on a rolling median.

    Denoise the response with a rolling median, then interpolate linearly
    through every k-th median value. Since all intermediate medians are
    computed anyway, fancier node selections would also be possible.
    """
    def __init__(self, ws=51, k=10):
        """Set up the interpolator.

        Args:
            ws (odd int): window size of the rolling median.
            k (int): stride between medians used as interpolation nodes.
        """
        self.ws = ws
        self.k = k
        self.params = {'ws': ws, 'k': k}
    def fit(self, x, y, sort=True):
        """Fit the rolling-median interpolator.

        Args:
            x (np.array): the control variable.
            y (np.array): the response variable.
            sort (bool): sort both arrays by x before fitting.
        """
        if sort:
            order = np.argsort(x)
            x = x[order]
            y = y[order]
        self.medians = medfilt(y, self.ws)
        # Interpolate through every k-th median; outside the fitted range
        # the interpolant evaluates to 0 instead of raising.
        self.interpo = interp1d(
            x[::self.k], self.medians[::self.k],
            bounds_error=False, fill_value=0,
        )
        self.x = x
        self.y = y
# TODO: implement a cheaper variant that computes only the medians that are
# actually interpolated, instead of the full rolling median.
class RolllingMedianSimple(RollingMedian):
    """Avoid calculating too many medians."""
    # NOTE(review): the class name carries a triple-"l" typo ("Rolll...");
    # renaming it would break existing importers, so it is kept as-is.
    def fit(self, x, y, sort=True):
        """Fit the model.
        Args:
            x (np.array): The control variable.
            y (np.array): The response variable.
        """
        # Unimplemented stub: overrides RollingMedian.fit and leaves the
        # instance unfitted (no `medians`/`interpo` attributes are set).
        pass
class RollingMedianSpline(Spline):
    """The rolling median spline: a beta spline through rolling-median values."""
    def __init__(self, ws=51, n=100):
        """Constructor.
        Args:
            ws (odd int): window size.
            n (int): the number of nodes used for the beta spline (roughly correspond to 100/k-percentiles).
        """
        self.ws = ws
        self.n = n
        # `params` mirrors the constructor arguments so a copy can re-create
        # an equivalent instance.
        self.params = {'ws':ws, 'n':n} # this is for copy to work
| 28.46875 | 108 | 0.562386 | try:
import matplotlib.pyplot as plt
except ModuleNotFoundError:
plt = None
import numpy as np
from scipy.interpolate import interp1d
from scipy.signal import medfilt
from rta.models.interpolant import Interpolant
from rta.models.spline import Spline
from rta.array_operations.dedupy import dedup_np
from rta.math.splines import beta as beta_spline
class RollingMedian(Interpolant):
    """Interpolator based on a rolling median.

    Denoise the response with a rolling median, then interpolate linearly
    through every k-th median value. Since all intermediate medians are
    computed anyway, fancier node selections would also be possible.
    """
    def __init__(self, ws=51, k=10):
        """Set up the interpolator.

        Args:
            ws (odd int): window size of the rolling median.
            k (int): stride between medians used as interpolation nodes.
        """
        self.ws = ws
        self.k = k
        self.params = {'ws': ws, 'k': k}
    def __repr__(self):
        return "RollingMedian(ws:{} k:{})".format(self.ws, self.k)
    def fit(self, x, y, sort=True):
        """Fit the rolling-median interpolator.

        Args:
            x (np.array): the control variable.
            y (np.array): the response variable.
            sort (bool): sort both arrays by x before fitting.
        """
        if sort:
            order = np.argsort(x)
            x = x[order]
            y = y[order]
        self.medians = medfilt(y, self.ws)
        # Interpolate through every k-th median; outside the fitted range
        # the interpolant evaluates to 0 instead of raising.
        self.interpo = interp1d(
            x[::self.k], self.medians[::self.k],
            bounds_error=False, fill_value=0,
        )
        self.x = x
        self.y = y
# TODO: implement a cheaper variant that computes only the medians that are
# actually interpolated, instead of the full rolling median.
class RolllingMedianSimple(RollingMedian):
    """Avoid calculating too many medians."""
    # NOTE(review): the class name carries a triple-"l" typo ("Rolll...");
    # renaming it would break existing importers, so it is kept as-is.
    def fit(self, x, y, sort=True):
        """Fit the model.
        Args:
            x (np.array): The control variable.
            y (np.array): The response variable.
        """
        # Unimplemented stub: overrides RollingMedian.fit and leaves the
        # instance unfitted (no `medians`/`interpo` attributes are set).
        pass
class RollingMedianSpline(Spline):
    """The rolling median spline.

    Denoise the response with a rolling median, then fit a beta spline
    with ``n`` nodes through the filtered values.
    """
    def __init__(self, ws=51, n=100):
        """Constructor.
        Args:
            ws (odd int): window size.
            n (int): the number of nodes used for the beta spline (roughly correspond to 100/k-percentiles).
        """
        self.ws = ws
        self.n = n
        self.params = {'ws':ws, 'n':n} # this is for copy to work
    def __repr__(self):
        # NOTE(review): only `ws` is shown here, `n` is omitted — confirm
        # whether that is intentional.
        return "RollingMedianSpline(ws:{})".format(self.ws)
    def fit(self, x, y, sort=True, dedup=True):
        """Fit the spline.

        Args:
            x (np.array): the control variable.
            y (np.array): the response variable.
            sort (bool): sort both arrays by x before fitting.
            dedup (bool): collapse duplicate x values before fitting.
        """
        if sort:
            i = np.argsort(x)
            x, y = x[i], y[i]
        # Keep the raw (sorted) data around for inspection/plotting.
        self.x = x
        self.y = y
        # BUGFIX: the `dedup` flag used to be ignored (deduplication always
        # ran); it is now honored. The default preserves the old behavior.
        if dedup:
            x, y = dedup_np(x, y)
        self.medians = medfilt(y, self.ws)
        self.spline = beta_spline(x, self.medians, self.n)
62ea5c491d3cf3615721d2b2308cab1f249ad357 | 8,801 | py | Python | models/star_model.py | guanhuaw/DeepMAGiC | 92303abba51eb0971f4972cefca8565a00cb86e0 | [
"BSD-3-Clause"
] | 14 | 2020-04-16T22:19:17.000Z | 2022-02-13T08:28:35.000Z | models/star_model.py | guanhuaw/DeepMAGiC | 92303abba51eb0971f4972cefca8565a00cb86e0 | [
"BSD-3-Clause"
] | null | null | null | models/star_model.py | guanhuaw/DeepMAGiC | 92303abba51eb0971f4972cefca8565a00cb86e0 | [
"BSD-3-Clause"
] | 3 | 2020-06-19T01:31:48.000Z | 2021-08-23T13:49:57.000Z | import torch
import numpy as np
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
from . import losses
from util.metrics import PSNR
import pytorch_msssim
import random
import torch.nn.functional as F
| 47.572973 | 127 | 0.595955 | import torch
import numpy as np
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
from . import losses
from util.metrics import PSNR
import pytorch_msssim
import random
import torch.nn.functional as F
class StarModel(BaseModel):
    """StarGAN-style multi-contrast MRI synthesis model.

    A single generator maps a MAGiC input to one of six target contrasts,
    selected via a one-hot label vector; an optional discriminator provides
    both a real/fake ("label") and a contrast-classification ("kind") signal.
    """
    def name(self):
        """Short identifier used by the framework."""
        return 'STARModel'
    def label2onehot(self, batch_size, labels):
        """Convert label indices to one-hot vectors."""
        dim = 6  # number of target contrasts (see `namelist` in set_input)
        out = torch.zeros(batch_size, dim)
        out[np.arange(batch_size), labels] = 1
        return out
    def initialize(self, opt):
        """Build networks, losses and optimizers according to `opt`."""
        BaseModel.initialize(self, opt)
        self.isTrain = opt.isTrain
        # specify the training losses you want to print out. The program will call base_model.get_current_losses
        if self.isTrain:
            if self.train_phase == 'generator':
                self.model_names = ['G']
                self.loss_names = ['G_I_L1', 'G_I_L2', 'SSIM', 'PSNR']
            else:
                self.model_names = ['G', 'D']
                self.loss_names = ['G_GAN_label', 'G_GAN_kind','G_I_L1', 'G_I_L2', 'D_GAN_label', 'D_GAN_kind', 'SSIM', 'PSNR']
            if opt.use_vgg:
                self.loss_names += ['vgg']
        else: # during test time, only load Gs
            self.model_names = ['G']
            self.loss_names = ['SSIM', 'PSNR']
        # specify the images you want to save/display. The program will call base_model.get_current_visuals
        self.visual_names = ['real_A', 'fake_B', 'real_B']
        self.netG = networks.define_G(self.opt, opt.input_nc, opt.output_nc, opt.ngf,
                                      opt.which_model_netG, opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain,
                                      self.gpu_ids)
        self.criterionL1 = torch.nn.L1Loss()
        self.criterionMSE = torch.nn.MSELoss()
        if opt.use_vgg:
            self.perceptual = losses.PerceptualLoss()
            self.perceptual.initialize(self.criterionMSE)
        self.ssim_loss = pytorch_msssim.SSIM(val_range=1)
        if self.isTrain:
            self.optimizers = []
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
        if self.isTrain and self.train_phase == 'together':
            self.no_wgan = opt.no_wgan
            self.no_wgan_gp = opt.no_wgan_gp
            if self.no_wgan_gp == False:
                self.disc_step = opt.disc_step
            else:
                self.disc_step = 1
            self.disc_model = opt.disc_model
            use_sigmoid = opt.no_lsgan
            if opt.disc_model == 'pix2pix':
                self.netD = networks.define_D(opt.input_nc + 1, opt.ndf,
                                              opt.which_model_netD,
                                              opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain,
                                              self.gpu_ids)
            if opt.disc_model == 'traditional':
                self.netD = networks.define_D(self.opt, 1, opt.ndf, opt.which_model_netD,
                                              opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain,
                                              self.gpu_ids)
            self.loss_wgan_gp = opt.loss_wgan_gp
            self.fake_pool = ImagePool(opt.pool_size)
            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, use_l1=not opt.no_l1gan).to(self.device)
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_D)
    def set_input(self, input, idx):
        """Unpack a data batch: pick a target contrast and build its label.

        During training the target contrast is drawn uniformly at random;
        at test time the caller-supplied `idx` selects it.
        """
        self.namelist = ['t1fse', 't2fse', 't1flair', 't2flair', 'pdfse', 'stir']
        self.real_A = input['magic'].to(self.device)
        if self.opt.isTrain:
            self.idx = random.randrange(6)
        else:
            self.idx = idx
        #self.idx = idx
        self.label = self.namelist[self.idx]
        self.label_size = self.real_A.shape[0]
        self.cls = torch.tensor(np.ones(self.label_size)*self.idx, dtype=torch.int64).to(self.device)
        self.label_channel = self.label2onehot(self.label_size, self.idx).to(self.device)
        self.real_B = input[self.label].to(self.device)
        self.image_paths = input['path']
    def forward(self):
        """Run the generator and compute the quality metrics (PSNR/SSIM)."""
        self.fake_B = self.netG(self.real_A, self.label_channel)
        self.loss_PSNR = PSNR(self.real_B, self.fake_B)
        # SSIM is computed on 3-channel copies of the single-channel images.
        self.loss_SSIM = self.ssim_loss(self.real_B.repeat(1, 3, 1, 1), self.fake_B.repeat(1, 3, 1, 1))
    def backward_D(self):
        """Discriminator update: real/fake ("label") and contrast ("kind") losses."""
        if self.disc_model == 'pix2pix':
            fake_AB = self.fake_pool.query(torch.cat((self.real_A, self.fake_B), 1))
            pred_fake = self.netD(fake_AB.detach())
            real_AB = torch.cat((self.real_A, self.real_B), 1)
            pred_real = self.netD(real_AB)
        if self.disc_model == 'traditional':
            fake_AB = self.fake_pool.query(self.fake_B)
            pred_real_label, pred_real_kind = self.netD(self.real_B)
            pred_fake_label, pred_fake_kind = self.netD(fake_AB.detach())
        if self.no_wgan == False:
            # NOTE(review): pred_fake/pred_real are only bound when
            # disc_model == 'pix2pix'; the wgan branches appear to assume
            # that configuration — confirm before enabling no_wgan=False.
            self.loss_D_GAN_fake = pred_fake.mean()
            self.loss_D_GAN_real = -pred_real.mean()
        elif self.no_wgan_gp == False:
            self.loss_D_GAN_real = -pred_fake.mean()
            self.loss_D_GAN_fake = pred_fake.mean()
            # NOTE(review): self.kreal / self.kfake are never assigned in
            # this class, so this gradient-penalty path looks unfinished —
            # verify before enabling no_wgan_gp=False.
            alpha = torch.rand(self.kreal.size(0), 1, 1, 1).to(self.device)
            x_hat = (alpha * self.kreal.data + (1 - alpha) * self.kfake.data).requires_grad_(True)
            out_src = self.netD(x_hat)
            self.d_loss_gp = losses.gradient_penalty(out_src, x_hat) * self.loss_wgan_gp
        else:
            self.loss_D_GAN_fake_label = self.criterionGAN(pred_fake_label, False)
            self.loss_D_GAN_real_label = self.criterionGAN(pred_real_label, True)
            self.loss_D_GAN_label = 0.5*(self.loss_D_GAN_fake_label+self.loss_D_GAN_real_label)*self.opt.gamma
            self.loss_D_GAN_kind = F.cross_entropy(pred_real_kind, self.cls)*self.opt.kind
        self.loss_D_GAN = (self.loss_D_GAN_label + self.loss_D_GAN_kind)*self.opt.beta*self.opt.loss_GAN
        if self.no_wgan_gp == False:
            self.loss_D_GAN = self.loss_D_GAN + self.d_loss_gp
        self.loss_D_GAN.backward()
    def backward_G(self):
        """Generator update: adversarial + content (L1/L2) + SSIM (+ VGG) losses."""
        # First, G(A) should fake the discriminator
        if self.isTrain and self.train_phase == 'together':
            if self.disc_model == 'pix2pix':
                # BUGFIX: previously referenced self.realA / self.fakeB,
                # attributes that are never set (set_input/forward define
                # real_A and fake_B) and would raise AttributeError here.
                fake_AB = torch.cat((self.real_A, self.fake_B), 1)
                pred_fake = self.netD(fake_AB)
            if self.disc_model == 'traditional':
                pred_fake_label, pred_fake_kind = self.netD(self.fake_B)
            if self.no_wgan == False:
                self.loss_G_GAN = -pred_fake.mean()
            elif self.no_wgan_gp == False:
                self.loss_G_GAN = -pred_fake.mean()
            else:
                self.loss_G_GAN_label = self.criterionGAN(pred_fake_label, True)*self.opt.gamma
                self.loss_G_GAN_kind = F.cross_entropy(pred_fake_kind, self.cls)*self.opt.kind
                self.loss_G_GAN = self.loss_G_GAN_label + self.loss_G_GAN_kind
        else:
            self.loss_G_GAN = 0
        self.loss_G_GAN = self.loss_G_GAN * self.opt.loss_GAN
        # Second, G(A) = B
        self.loss_G_I_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.loss_content_I_l1
        self.loss_G_I_L2 = self.criterionMSE(self.fake_B, self.real_B) * self.opt.loss_content_I_l2
        self.loss_G_CON_I = self.loss_G_I_L1 + self.loss_G_I_L2
        # SSIM is a similarity (higher is better), hence subtracted.
        self.loss_G = self.loss_G_CON_I + self.loss_G_GAN - self.loss_SSIM * self.opt.loss_ssim
        if self.opt.use_vgg:
            self.loss_vgg = self.perceptual.get_loss(self.fake_B.repeat(1,3,1,1),self.real_B.repeat(1,3,1,1))*self.opt.loss_vgg
            self.loss_G = self.loss_G + self.loss_vgg
        self.loss_G.backward()
    def optimize_parameters(self):
        """One optimization step: D (possibly several sub-steps) then G."""
        if self.isTrain and self.train_phase == 'together':
            self.forward()
            self.set_requires_grad(self.netD, True)
            for iter_d in range(self.disc_step):
                self.optimizer_D.zero_grad()
                self.backward_D()
                self.optimizer_D.step()
            # Freeze D while updating G.
            self.set_requires_grad(self.netD, False)
            self.optimizer_G.zero_grad()
            self.backward_G()
            self.optimizer_G.step()
        else:
            self.forward()
            self.optimizer_G.zero_grad()
            self.backward_G()
            self.optimizer_G.step()
| 8,108 | 424 | 23 |
0e4bf75c037dbff3e63ba9e445f73dc18914cc4a | 2,196 | py | Python | memos/hdf5/dir_as_label.py | Bingwen-Hu/hackaway | 69727d76fd652390d9660e9ea4354ba5cc76dd5c | [
"BSD-2-Clause"
] | null | null | null | memos/hdf5/dir_as_label.py | Bingwen-Hu/hackaway | 69727d76fd652390d9660e9ea4354ba5cc76dd5c | [
"BSD-2-Clause"
] | null | null | null | memos/hdf5/dir_as_label.py | Bingwen-Hu/hackaway | 69727d76fd652390d9660e9ea4354ba5cc76dd5c | [
"BSD-2-Clause"
] | null | null | null | import h5py
import os
import unittest
import numpy as np
| 28.519481 | 83 | 0.576047 | import h5py
import os
class FaceHdf5Writer(object):
    """Write labeled float datasets (plus attributes) into a new HDF5 file."""
    def __init__(self, hdf5Path):
        """Create the target file; refuse to clobber an existing one."""
        if os.path.exists(hdf5Path):
            raise ValueError("The supplied `hdf5path` already "
                "exists and cannot be overwritten. Manually "
                "delete before continuing.", hdf5Path)
        self.db = h5py.File(hdf5Path, "w")
    def write(self, label, data, **attrs):
        """Store `data` under `label`, attach `attrs`, and return the dataset.

        The first axis is left unbounded (None in maxshape) so the dataset
        can be resized later.
        """
        maxshape = (None, *data.shape[1:])
        print(f"write into label: {label} as maxshape: {maxshape}...")
        dset = self.db.create_dataset(label, data.shape, maxshape=maxshape, dtype='float')
        dset[...] = data
        for key, value in attrs.items():
            dset.attrs[key] = value
        return dset
    def close(self):
        """Close the underlying HDF5 file."""
        self.db.close()
class FaceHdf5Reader(object):
    """Read labeled datasets back from an existing HDF5 file."""
    def __init__(self, hdf5Path):
        """Open `hdf5Path` read-only; raise ValueError if it does not exist."""
        if not os.path.exists(hdf5Path):
            raise ValueError("The supplied `hdf5path` does not "
                "exist", hdf5Path)
        # BUGFIX: open explicitly in read-only mode. Older h5py versions
        # defaulted to append mode ('a') when no mode was given, which could
        # silently modify a file a reader should never touch.
        self.db = h5py.File(hdf5Path, 'r')
    def read(self, label):
        """Return the full contents of dataset `label` as an in-memory array."""
        dset = self.db[label]
        return dset[:]
    def close(self):
        """Close the underlying HDF5 file."""
        self.db.close()
import unittest
import numpy as np
class TestFaceHdf5Writer(unittest.TestCase):
    """Round-trip tests for FaceHdf5Writer / FaceHdf5Reader."""
    def setUp(self):
        self.writer = FaceHdf5Writer('test.h5')
    def test_writer(self):
        """Write two datasets (one with attributes), then read one back."""
        features = np.ones((7, 128), dtype=int)
        dset = self.writer.write('test', features, **{'level': 1, 'active': True})
        self.assertTupleEqual(dset[...].shape, features.shape)
        # Attributes must survive the write.
        self.assertEqual(dset.attrs['active'], True)
        self.assertEqual(dset.attrs['level'], 1)
        # A second, larger float dataset.
        images = np.ones((100, 64, 64), dtype=float)
        dset = self.writer.write('image', images)
        self.assertTupleEqual(dset[...].shape, images.shape)
        # Close the writer so the reader can open the same file.
        self.writer.close()
        reader = FaceHdf5Reader('test.h5')
        self.assertTupleEqual(reader.read('image').shape, images.shape)
        reader.close()
    def tearDown(self):
        # The test body may have closed the writer already; h5py appears to
        # tolerate a second close — confirm on older h5py versions.
        self.writer.close()
        os.remove('test.h5')
a61c2f0d8b9b2a3c40c353b6dd04cca283f1aa70 | 11,553 | py | Python | map_objects/game_map.py | Nicocchi/rougelike | c8cc2ee4f3c1a12091a50a961de4b47c83e4e3b5 | [
"MIT"
] | 1 | 2020-01-21T11:30:09.000Z | 2020-01-21T11:30:09.000Z | map_objects/game_map.py | Nicocchi/rougelike | c8cc2ee4f3c1a12091a50a961de4b47c83e4e3b5 | [
"MIT"
] | 4 | 2021-04-30T21:05:28.000Z | 2022-03-12T00:13:13.000Z | map_objects/game_map.py | Nicocchi/rougelike | c8cc2ee4f3c1a12091a50a961de4b47c83e4e3b5 | [
"MIT"
] | null | null | null | import tcod as libtcodpy
from random import randint
from components.ai import BasicMonster
from components.equipment import EquipmentSlots
from components.equippable import Equippable
from components.fighter import Fighter
from components.item import Item
from components.stairs import Stairs
from entity import Entity
from game_messages import Message
from item_functions import cast_confuse, cast_fireball, cast_lightning, heal
from map_objects.rectangle import Rect
from map_objects.tile import Tile
from random_utils import from_dungeon_level, random_choice_from_dict
from render_functions import RenderOrder
DEPTH = 10
MIN_SIZE = 5
FULL_ROOMS = False
MAP_WIDTH = 63
MAP_HEIGHT = 40
bsp_rooms = []
# Create next floor and heal player
| 46.963415 | 124 | 0.56903 | import tcod as libtcodpy
from random import randint
from components.ai import BasicMonster
from components.equipment import EquipmentSlots
from components.equippable import Equippable
from components.fighter import Fighter
from components.item import Item
from components.stairs import Stairs
from entity import Entity
from game_messages import Message
from item_functions import cast_confuse, cast_fireball, cast_lightning, heal
from map_objects.rectangle import Rect
from map_objects.tile import Tile
from random_utils import from_dungeon_level, random_choice_from_dict
from render_functions import RenderOrder
DEPTH = 10
MIN_SIZE = 5
FULL_ROOMS = False
MAP_WIDTH = 63
MAP_HEIGHT = 40
bsp_rooms = []
class GameMap:
    """A dungeon floor: a 2-D grid of tiles plus procedural generation helpers."""
    def __init__(self, width, height, dungeon_level=1):
        self.width = width
        self.height = height
        self.tiles = self.initialize_tiles()
        self.dungeon_level = dungeon_level
    def initialize_tiles(self):
        """Fill the whole map with solid (blocked) tiles; rooms are carved later."""
        tiles = [[Tile(True) for y in range(self.height)] for x in range(self.width)]
        return tiles
    def make_map(self, max_rooms, room_min_size, room_max_size, map_width, map_height, player, entities):
        """Carve up to `max_rooms` non-overlapping rooms, connect them with
        tunnels, populate them, and place the down stairs in the last room.
        """
        rooms = []
        num_rooms = 0
        center_of_last_room_x = None
        center_of_last_room_y = None
        for r in range(max_rooms):
            # Random width and height
            w = randint(room_min_size, room_max_size)
            h = randint(room_min_size, room_max_size)
            # Random position without going out of the boundaries of the map
            x = randint(0, map_width - w - 1)
            y = randint(0, map_height - h - 1)
            # "Rect" class makes rectangles easier to work with
            new_room = Rect(x, y, w, h)
            # Run through the other rooms and see if they intersect with this one
            for other_room in rooms:
                if new_room.intersect(other_room):
                    break
            else:
                # This means there are no intersections, so this room is valid
                # "paint" it to the map's tiles
                self.create_room(new_room)
                # Center coordinates of new room
                (new_x, new_y) = new_room.center()
                center_of_last_room_x = new_x
                center_of_last_room_y = new_y
                if num_rooms == 0:
                    # This is the first room, where the player starts at
                    player.x = new_x
                    player.y = new_y
                else:
                    # All rooms after the first:
                    # Connect it to the previous room with a tunnel
                    # Center coordinates of previous room
                    (prev_x, prev_y) = rooms[num_rooms - 1].center()
                    # Flip a coin (random number that is either 0 or 1)
                    if randint(0, 1) == 1:
                        # First move horizontally, then vertically
                        self.create_h_tunnel(prev_x, new_x, prev_y)
                        self.create_v_tunnel(prev_y, new_y, new_x)
                    else:
                        # First move vertically, then horizontally
                        self.create_v_tunnel(prev_y, new_y, prev_x)
                        self.create_h_tunnel(prev_x, new_x, new_y)
                self.place_entities(new_room, entities)
                # Append the new room to the list
                rooms.append(new_room)
                num_rooms += 1
        stairs_component = Stairs(self.dungeon_level + 1)
        down_stairs = Entity(center_of_last_room_x, center_of_last_room_y, '>', libtcodpy.white, 'Stairs',
                             render_order=RenderOrder.STAIRS, stairs=stairs_component)
        entities.append(down_stairs)
    def create_room(self, room):
        # Go through the tiles in the rectangle and make them passable
        for x in range(room.x1 + 1, room.x2):
            for y in range(room.y1 + 1, room.y2):
                self.tiles[x][y].blocked = False
                self.tiles[x][y].block_sight = False
    def create_h_tunnel(self, x1, x2, y):
        """Carve a horizontal tunnel between x1 and x2 at row y."""
        for x in range(min(x1, x2), max(x1, x2) + 1):
            self.tiles[x][y].blocked = False
            self.tiles[x][y].block_sight = False
    def create_v_tunnel(self, y1, y2, x):
        """Carve a vertical tunnel between y1 and y2 at column x."""
        for y in range(min(y1, y2), max(y1, y2) + 1):
            self.tiles[x][y].blocked = False
            self.tiles[x][y].block_sight = False
    def place_entities(self, room, entities):
        """Spawn a level-scaled random number of monsters and items in `room`."""
        max_monsters_per_room = from_dungeon_level([[2, 1], [3, 4], [5, 6]], self.dungeon_level)
        max_items_per_room = from_dungeon_level([[1, 1], [2, 4]], self.dungeon_level)
        # Get a random number of monsters
        number_of_monsters = randint(0, max_monsters_per_room)
        number_of_items = randint(0, max_items_per_room)
        monster_chances = {
            'orc': 80,
            'rat': 60,
            'troll': from_dungeon_level([[15, 3], [30, 5], [60, 7]], self.dungeon_level)}
        item_chances = {
            'healing_potion': 35,
            'sword': from_dungeon_level([[5, 4]], self.dungeon_level),
            'shield': from_dungeon_level([[15, 8]], self.dungeon_level),
            'lightning_scroll': from_dungeon_level([[25, 4]], self.dungeon_level),
            'fireball_scroll': from_dungeon_level([[25, 6]], self.dungeon_level),
            'confusion_scroll': from_dungeon_level([[10, 2]], self.dungeon_level)}
        if number_of_monsters:
            # PERF: parse the name-generator config once per room instead of
            # once per monster (it was previously re-parsed inside the loop).
            libtcodpy.namegen_parse('data/mingos_demon.cfg')
        for i in range(number_of_monsters):
            # Choose a random location in the room
            x = randint(room.x1 + 1, room.x2 - 1)
            y = randint(room.y1 + 1, room.y2 - 1)
            if not any([entity for entity in entities if entity.x == x and entity.y == y]):
                monster_choice = random_choice_from_dict(monster_chances)
                orc_hp = from_dungeon_level([[20, 1], [25, 6], [60, 15]], self.dungeon_level)
                orc_strength = from_dungeon_level([[4, 1], [6, 6], [8, 15]], self.dungeon_level)
                troll_hp = from_dungeon_level([[30, 1], [36, 6], [40, 15]], self.dungeon_level)
                troll_strength = from_dungeon_level([[8, 1], [10, 6], [12, 15]], self.dungeon_level)
                name_bool = randint(0, 1)
                male_name = libtcodpy.namegen_generate('demon male')
                female_name = libtcodpy.namegen_generate('demon female')
                if monster_choice == 'orc':
                    fighter_component = Fighter(hp=orc_hp, defense=0, strength=orc_strength, dexterity=0,
                                                intelligence=0, charisma=0,
                                                xp=35)
                    ai_component = BasicMonster()
                    monster = Entity(x, y, 'O', libtcodpy.Color(27,105,0),
                                     '{0}'.format(male_name if name_bool == 0 else female_name), blocks=True,
                                     render_order=RenderOrder.ACTOR, fighter=fighter_component, ai=ai_component)
                elif monster_choice == 'troll':
                    fighter_component = Fighter(hp=troll_hp, defense=2, strength=troll_strength, dexterity=0,
                                                intelligence=0, charisma=0,
                                                xp=100)
                    ai_component = BasicMonster()
                    monster = Entity(x, y, 'T', libtcodpy.Color(27,105,0),
                                     '{0}'.format(male_name if name_bool == 0 else female_name), blocks=True,
                                     render_order=RenderOrder.ACTOR, fighter=fighter_component, ai=ai_component)
                else:
                    fighter_component = Fighter(hp=4, defense=0, strength=2, dexterity=0,
                                                intelligence=0, charisma=0,
                                                xp=10)
                    ai_component = BasicMonster()
                    monster = Entity(x, y, 'R', libtcodpy.Color(27,105,0),
                                     '{0}'.format(male_name if name_bool == 0 else female_name), blocks=True,
                                     render_order=RenderOrder.ACTOR, fighter=fighter_component, ai=ai_component)
                entities.append(monster)
        for i in range(number_of_items):
            x = randint(room.x1 + 1, room.x2 - 1)
            y = randint(room.y1 + 1, room.y2 - 1)
            if not any([entity for entity in entities if entity.x == x and entity.y == y]):
                item_choice = random_choice_from_dict(item_chances)
                if item_choice == 'healing_potion':
                    item_component = Item(use_function=heal, amount=40)
                    item = Entity(x, y, '!', libtcodpy.Color(105,69,255), 'Healing Potion', render_order=RenderOrder.ITEM,
                                  item=item_component)
                elif item_choice == 'fireball_scroll':
                    item_component = Item(use_function=cast_fireball, targeting=True, targeting_message=Message(
                        'Left-click a target tile for the fireball, or right-click to cancel.', libtcodpy.light_cyan),
                        damage=25, radius=3)
                    item = Entity(x, y, '#', libtcodpy.Color(186,251,24), 'Fireball Scroll', render_order=RenderOrder.ITEM,
                                  item=item_component)
                elif item_choice == 'sword':
                    equippable_component = Equippable(EquipmentSlots.MAIN_HAND, strength_bonus=3)
                    item = Entity(x, y, '/', libtcodpy.sky, 'Sword', equippable=equippable_component)
                elif item_choice == 'shield':
                    equippable_component = Equippable(EquipmentSlots.OFF_HAND, defense_bonus=1)
                    item = Entity(x, y, '[', libtcodpy.Color(138,20,0), 'Shield', equippable=equippable_component)
                elif item_choice == 'confusion_scroll':
                    item_component = Item(use_function=cast_confuse, targeting=True, targeting_message=Message(
                        'Left-click an enemy to confuse it, or right-click to cancel.', libtcodpy.light_cyan))
                    item = Entity(x, y, '#', libtcodpy.Color(186,251,24), 'Confusion Scroll', render_order=RenderOrder.ITEM,
                                  item=item_component)
                else:
                    item_component = Item(use_function=cast_lightning, damage=40, maximum_range=5)
                    item = Entity(x, y, '#', libtcodpy.Color(186,251,24), 'Lightning Scroll', render_order=RenderOrder.ITEM,
                                  item=item_component)
                entities.append(item)
    def is_blocked(self, x, y):
        """Return True when the tile at (x, y) blocks movement."""
        # Simplified from an if/return-True/return-False chain.
        return self.tiles[x][y].blocked
    # Create next floor and heal player
    def next_floor(self, player, message_log, constants):
        """Advance to the next dungeon level: regenerate the map, keep only
        the player in the entity list, and heal the player by half max HP.
        """
        self.dungeon_level += 1
        entities = [player]
        self.tiles = self.initialize_tiles()
        self.make_map(constants['max_rooms'], constants['room_min_size'], constants['room_max_size'],
                      constants['map_width'], constants['map_height'], player, entities)
        player.fighter.heal(player.fighter.max_hp // 2)
        message_log.add_message(
            Message('You take a moment to rest, and recover your strength.', libtcodpy.light_violet))
        return entities
| 10,548 | -7 | 264 |
099fdd831a65b2a78353d24b34be9c17c37cba63 | 1,551 | py | Python | fairml/non_linear_expansion.py | ravish0007/fairml | bdfb707ff9554c1a789dc8de3926c1ef3cfb1fc8 | [
"MIT"
] | 330 | 2017-02-24T08:34:39.000Z | 2022-02-24T15:41:19.000Z | fairml/non_linear_expansion.py | ravish0007/fairml | bdfb707ff9554c1a789dc8de3926c1ef3cfb1fc8 | [
"MIT"
] | 14 | 2017-02-02T00:54:16.000Z | 2021-02-19T16:01:20.000Z | fairml/non_linear_expansion.py | ravish0007/fairml | bdfb707ff9554c1a789dc8de3926c1ef3cfb1fc8 | [
"MIT"
] | 70 | 2017-01-31T20:51:10.000Z | 2022-02-17T07:38:52.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from collections import defaultdict
from random import randint
import six
| 31.02 | 67 | 0.580916 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from collections import defaultdict
from random import randint
import six
def return_non_linear_transformation(v1, poly, log, square_root,
                                     exponential, sin, cos):
    """Expand the 1-D vector ``v1`` into a matrix of non-linear features.

    The first column is always ``v1`` itself; further columns are appended
    in this order depending on the flags: polynomial powers, square root,
    exponential, log, sin, cos.

    Parameters
    ----------
    v1 : ndarray, shape (n,)
        Input feature vector.
    poly : int
        Highest polynomial degree; degrees 2..poly are appended when poly > 1.
    log, square_root, exponential, sin, cos : bool
        Toggle the corresponding transformation column.

    Returns
    -------
    ndarray, shape (n, k)
        Column-stacked transformations of ``v1``.
    """
    A = np.zeros((v1.shape[0], 1))
    A[:, 0] = v1
    if poly > 1:
        for i in range(2, poly + 1):
            current_power = v1**i
            current_power = np.reshape(current_power,
                                       (current_power.shape[0], 1))
            A = np.append(A, current_power, axis=1)
    if square_root:
        # Shift so the argument is >= 1 before taking the square root.
        # (Fixed: the original called `nb.abs`, a NameError.)
        sqrt = np.sqrt(v1 + np.abs(min(v1)) + 1)
        sqrt = np.reshape(sqrt, (sqrt.shape[0], 1))
        A = np.append(A, sqrt, axis=1)
    if exponential:
        # Subtract the max for numerical stability before exponentiating.
        exp_term = np.exp(v1 - np.max(v1))
        exp_term = np.reshape(exp_term, (exp_term.shape[0], 1))
        A = np.append(A, exp_term, axis=1)
    if log:
        # Shift the entire vector so every entry is >= 1, mirroring the
        # sqrt branch.  (Fixed: the original `np.abs(min(v1) + 1)` produced
        # NaNs whenever min(v1) < -1.)
        log_term = np.log(v1 + np.abs(min(v1)) + 1)
        log_term = np.reshape(log_term, (log_term.shape[0], 1))
        A = np.append(A, log_term, axis=1)
    if sin:
        sin_term = np.sin(v1)
        # (Fixed: the reshape was assigned to a misspelled `sin_Term`,
        # leaving the appended array 1-D so np.append(axis=1) raised.)
        sin_term = np.reshape(sin_term, (sin_term.shape[0], 1))
        A = np.append(A, sin_term, axis=1)
    if cos:
        cos_term = np.cos(v1)
        cos_term = np.reshape(cos_term, (cos_term.shape[0], 1))
        A = np.append(A, cos_term, axis=1)
    return A
| 1,304 | 0 | 23 |
83ec1c231ff393b0586eb485267dbbf932d48ceb | 776 | py | Python | 2018/05.py | bernikr/advent-of-code | 331e5257b9f812776d0723c7cdec349770498d34 | [
"MIT"
] | 1 | 2020-12-06T13:07:55.000Z | 2020-12-06T13:07:55.000Z | 2018/05.py | bernikr/advent-of-code | 331e5257b9f812776d0723c7cdec349770498d34 | [
"MIT"
] | null | null | null | 2018/05.py | bernikr/advent-of-code | 331e5257b9f812776d0723c7cdec349770498d34 | [
"MIT"
] | null | null | null | from aocd import get_data
if __name__ == '__main__':
data = get_data(day=5, year=2018)
inp = data
print(part1(inp))
print(part2(inp))
| 22.823529 | 99 | 0.490979 | from aocd import get_data
def react(p):
    """Fully react the polymer *p* (an iterable of unit characters).

    Adjacent units whose code points differ by exactly ord('a') - ord('A')
    (i.e. the same letter in opposite case) annihilate each other.

    Replaces the original repeated full-sweep algorithm (quadratic in the
    worst case) with a single O(n) stack pass; the reaction is confluent,
    so the surviving polymer is identical.

    Returns the remaining units as a list of characters.
    """
    diff = abs(ord('A') - ord('a'))
    stack = []
    for unit in p:
        if stack and abs(ord(stack[-1]) - ord(unit)) == diff:
            # Top of stack reacts with the incoming unit: both vanish.
            stack.pop()
        else:
            stack.append(unit)
    return stack
def part1(a):
    """Number of units remaining once the polymer has fully reacted."""
    reacted = react(a)
    return len(reacted)
def part2(a):
    """Shortest fully-reacted length achievable by removing one unit type
    (both cases of a single letter) from the polymer before reacting."""
    unit_pairs = {(e, chr(ord(e) + 32))
                  for e in a if ord('A') <= ord(e) <= ord('Z')}
    lengths = []
    for e1, e2 in unit_pairs:
        stripped = [x for x in a if x != e1 and x != e2]
        lengths.append(len(react(stripped)))
    return min(lengths)
if __name__ == '__main__':
    # Fetch the puzzle input from adventofcode.com (the `aocd` package
    # requires a configured session token), then print both answers.
    data = get_data(day=5, year=2018)
    inp = data
    print(part1(inp))
    print(part2(inp))
| 552 | 0 | 69 |
1d81218b8e60e702a2bfb121380ad87f7fae6b8d | 79 | py | Python | o3d3xx/pcic/__init__.py | Galoshi/o3d3xx-python | e57e955a2403ed336fa54d92e68c2a980384e55a | [
"MIT"
] | 7 | 2019-07-29T13:45:29.000Z | 2021-07-19T22:05:39.000Z | o3d3xx/pcic/__init__.py | Galoshi/o3d3xx-python | e57e955a2403ed336fa54d92e68c2a980384e55a | [
"MIT"
] | 12 | 2018-12-10T14:22:00.000Z | 2021-09-17T13:14:53.000Z | o3d3xx/pcic/__init__.py | Galoshi/o3d3xx-python | e57e955a2403ed336fa54d92e68c2a980384e55a | [
"MIT"
] | 8 | 2018-10-15T08:51:45.000Z | 2022-01-21T15:18:39.000Z | from .client import *
from .image_client import *
from .format_client import *
| 19.75 | 28 | 0.772152 | from .client import *
from .image_client import *
from .format_client import *
| 0 | 0 | 0 |
581b5ad28dcb7302d27dd121bec6759029fd818e | 621 | py | Python | setup.py | aakashns/swiftai | 7f18237d5b92ae9c016a595d01ef6ee207855b2f | [
"MIT"
] | null | null | null | setup.py | aakashns/swiftai | 7f18237d5b92ae9c016a595d01ef6ee207855b2f | [
"MIT"
] | null | null | null | setup.py | aakashns/swiftai | 7f18237d5b92ae9c016a595d01ef6ee207855b2f | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="swiftai",
version="0.1",
author="Aakash N S",
author_email="opensource@swiftace.ai",
description="Utilities and helper functions for Pytorch and FastAI deep learning libraries",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/aakashns/swiftai",
packages=setuptools.find_packages(),
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
) | 29.571429 | 94 | 0.719807 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="swiftai",
version="0.1",
author="Aakash N S",
author_email="opensource@swiftace.ai",
description="Utilities and helper functions for Pytorch and FastAI deep learning libraries",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/aakashns/swiftai",
packages=setuptools.find_packages(),
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
) | 0 | 0 | 0 |
94aed4e7eefe30a4863f63ee3ec88ba50525aa7a | 6,927 | py | Python | training/trainingdata.py | imdatsolak/bender | e20a5c7553d0db60440573b4fc3e907d6a8d5fad | [
"BSD-3-Clause"
] | null | null | null | training/trainingdata.py | imdatsolak/bender | e20a5c7553d0db60440573b4fc3e907d6a8d5fad | [
"BSD-3-Clause"
] | null | null | null | training/trainingdata.py | imdatsolak/bender | e20a5c7553d0db60440573b4fc3e907d6a8d5fad | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import os
import logging
import nltk.data
from core import utils
from core.progressbar import ProgressBar, Percentage, Bar, ETA, FormatLabel, AnimatedMarker
import codecs
from modules.brain.mlbrain import MLBrain
from modules.machinelogic.imachinelogic.mlimachinelogic import MLInternalMachineLogicTrainer
from modules.nlp.mlnlp import MLNLP
from modules.concept.mlconcept import MLConcept
from modules.mlbendertrainingmodule import MLBenderTrainingModule
import shutil
import pickle
import json
"""
Training-Data-Structure:
class TrainingDataConverter
Copyright (c) 2019 Imdat Solak
Written: 2017-04-12 00:00 CET, ISO
"""
| 48.78169 | 155 | 0.700592 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import os
import logging
import nltk.data
from core import utils
from core.progressbar import ProgressBar, Percentage, Bar, ETA, FormatLabel, AnimatedMarker
import codecs
from modules.brain.mlbrain import MLBrain
from modules.machinelogic.imachinelogic.mlimachinelogic import MLInternalMachineLogicTrainer
from modules.nlp.mlnlp import MLNLP
from modules.concept.mlconcept import MLConcept
from modules.mlbendertrainingmodule import MLBenderTrainingModule
import shutil
import pickle
import json
"""
Training-Data-Structure:
class TrainingDataConverter
Copyright (c) 2019 Imdat Solak
Written: 2017-04-12 00:00 CET, ISO
"""
class TrainingDataConverter(object):
    """Drive Bender's offline training pipeline.

    Steps (printed as 1/5 .. 5/5): load the pickled Q/A source data into the
    "brain", optionally build similarity indexes, train the configured
    internal machine-logic engines, and dump the Q/A pairs as
    dictionary-index sequences for external neural-network training.
    """
    def __init__(self, configDictionary):
        """Read paths/media types from the `bender-training` config section
        and instantiate the configured Concept and NLP modules.

        Exits the process when `train_data_source_file` is missing.
        """
        self.configDictionary = configDictionary
        self.train_data_source_file = utils.getKeyFromSectionInConfiguration('bender-training', 'train_data_source_file', None, configDictionary)
        if not self.train_data_source_file:
            print("Config does not contain 'train_data_source_file', please provide one.")
            exit(1)
        self.query_media_type = utils.getKeyFromSectionInConfiguration('bender-training', 'query_media_type', None, configDictionary)
        self.response_media_type = utils.getKeyFromSectionInConfiguration('bender-training', 'response_media_type', None, configDictionary)
        self.raw_data_format = utils.getKeyFromSectionInConfiguration('bender-training', 'raw_data_format', None, configDictionary)
        self.train_data_q_media_type = utils.getKeyFromSectionInConfiguration('bender-training', 'train_data_q_media_type', None, configDictionary)
        self.train_data_a_media_type = utils.getKeyFromSectionInConfiguration('bender-training', 'train_data_a_media_type', None, configDictionary)
        self.output_path = utils.getKeyFromSectionInConfiguration('bender-training', 'output_path', None, configDictionary)
        self.train_data_queries_root_dir = utils.getKeyFromSectionInConfiguration('bender-training', 'converted_train_data_q_path', None, configDictionary)
        self.train_data_answers_dir = utils.getKeyFromSectionInConfiguration('bender-training', 'converted_train_data_a_path', None, configDictionary)
        self.generate_lsi = int(utils.getKeyFromSectionInConfiguration('bender-training', 'generate_lsi', 0, configDictionary))
        # Dynamically load and type-check the configured concept module.
        concept = utils.getModulenameFromConfiguration('concept', 'modules.concept.mlconcept.MLConcept', configDictionary)
        utils.validate_module_class(concept, MLConcept)
        self.concept = utils.initialize_class(concept, configDictionary)
        # Same for the NLP module.
        nlp = utils.getModulenameFromConfiguration('nlp', 'modules.nlp.mlnlp.MLNLP', configDictionary)
        utils.validate_module_class(nlp, MLNLP)
        self.nlp = utils.initialize_class(nlp, configDictionary)
        utils.safe_create_directory(self.output_path)
        self.configDictionary = configDictionary
        self.question_file = ''
        self.answers_file = ''
        self.questions = []  # filled from the brain in _prepareNNIQAFormat
        self.answers = []  # likewise
        self.internalMachineLogics = []
    def _initializeBrain(self):
        """Create the MLBrain, wire in the NLP/Concept modules, and bulk-load
        the pickled Q/A pairs from `train_data_source_file`."""
        print('1/5: INITIALIZING --BRAIN-- :-)... & SPELLING ENGINE...')
        self.brain = MLBrain(self.configDictionary)
        self.brain.setNLPModule(self.nlp)
        self.brain.setConceptModule(self.concept)
        print("2/5: CONVERTING TRAINING DATA FROM SOURCE FORMAT AND ADDING TO --BRAIN--...")
        train_data = pickle.load(open(self.train_data_source_file, 'rb'))
        if train_data is not None:
            self.brain.batchAddNewQAPairs(train_data)
    def _initializeSimilarity(self):
        """Build the gensim similarity indexes when `generate_lsi` == 1."""
        print("3/5: INITIALIZING SIMILARITY INDEXES...")
        if self.generate_lsi == 1:
            # Imported lazily so gensim is only required when LSI is enabled.
            from modules.similarity.lsi.gensimmodelgenerator import GenSimModelGenerator
            gensim_generator = GenSimModelGenerator(self.brain, self.configDictionary)
            gensim_generator.create()
    def _trainInternalMachineLogics(self):
        """Instantiate and train every trainer listed in the `i-machinelogic`
        config section (each entry names its `training_module`)."""
        print("4/5: TRAINING INTERNAL MACHINE LOGICS...")
        internalMLs = utils.getSectionFromConfiguration('i-machinelogic', None, self.configDictionary)
        for iml in internalMLs:
            module = iml['training_module']
            utils.validate_module_class(module, MLInternalMachineLogicTrainer)
            newInstance = utils.initialize_class_with_config_section(module, iml, self.configDictionary, brain=self.brain)
            newInstance.train()
    def _prepareNNIQAFormat(self):
        """Dump every question/answer as flattened dictionary-index sequences
        (JSON + pickle) under `<output_path>/nn_iqa`."""
        print("5/5: CONVERTING TRAINING DATA FOR NN in I-Q/A-FORMAT...")
        questions = []
        answers = {}
        self.answers = self.brain.getAllAnswers()
        self.questions = self.brain.getAllQAPairs()
        for entry in self.answers:
            answerText = entry['answer']
            aID = int(entry['id'])
            document = self.brain.getIndicesForText(answerText)
            # Flatten the per-sentence index lists into one flat sequence.
            arr = []
            for sentence in document:
                arr.extend(sentence)
            answers[aID] = arr
        questions = []
        for question in self.questions:
            qText = question['question']
            document = self.brain.getIndicesForText(qText)
            arr = []
            for sentence in document:
                arr.extend(sentence)
            questions.append({'question': arr, 'answers':question['answers']})
        output_p = os.path.join(self.output_path, 'nn_iqa')
        utils.safe_create_directory(output_p)
        output_qt = os.path.join(output_p, 'questions.json')
        output_qp = os.path.join(output_p, 'questions.pickle')
        output_at = os.path.join(output_p, 'answers.json')
        output_ap = os.path.join(output_p, 'answers.pickle')
        json.dump(questions, open(output_qt, 'w'), indent=4)
        json.dump(answers, open(output_at, 'w'), indent=4)
        pickle.dump(questions, open(output_qp, 'wb'))
        pickle.dump(answers, open(output_ap, 'wb'))
    def _prepareNNTrainData(self):
        """Hook for exporting NN training data; currently only the I-Q/A dump."""
        self._prepareNNIQAFormat()
        return None
    def train(self):
        """Run the full 5-step training pipeline and print a summary."""
        self._initializeBrain()
        self._initializeSimilarity()
        self._trainInternalMachineLogics()
        self._prepareNNTrainData()
        print("...done")
        print('SUMMARY:')
        print('\t[x] Dictionary Generation')
        print('\t[x] Questions-Corpus Generation')
        print('\t[x] Index-Generation (TfIfd & LSI)')
        print('\t[x] Conversion of question-text to dictionary-IDS')
        print('\t[x] Generation of a Word2Vec database (->%s/models)' % self.output_path)
        print('\t[x] Training of INTERNAL Neuronal Networks/i-Machine-Logic Engines')
        print('\t[ ] Training of EXTERNAL Neuronal Networks/MachineLogic Engines')
        print("----> Your EXTERNAL NEURONAL NETWORK training data is at: ", self.output_path, "/nn_iqa ...", sep='')
af9b3f69b1325129ae442f2d87c9a08513882bd7 | 2,014 | py | Python | egs/sre21-av-a/v1.16k/local/estimate_lid_labels.py | hyperion-ml/hyperion | c4c9eee0acab1ba572843373245da12d00dfffaa | [
"Apache-2.0"
] | 14 | 2021-12-19T04:24:15.000Z | 2022-03-18T03:24:04.000Z | egs/sre21-av-a/v1.16k/local/estimate_lid_labels.py | hyperion-ml/hyperion | c4c9eee0acab1ba572843373245da12d00dfffaa | [
"Apache-2.0"
] | null | null | null | egs/sre21-av-a/v1.16k/local/estimate_lid_labels.py | hyperion-ml/hyperion | c4c9eee0acab1ba572843373245da12d00dfffaa | [
"Apache-2.0"
] | 5 | 2021-12-14T20:41:27.000Z | 2022-02-24T14:18:11.000Z | #!/usr/bin/env python
"""
Copyright 2018 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import os
import logging
from jsonargparse import ArgumentParser, namespace_to_dict
import math
import numpy as np
from hyperion.hyp_defs import float_cpu, config_logger
from hyperion.utils import Utt2Info
from hyperion.io import RandomAccessDataReaderFactory as DRF
if __name__ == "__main__":
parser = ArgumentParser(description="Transform xvector logits into labels")
parser.add_argument("--list-file", required=True)
parser.add_argument("--logits-file", required=True)
parser.add_argument("--class-file", required=True)
parser.add_argument("--output-file", required=True)
parser.add_argument(
"--sre21",
default=False,
action="store_true",
help="If SRE21 only ENG/CMN/YUE are allowed",
)
parser.add_argument(
"-v", "--verbose", dest="verbose", default=1, choices=[0, 1, 2, 3], type=int
)
args = parser.parse_args()
config_logger(args.verbose)
del args.verbose
logging.debug(args)
estimate_lid_labels(**namespace_to_dict(args))
| 29.188406 | 84 | 0.66435 | #!/usr/bin/env python
"""
Copyright 2018 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import os
import logging
from jsonargparse import ArgumentParser, namespace_to_dict
import math
import numpy as np
from hyperion.hyp_defs import float_cpu, config_logger
from hyperion.utils import Utt2Info
from hyperion.io import RandomAccessDataReaderFactory as DRF
def estimate_lid_labels(list_file, logits_file, class_file, output_file, sre21):
    """Pick the most likely language class per utterance from x-vector logits
    and write `<utt-key> <label>` lines to ``output_file``.

    When ``sre21`` is true, only ENG/CMN/YUE may win: every other class gets
    a large negative log-prior added before the argmax.
    """
    logging.info("Converting logits to labels for %s", list_file)
    utt_list = Utt2Info.load(list_file)
    reader = DRF.create(logits_file)
    with open(class_file, "r") as f:
        classes = [line.strip() for line in f]
    logits = reader.read(utt_list.key, squeeze=True)
    if sre21:
        sre21_langs = ["ENG", "CMN", "YUE"]
        # Flat prior over the allowed languages, effectively -inf elsewhere.
        log_priors = -1000 * np.ones((len(classes),), dtype=float_cpu())
        uniform_prior = -math.log(len(sre21_langs))
        for lang in sre21_langs:
            log_priors[classes.index(lang)] = uniform_prior
        logits += log_priors
    best = np.argmax(logits, axis=1)
    with open(output_file, "w") as f:
        for i, key in enumerate(utt_list.key):
            f.write("%s %s\n" % (key, classes[best[i]]))
if __name__ == "__main__":
    # Command-line entry point: parse options, configure logging, then run
    # the logits -> label conversion.
    parser = ArgumentParser(description="Transform xvector logits into labels")
    parser.add_argument("--list-file", required=True)
    parser.add_argument("--logits-file", required=True)
    parser.add_argument("--class-file", required=True)
    parser.add_argument("--output-file", required=True)
    parser.add_argument(
        "--sre21",
        default=False,
        action="store_true",
        help="If SRE21 only ENG/CMN/YUE are allowed",
    )
    parser.add_argument(
        "-v", "--verbose", dest="verbose", default=1, choices=[0, 1, 2, 3], type=int
    )
    args = parser.parse_args()
    config_logger(args.verbose)
    # `verbose` only configures logging; drop it so the remaining namespace
    # maps 1:1 onto estimate_lid_labels' keyword arguments.
    del args.verbose
    logging.debug(args)
    estimate_lid_labels(**namespace_to_dict(args))
| 797 | 0 | 23 |
39501971fe565c85d10ffb357bd2874912b66428 | 278 | py | Python | backend/users/urls.py | alessondelmiro/github_monitor | 27447da7ddd9a3da6241da112c494f4abff57928 | [
"MIT"
] | null | null | null | backend/users/urls.py | alessondelmiro/github_monitor | 27447da7ddd9a3da6241da112c494f4abff57928 | [
"MIT"
] | 1 | 2020-05-18T02:53:52.000Z | 2020-05-18T02:53:52.000Z | backend/users/urls.py | alessondelmiro/github_monitor | 27447da7ddd9a3da6241da112c494f4abff57928 | [
"MIT"
] | null | null | null | from django.conf.urls import include
from django.urls import path
from rest_framework import routers
from .viewsets import UserViewSet
ROUTER = routers.DefaultRouter()
ROUTER.register(r'', UserViewSet, basename='User')
urlpatterns = (
path('', include(ROUTER.urls)),
)
| 18.533333 | 50 | 0.758993 | from django.conf.urls import include
from django.urls import path
from rest_framework import routers
from .viewsets import UserViewSet
# DefaultRouter generates the standard DRF list/detail routes for the user
# viewset, registered at the root of this app's URL namespace.
ROUTER = routers.DefaultRouter()
ROUTER.register(r'', UserViewSet, basename='User')
urlpatterns = (
    path('', include(ROUTER.urls)),
)
| 0 | 0 | 0 |
5d9d1cf5f7ce68f79c998a02ff1931832057feb4 | 2,225 | py | Python | run_tests.py | Songtrust/django-cache-machine | ddc144875ec7b3b6fb121c7384c4fe2c4f9fbcda | [
"BSD-3-Clause"
] | null | null | null | run_tests.py | Songtrust/django-cache-machine | ddc144875ec7b3b6fb121c7384c4fe2c4f9fbcda | [
"BSD-3-Clause"
] | null | null | null | run_tests.py | Songtrust/django-cache-machine | ddc144875ec7b3b6fb121c7384c4fe2c4f9fbcda | [
"BSD-3-Clause"
] | null | null | null | """
Creating standalone Django apps is a PITA because you're not in a project, so
you don't have a settings.py file. I can never remember to define
DJANGO_SETTINGS_MODULE, so I run these commands which get the right env
automatically.
"""
import argparse
import os
import sys
from subprocess import call, check_output
NAME = os.path.basename(os.path.dirname(__file__))
ROOT = os.path.abspath(os.path.dirname(__file__))
os.environ['PYTHONPATH'] = os.pathsep.join([ROOT,
os.path.join(ROOT, 'examples')])
SETTINGS = (
'locmem_settings',
'settings',
'memcache_byid',
'custom_backend',
'redis_settings',
'redis_byid',
'django_redis_settings',
)
if __name__ == "__main__":
main()
| 37.083333 | 91 | 0.625618 | """
Creating standalone Django apps is a PITA because you're not in a project, so
you don't have a settings.py file. I can never remember to define
DJANGO_SETTINGS_MODULE, so I run these commands which get the right env
automatically.
"""
import argparse
import os
import sys
from subprocess import call, check_output
NAME = os.path.basename(os.path.dirname(__file__))
ROOT = os.path.abspath(os.path.dirname(__file__))
os.environ['PYTHONPATH'] = os.pathsep.join([ROOT,
os.path.join(ROOT, 'examples')])
SETTINGS = (
'locmem_settings',
'settings',
'memcache_byid',
'custom_backend',
'redis_settings',
'redis_byid',
'django_redis_settings',
)
def main():
    """Parse CLI options and run the Django test suite once per settings
    module, optionally under coverage.py; exits 1 when any run fails."""
    parser = argparse.ArgumentParser(description='Run the tests for django-cache-machine. '
                                                 'If no options are specified, tests will be run with '
                                                 'all settings files and without coverage.py.')
    parser.add_argument('--with-coverage', action='store_true',
                        help='Run tests with coverage.py and display coverage report')
    parser.add_argument('--settings', choices=SETTINGS,
                        help='Run tests only for the specified settings file')
    args = parser.parse_args()

    chosen_settings = [args.settings] if args.settings else SETTINGS
    exit_codes = []
    django_admin = check_output(['which', 'django-admin']).strip()
    for run_index, settings_module in enumerate(chosen_settings):
        print('Running tests for: %s' % settings_module)
        os.environ['DJANGO_SETTINGS_MODULE'] = 'cache_machine.%s' % settings_module
        if not args.with_coverage:
            prefix = []
        elif run_index == 0:
            # First run starts a fresh coverage data file ...
            prefix = ['coverage', 'run']
        else:
            # ... and later runs append to it.
            prefix = ['coverage', 'run', '--append']
        exit_codes.append(call(prefix + [django_admin, 'test', '--keepdb']))
    if args.with_coverage:
        exit_codes.append(call(['coverage', 'report', '-m', '--fail-under', '70']))
    sys.exit(1 if any(exit_codes) else 0)
if __name__ == "__main__":
    # Script entry point.
    main()
| 1,445 | 0 | 23 |
74e4b11b2054da3bfa8876209342d8a4e338cc89 | 3,174 | py | Python | profiles/views.py | greenelab/tribe | 7e499b03a56f8d5ae22b540dbccb071edd84d79c | [
"BSD-3-Clause"
] | 4 | 2020-07-24T01:06:09.000Z | 2021-11-08T11:18:24.000Z | profiles/views.py | greenelab/tribe | 7e499b03a56f8d5ae22b540dbccb071edd84d79c | [
"BSD-3-Clause"
] | 49 | 2017-08-01T18:59:58.000Z | 2022-02-12T01:54:53.000Z | profiles/views.py | greenelab/tribe | 7e499b03a56f8d5ae22b540dbccb071edd84d79c | [
"BSD-3-Clause"
] | 3 | 2017-08-01T21:12:05.000Z | 2017-10-02T20:37:07.000Z | from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from allauth.utils import generate_unique_username
from profiles.models import Profile
from profiles.forms import UpgradeUserForm, CreateTempAcctForm
| 41.763158 | 128 | 0.624449 | from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from allauth.utils import generate_unique_username
from profiles.models import Profile
from profiles.forms import UpgradeUserForm, CreateTempAcctForm
def create_temporary_acct(request):
    """Create an auto-numbered temporary user ('TemporaryUserN'), log it in
    with a random password, and redirect to the home page.

    GET (or a POST with an invalid form) renders the create_temp_acct
    template instead.
    """
    if request.method == 'POST':
        form = CreateTempAcctForm(request.POST)
        # NOTE(review): .latest() raises DoesNotExist when no user matching
        # 'TemporaryUser*' exists yet -- presumably one is seeded; confirm.
        latest_temp_user = User.objects.filter(username__startswith='TemporaryUser').latest('date_joined')
        if form.is_valid():
            try:
                # The next few lines number Temporary Users's usernames.
                # If we have a lot of traffic, we might have to change this in case multiple temporary
                # accounts want to be created at the exact same time (as this would violate the
                # unique=True constraint for username)
                latest_temp_num = int(latest_temp_user.username[13:])  # strip the 13-char 'TemporaryUser' prefix
                latest_temp_num += 1
                username = 'TemporaryUser' + str(latest_temp_num)
                import uuid
                # Random, never-displayed password; the user can set a real
                # one later via convert_to_full_acct.
                password = str(uuid.uuid4())
                new_user = User.objects.create_user(username, None, password=password, first_name='Temporary', last_name='User')
                Profile.objects.create(user=new_user, temporary_acct=True)
                user = authenticate(username=username, password=password)
                login(request, user)
                request.session.set_expiry(31536000) # Make session persistent for a year
                return HttpResponseRedirect('/')
            except:
                # NOTE(review): bare except silently swallows every failure
                # (including DB errors) and just redirects; consider narrowing.
                return HttpResponseRedirect('/')
        else:
            form = CreateTempAcctForm()
    return render(request, 'create_temp_acct.html', {'form': form})
def convert_to_full_acct(request):
    """Upgrade a logged-in temporary account to a permanent one.

    On a valid POST: store the chosen e-mail and password, derive a unique
    username from the e-mail, and clear the profile's temporary flag.
    Anonymous users, or accounts without a profile / not marked temporary,
    get the invalid_acct page.
    """
    user = request.user
    if request.method == 'POST':
        form = UpgradeUserForm(request.POST)
        try:
            profile = Profile.objects.get(user=user) # Check that there is a profile for this user
        except:
            # No profile record -> not an account this flow can upgrade.
            return render(request, 'invalid_acct.html', {})
        if (user.is_authenticated() and profile.temporary_acct):
            if form.is_valid():
                email = form.cleaned_data['email']
                new_password = form.cleaned_data['password']
                user.email = email
                user.set_password(new_password)
                # Derive a unique username from the e-mail address.
                user.username = generate_unique_username((email, ))
                user.save()
                profile.temporary_acct = False
                profile.save()
                request.session.set_expiry(0) # Make user logged in for the duration of the session, as would be the
                                              # case when logged in with any other regular account.
                return HttpResponseRedirect('/')
            # NOTE(review): an invalid form falls through to the final render
            # below with the bound form, so validation errors are redisplayed.
        else:
            return render(request, 'invalid_acct.html', {})
    elif (user.is_authenticated() == False):
        return render(request, 'invalid_acct.html', {})
    else:
        form = UpgradeUserForm()
    return render(request, 'upgrade_temp_acct.html', {'form': form})
| 2,790 | 0 | 46 |
96fc572e74a0c79c36edf7163e17292aaeee0b4d | 174 | py | Python | thirdpart/django_constance-1.0.1-py2.6.egg/constance/utils.py | saukrIppl/newsea | 0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603 | [
"Apache-2.0"
] | 2 | 2017-06-21T09:46:55.000Z | 2018-05-30T10:07:32.000Z | thirdpart/django_constance-1.0.1-py2.6.egg/constance/utils.py | saukrIppl/newsea | 0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603 | [
"Apache-2.0"
] | 3 | 2020-02-11T23:01:19.000Z | 2021-06-10T17:55:33.000Z | thirdpart/django_constance-1.0.1-py2.6.egg/constance/utils.py | saukrIppl/newsea | 0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603 | [
"Apache-2.0"
] | 1 | 2020-10-01T04:11:41.000Z | 2020-10-01T04:11:41.000Z | from django.utils.importlib import import_module
| 24.857143 | 50 | 0.752874 | from django.utils.importlib import import_module
def import_module_attr(path):
    """Import the dotted *path* and return its final attribute.

    E.g. ``import_module_attr('pkg.mod.Thing')`` imports ``pkg.mod`` and
    returns ``Thing``.
    """
    module_path, attr_name = path.rsplit('.', 1)
    module = import_module(module_path)
    return getattr(module, attr_name)
| 101 | 0 | 23 |
34079e62d9a0ac3312266f9963fef439c646fc32 | 557 | py | Python | bhcrjyApp/app/index.py | caochong01/quick-reptiler | e605afce0c3558a1a01b2406dae88c8f3a0f8792 | [
"Apache-2.0"
] | null | null | null | bhcrjyApp/app/index.py | caochong01/quick-reptiler | e605afce0c3558a1a01b2406dae88c8f3a0f8792 | [
"Apache-2.0"
] | null | null | null | bhcrjyApp/app/index.py | caochong01/quick-reptiler | e605afce0c3558a1a01b2406dae88c8f3a0f8792 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from bhcrjyApp.AppUtils.HttpMessageTool import HttpUtils
from bhcrjyApp.app import loginCheck
from flask.blueprints import Blueprint
from flask import request, make_response
from flask import render_template, redirect, abort, url_for
bp = Blueprint('index', __name__, url_prefix='/')
@bp.route('/main_index', methods=['GET', 'POST'])
@loginCheck
def main_index():
"""
首页展示
:return:
"""
response = make_response(render_template('index/index.html', skipClass=url_for('looktax.skipClass')))
return response
| 25.318182 | 105 | 0.728905 | # -*- coding: utf-8 -*-
from bhcrjyApp.AppUtils.HttpMessageTool import HttpUtils
from bhcrjyApp.app import loginCheck
from flask.blueprints import Blueprint
from flask import request, make_response
from flask import render_template, redirect, abort, url_for
bp = Blueprint('index', __name__, url_prefix='/')
@bp.route('/main_index', methods=['GET', 'POST'])
@loginCheck
def main_index():
    """
    Render the home page (index view); login is enforced by @loginCheck.

    :return: Flask response containing the rendered index template.
    """
    skip_class_url = url_for('looktax.skipClass')
    page = render_template('index/index.html', skipClass=skip_class_url)
    return make_response(page)
| 0 | 0 | 0 |
c029c39dc8eebf1b2def23da5533040338e57ea7 | 1,219 | py | Python | osr2mp4/ImageProcess/Objects/Components/AScorebar.py | siveroo/osr2mp4-core | 7aaf33e7b4798634bb4151a8246dbf2d5ef49cf9 | [
"MIT"
] | 103 | 2020-06-07T15:28:41.000Z | 2022-03-01T17:07:35.000Z | osr2mp4/ImageProcess/Objects/Components/AScorebar.py | siveroo/osr2mp4-core | 7aaf33e7b4798634bb4151a8246dbf2d5ef49cf9 | [
"MIT"
] | 50 | 2020-06-07T10:53:21.000Z | 2021-12-19T14:47:47.000Z | osr2mp4/ImageProcess/Objects/Components/AScorebar.py | siveroo/osr2mp4-core | 7aaf33e7b4798634bb4151a8246dbf2d5ef49cf9 | [
"MIT"
] | 21 | 2020-07-12T16:02:35.000Z | 2022-03-01T17:07:37.000Z | from osr2mp4.ImageProcess.Objects.FrameObject import FrameObject
| 23.901961 | 64 | 0.675964 | from osr2mp4.ImageProcess.Objects.FrameObject import FrameObject
class AScorebar(FrameObject):
	"""Animated scorebar that slides out of view during beatmap breaks and
	slides back in afterwards.

	`s` is the scroll progress, `alpha` the derived opacity (1 = fully
	visible, 0 = hidden) and `h` the offset used when rendering.
	"""
	def __init__(self, frames, settings):
		super().__init__(frames, settings=settings)
		self.scrolltime = 80  # progress needed to fully scroll the bar away
		self.s = 0  # current scroll progress
		self.scrolling = False  # True while a slide animation is running
		self.breakk = None  # start time of the break currently being handled
		self.direction = 1  # sign/speed of the scroll (+ hides, - reveals)
		self.duration = 0  # remaining break time in ms
		self.interval = 0  # ms consumed per frame while inside a break
		self.alpha = 1  # bar opacity in [0, 1]
		self.h = 0  # offset derived from s
		self.dd = 0.2  # base scroll speed factor
	def startbreak(self, breakk, duration):
		"""Begin the hide animation for a break event (ignored when the same
		break is already being handled)."""
		if self.breakk == breakk["Start"]:
			return
		self.s = 0
		self.scrolling = True
		self.breakk = breakk["Start"]
		self.duration = duration - 100
		self.interval = 1000/self.settings.fps
		self.direction = self.dd #* 60/self.settings.fps
	def animate(self):
		"""Advance the slide animation by one frame."""
		self.duration -= self.interval
		if self.duration < 0:
			# Break is over: reverse direction so the bar slides back in.
			self.direction = -self.dd #* 60/self.settings.fps
			self.scrolling = True
			self.duration = 0
			self.interval = 0
		# print(self.duration, self.scrolling, alpha, self.s)
		if self.scrolling:
			self.s += 1000/self.settings.fps * self.direction
			# Fade proportionally to how far the bar has scrolled.
			self.alpha = min(1.0, max(0.0, 1 - self.s/self.scrolltime))
			self.h = self.s
			if self.alpha == 0 or self.alpha == 1:
				# Fully hidden or fully shown: the slide is finished.
				self.scrolling = False
		if not self.scrolling and self.interval == 0:
			# Idle outside a break: keep the bar fully visible.
			self.alpha = 1
			self.h = 0
9f7f4b1ca5790f07a34806474fddf006ce47b3c8 | 1,365 | py | Python | py3server/swagger_server/test/test_concepts_controller.py | lhannest/pythonBeaconServerStub | 3fee2505f5f7afda9184277b5f6308ff05832e35 | [
"MIT"
] | null | null | null | py3server/swagger_server/test/test_concepts_controller.py | lhannest/pythonBeaconServerStub | 3fee2505f5f7afda9184277b5f6308ff05832e35 | [
"MIT"
] | null | null | null | py3server/swagger_server/test/test_concepts_controller.py | lhannest/pythonBeaconServerStub | 3fee2505f5f7afda9184277b5f6308ff05832e35 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from swagger_server.models.inline_response2001 import InlineResponse2001
from swagger_server.models.inline_response2002 import InlineResponse2002
from . import BaseTestCase
from six import BytesIO
from flask import json
class TestConceptsController(BaseTestCase):
""" ConceptsController integration test stubs """
def test_get_concept_details(self):
"""
Test case for get_concept_details
"""
response = self.client.open('/api/concepts/{conceptId}'.format(conceptId='conceptId_example'),
method='GET')
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
def test_get_concepts(self):
"""
Test case for get_concepts
"""
query_string = [('keywords', 'keywords_example'),
('semgroups', 'semgroups_example'),
('pageNumber', 56),
('pageSize', 56)]
response = self.client.open('/api/concepts',
method='GET',
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
if __name__ == '__main__':
import unittest
unittest.main()
| 31.022727 | 102 | 0.597802 | # coding: utf-8
from __future__ import absolute_import
from swagger_server.models.inline_response2001 import InlineResponse2001
from swagger_server.models.inline_response2002 import InlineResponse2002
from . import BaseTestCase
from six import BytesIO
from flask import json
class TestConceptsController(BaseTestCase):
    """ ConceptsController integration test stubs """
    def test_get_concept_details(self):
        """
        Test case for get_concept_details

        GET /api/concepts/{conceptId} with a sample id must return 200.
        """
        response = self.client.open('/api/concepts/{conceptId}'.format(conceptId='conceptId_example'),
                                    method='GET')
        self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
    def test_get_concepts(self):
        """
        Test case for get_concepts

        GET /api/concepts with keyword/semgroup filters and paging
        parameters must return 200.
        """
        query_string = [('keywords', 'keywords_example'),
                        ('semgroups', 'semgroups_example'),
                        ('pageNumber', 56),
                        ('pageSize', 56)]
        response = self.client.open('/api/concepts',
                                    method='GET',
                                    query_string=query_string)
        self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
if __name__ == '__main__':
    # Allow running this test module directly.
    import unittest
    unittest.main()
| 0 | 0 | 0 |
afb45a1f6d669be9137bd116b381f8ffa90a7cd3 | 1,855 | py | Python | tests/seahub/views/sysadmin/test_sys_virus_scan_records.py | saukrIppl/newsea | 0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603 | [
"Apache-2.0"
] | 2 | 2017-06-21T09:46:55.000Z | 2018-05-30T10:07:32.000Z | tests/seahub/views/sysadmin/test_sys_virus_scan_records.py | saukrIppl/newsea | 0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603 | [
"Apache-2.0"
] | null | null | null | tests/seahub/views/sysadmin/test_sys_virus_scan_records.py | saukrIppl/newsea | 0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603 | [
"Apache-2.0"
] | 1 | 2020-10-01T04:11:41.000Z | 2020-10-01T04:11:41.000Z | import os
from mock import patch
import pytest
from django.core.urlresolvers import reverse
from seahub.test_utils import BaseTestCase
TRAVIS = 'TRAVIS' in os.environ
| 35 | 210 | 0.687332 | import os
from mock import patch
import pytest
from django.core.urlresolvers import reverse
from seahub.test_utils import BaseTestCase
TRAVIS = 'TRAVIS' in os.environ
class VirusScanRecord(object):
    """Lightweight stand-in for a virus-scan record row.

    Only `repo_id` is set here; tests attach further attributes
    (e.g. `vid`, `has_handle`) ad hoc.
    """

    def __init__(self, repo_id):
        self.repo_id = repo_id
class SysVirusScanRecordsTest(BaseTestCase):
    """Admin-page test: listing virus-scan records with more than 10 rows."""
    # @patch('seahub.utils.EVENTS_ENABLED', True)
    # @patch('seahub.utils.get_virus_record')
    # def test_can_list_empty(self, mock_get_virus_record):
    #     mock_get_virus_record.return_value = []
    #     self.login_as(self.admin)
    #     resp = self.client.get(reverse('sys_virus_scan_records'))
    #     self.assertEqual(200, resp.status_code)
    #     self.assertTemplateUsed(resp, 'sysadmin/sys_virus_scan_records.html')
    def _get_virus_record(self, start, limit):
        """Fake get_virus_record(): always returns 11 unhandled records so the
        view has more than one page's worth (start/limit are ignored)."""
        records = []
        for i in range(11):
            record = VirusScanRecord(self.repo.id)
            record.vid = i + 1
            record.has_handle = False
            records.append(record)
        return records
    @pytest.mark.skipif(TRAVIS, reason="TODO: this test can only be run seperately due to the url module init in django, we may need to reload url conf: https://gist.github.com/anentropic/9ac47f6518c88fa8d2b0")
    @patch('seahub.utils.EVENTS_ENABLED')
    @patch('seahub.utils.get_virus_record')
    def test_can_list_records_num_more_than_10(self, mock_get_virus_record,
                                               mock_events_enabled):
        # NOTE(review): this rebinds the local name only -- it does not make
        # the patched seahub.utils.EVENTS_ENABLED evaluate to True; confirm.
        mock_events_enabled = True
        mock_get_virus_record.side_effect = self._get_virus_record
        self.login_as(self.admin)
        resp = self.client.get(reverse('sys_virus_scan_records'))
        self.assertEqual(200, resp.status_code)
        self.assertTemplateUsed(resp, 'sysadmin/sys_virus_scan_records.html')
        assert len(resp.context['records']) >= 10
fc0bc2028b678416bdf0a92e7eb92733be71a5b3 | 1,043 | py | Python | playground/localController.py | sysuroboclub/LineFollowCar | 4ce8c85d0757190b98f782ec964bc8456efaf97f | [
"MIT"
] | 4 | 2019-11-28T11:40:42.000Z | 2019-12-04T11:38:42.000Z | playground/localController.py | sysuroboclub/LineFollowCar | 4ce8c85d0757190b98f782ec964bc8456efaf97f | [
"MIT"
] | null | null | null | playground/localController.py | sysuroboclub/LineFollowCar | 4ce8c85d0757190b98f782ec964bc8456efaf97f | [
"MIT"
] | null | null | null | from inputs import get_gamepad
import serial
ser=serial.Serial('COM4',115200)
prevB=0
while True:
events = get_gamepad()
#X,Y [-32768,+32768]
#Z,RZ(RT,LT) [0,255]
local=ser.read_all()
if len(local)>0:
print(local.decode())
for event in events:
# print(event.ev_type, event.code, event.state)
if event.code=='ABS_X':
snd='X'+str(int(event.state/512))+'\r'
ser.write(snd.encode())
if event.code=='ABS_Y':
snd='Y'+str(int(event.state/512))+'\r'
ser.write(snd.encode())
if event.code=='ABS_RZ':
snd='R'+str(int(event.state))+'\r'
ser.write(snd.encode())
if event.code=='ABS_Z':
snd='L'+str(int(event.state))+'\r'
ser.write(snd.encode())
if event.code=='BTN_EAST':
if event.state==0 and prevB==1:
ser.close()
ser.open()
exit(0)
else:
prevB=event.state
| 31.606061 | 56 | 0.495686 | from inputs import get_gamepad
import serial
ser=serial.Serial('COM4',115200)
prevB=0
while True:
events = get_gamepad()
#X,Y [-32768,+32768]
#Z,RZ(RT,LT) [0,255]
local=ser.read_all()
if len(local)>0:
print(local.decode())
for event in events:
# print(event.ev_type, event.code, event.state)
if event.code=='ABS_X':
snd='X'+str(int(event.state/512))+'\r'
ser.write(snd.encode())
if event.code=='ABS_Y':
snd='Y'+str(int(event.state/512))+'\r'
ser.write(snd.encode())
if event.code=='ABS_RZ':
snd='R'+str(int(event.state))+'\r'
ser.write(snd.encode())
if event.code=='ABS_Z':
snd='L'+str(int(event.state))+'\r'
ser.write(snd.encode())
if event.code=='BTN_EAST':
if event.state==0 and prevB==1:
ser.close()
ser.open()
exit(0)
else:
prevB=event.state
| 0 | 0 | 0 |
9ee5b2331da5d82edf1810c3ea0de1c521383a2e | 9,145 | py | Python | gms-common/python/master-coi-data-client/util/flatfiles/fieldFormatter.py | SNL-GMS/GMS-PI7-OPEN | 4c5f2a33a45566b12897bcdc129609c9e6b95442 | [
"BSD-3-Clause"
] | 5 | 2020-01-20T14:53:11.000Z | 2021-11-30T23:01:08.000Z | gms-common/python/master-coi-data-client/util/flatfiles/fieldFormatter.py | SNL-GMS/GMS-PI7-OPEN | 4c5f2a33a45566b12897bcdc129609c9e6b95442 | [
"BSD-3-Clause"
] | 7 | 2019-12-30T06:08:08.000Z | 2022-03-02T06:38:09.000Z | gms-common/python/master-coi-data-client/util/flatfiles/fieldFormatter.py | SNL-GMS/GMS-PI7-OPEN | 4c5f2a33a45566b12897bcdc129609c9e6b95442 | [
"BSD-3-Clause"
] | 1 | 2019-12-10T19:37:03.000Z | 2019-12-10T19:37:03.000Z | # -*- coding: utf-8 -*-
import util.flatfiles.shared
from util.conversions import *
'''
helper methods for formatting CSS flatfiles
All methods use ljust to left justify the field.
The integer argument to ljust specifies that a field has a width of exactly that integer value: the field
is padded with whitespace if the contents do not subsume the entire width.
The slicing (truncating) mechanism is added at the end of each field as a safeguard to
ensure that a field has width no more than the integer value.
'''
# Generic format
'''
1 space
'''
| 17.791829 | 106 | 0.721487 | # -*- coding: utf-8 -*-
import util.flatfiles.shared
from util.conversions import *
'''
helper methods for formatting CSS flatfiles
All methods use ljust to left justify the field.
The integer argument to ljust specifies that a field has a width of exactly that integer value: the field
is padded with whitespace if the contents do not subsume the entire width.
The slicing (truncating) mechanism is added at the end of each field as a safeguard to
ensure that a field has width no more than the integer value.
'''
def format_lat(latitude):
    """Format a latitude string into a fixed-width 11-character field
    (left-justified, padded or truncated as needed)."""
    return "{:<11.11}".format(latitude)
def format_lon(longitude):
return longitude.ljust(11)[:11]
def format_sta(siteUUID):
return siteUUID.ljust(6)[:6]
def format_refsta(refsta):
return refsta.ljust(6)[:6]
def format_statype(station_type):
return station_type.ljust(4)[:4]
def format_elev(elevation):
return elevation.ljust(9)[:9]
def format_dnorth(north_displacement):
return north_displacement.ljust(9)[:9]
def format_deast(east_displacement):
return east_displacement.ljust(9)[:9]
def format_chan(chanUUID):
return chanUUID.ljust(8)[:8]
def format_jdate(actualChangeTime):
return iso8601_to_jdate(actualChangeTime).ljust(8)[:8]
def format_ondate(actualChangeTime):
return iso8601_to_jdate(actualChangeTime).ljust(8)[:8]
def format_offdate(actualChangeTime):
    """Format an off-date into a fixed-width 8-character field.

    The sentinel '-1' ("no off date") is emitted as-is; any other value is
    first converted from ISO-8601 to a julian date string.
    """
    jdate = actualChangeTime if actualChangeTime == '-1' else iso8601_to_jdate(actualChangeTime)
    return jdate.ljust(8)[:8]
def format_calratio(calibrationConversionRatio):
return calibrationConversionRatio.ljust(16)[:16]
def format_chanid(UUID):
return UUID.ljust(8)[:8]
def format_version_id(versionId):
return versionId.ljust(5)[:5]
def format_ctype(n):
return n.ljust(4)[:4]
def format_edepth(depth):
return depth.ljust(24)[:24]
def format_hang(horizontalAngle):
return horizontalAngle.ljust(24)[:24]
def format_vang(verticalAngle):
return verticalAngle.ljust(24)[:24]
def format_descrip(description):
return description.ljust(50)[:50]
def format_net(net):
return net.ljust(8)[:8]
def format_netname(netname):
return netname.ljust(80)[:80]
def format_nettype(nettype):
return nettype.ljust(4)[:4]
def format_auth(auth):
return auth.ljust(15)[:15]
def format_commid(commid):
return commid.ljust(9)[:9]
def format_system_change_time(systemChangeTime):
return iso8601_to_regular_datetime(systemChangeTime).ljust(17)[:17]
def format_time(actualChangeTime):
return str(iso8601_to_epoch(actualChangeTime)).ljust(17)[:17]
def format_time_arrival(timeString):
return timeString.ljust(17)[:17]
def format_endtime(actualChangeTime):
if actualChangeTime == util.flatfiles.shared.NA_ENDTIME:
return actualChangeTime.ljust(17)[:17]
else:
return str(iso8601_to_epoch(actualChangeTime)).ljust(17)[:17]
def format_calper(calibrationPeriod):
return calibrationPeriod.ljust(16)[:16]
def format_tshift(timeShift):
return timeShift.ljust(16)[:16]
def format_staname(description):
return description.ljust(50)[:50]
def format_insname(manufacturer):
return manufacturer.ljust(50)[:50]
def format_instype(model):
return model.ljust(6)[:6]
def format_samprate(nominalSampleRate):
return nominalSampleRate.ljust(11)[:11]
def format_ncalib(nominalCalibrationFactor):
return nominalCalibrationFactor.ljust(16)[:16]
def format_ncalper(nominalCalibrationPeriod):
return nominalCalibrationPeriod.ljust(16)[:16]
def format_rsptype(type):
return type.ljust(6)[:6]
# NOTE(review): duplicate definition — `format_elev` is already defined earlier
# in this module with an identical body; this redefinition silently replaces it.
def format_elev(elevation):
    """Left-justify an elevation string into a fixed 9-character field."""
    return elevation.ljust(9)[:9]
def format_wfid(waveformId):
return waveformId.ljust(8)[:8]
def format_nsamp(numberOfSamples):
return numberOfSamples.ljust(8)[:8]
def format_samprate(sampleRatePerSecond):
return sampleRatePerSecond.ljust(11)[:11]
def format_calib(nominalCalibration):
return nominalCalibration.ljust(16)[:16]
def format_segtype(indexingMethod):
return indexingMethod.ljust(1)[:1]
def format_datatype(numericStorage):
return numericStorage.ljust(2)[:2]
def format_clip(clippedFlag):
return clippedFlag.ljust(1)[:1]
def format_dir(directory):
return directory.ljust(64)[:64]
def format_dfile(dataFile):
return dataFile.ljust(32)[:32]
def format_foff(byteOffset):
return byteOffset.ljust(10)[:10]
def format_commid_wfdisc(commid):
return commid.ljust(8)[:8]
def format_arid(arrivalId):
return arrivalId.ljust(9)[:9]
def format_stassid(arrivalGroupId):
return arrivalGroupId.ljust(9)[:9]
def format_iphase(reportedPhase):
return reportedPhase.ljust(8)[:8]
def format_stype(signalType):
return signalType.ljust(1)[:1]
def format_deltim(arrivalTimeUncertainty):
return arrivalTimeUncertainty.ljust(6)[:6]
def format_azimuth(observedAzimuth):
return observedAzimuth.ljust(7)[:7]
def format_delaz(azimuthUncertainty):
return azimuthUncertainty.ljust(7)[:7]
def format_slow(slownessMeasurement):
return slownessMeasurement.ljust(7)[:7]
def format_delslo(slownessUncertainty):
return slownessUncertainty.ljust(7)[:7]
def format_ema(emergenceAngle):
return emergenceAngle.ljust(7)[:7]
def format_rect(signalRectilinearity):
return signalRectilinearity.ljust(7)[:7]
def format_amp(measuredAmplitude):
return measuredAmplitude.ljust(11)[:11]
def format_per(measuredPeriodAtTimeOfAmplitudeMeasurement):
return measuredPeriodAtTimeOfAmplitudeMeasurement.ljust(7)[:7]
def format_logat(logOfAmpDividedByPeriod):
return logOfAmpDividedByPeriod.ljust(7)[:7]
def format_fm(firstMotion):
return firstMotion.ljust(2)[:2]
def format_snr(signalToNoiseRation):
return signalToNoiseRation.ljust(10)[:10]
def format_qual(onsetArrivalQuality):
return onsetArrivalQuality.ljust(1)[:1]
def format_belief(val):
return val.ljust(4)[:4]
def format_delta(val):
return val.ljust(8)[:8]
def format_evid(evid):
return evid.ljust(9)[:9]
def format_evname(evname):
return evname.ljust(32)[:32]
def format_perfor(perfor):
return perfor.ljust(9)[:9]
# Generic format
# NOTE(review): this shadows the builtin `format` within this module — consider
# renaming (e.g. `format_field`) if the builtin is ever needed here.
def format(val, size):
    """Left-justify `val` into a fixed-width field of exactly `size` characters,
    padding with spaces or truncating as needed."""
    return val.ljust(size)[:size]
def format_lat_11(lat):
return lat.ljust(11)[:11]
def format_dtype(dtype):
return dtype.ljust(1)[:1]
def format_mb(mb):
return mb.ljust(7)[:7]
def format_mbid(mbid):
return mbid.ljust(9)[:9]
def format_ms(ms):
return ms.ljust(7)[:7]
def format_msid(msid):
return msid.ljust(9)[:9]
def format_ml(ml):
return ml.ljust(7)[:7]
def format_mlid(mlid):
return mlid.ljust(9)[:9]
def format_algor(algor):
return algor.ljust(15)[:15]
def format_sxx(sxx):
return sxx.ljust(15)[:15]
def format_syy(syy):
return syy.ljust(15)[:15]
def format_szz(szz):
return szz.ljust(15)[:15]
def format_stt(stt):
return stt.ljust(15)[:15]
def format_sxy(sxy):
return sxy.ljust(15)[:15]
def format_sxz(sxz):
return sxz.ljust(15)[:15]
def format_sxt(sxt):
return sxt.ljust(15)[:15]
def format_syz(syz):
return syz.ljust(15)[:15]
def format_syt(syt):
return syt.ljust(15)[:15]
def format_szt(szt):
return szt.ljust(15)[:15]
def format_sdobs(stdDevObservation):
return stdDevObservation.ljust(9)[:9]
def format_smajax(majorAxis):
return majorAxis.ljust(9)[:9]
def format_sminax(minorAxis):
return minorAxis.ljust(9)[:9]
def format_strike(majorAxisTrend):
return majorAxisTrend.ljust(6)[:6]
def format_depth(depthKm):
return depthKm.ljust(9)[:9]
def format_orid(orid):
return orid.ljust(9)[:9]
def format_grn(grn):
return grn.ljust(8)[:8]
def format_srn(srn):
return srn.ljust(8)[:8]
def format_etype(etype):
return etype.ljust(7)[:7]
def format_depdp(depdp):
return depdp.ljust(9)[:9]
def format_sdepth(depthUncertainty):
return depthUncertainty.ljust(9)[:9]
def format_stime(timeUncertainty):
return timeUncertainty.ljust(6)[:6]
def format_conf(confidenceLevel):
return confidenceLevel.ljust(5)[:5]
def format_prefor(preferredEventHypId):
return preferredEventHypId.ljust(8)[:8]
def format_nass(nass):
return nass.ljust(4)[:4]
def format_ndef(ndef):
return ndef.ljust(4)[:4]
def format_ndp(ndp):
return ndp.ljust(4)[:4]
def format_phase(phase):
return phase.ljust(8)[:8]
def format_seaz(receiverToSourceAz):
return receiverToSourceAz.ljust(7)[:7]
def format_esaz(sourceToReceiverAz):
return sourceToReceiverAz.ljust(7)[:7]
def format_timeres(timeRes):
return timeRes.ljust(8)[:8]
def format_timedef(timeDef):
return timeDef.ljust(1)[:1]
def format_azres(azres):
return azres.ljust(7)[:7]
def format_azdef(azdef):
return azdef.ljust(1)[:1]
def format_slores(slores):
return slores.ljust(7)[:7]
def format_slodef(slodef):
return slodef.ljust(1)[:1]
def format_emares(emares):
return emares.ljust(7)[:7]
def format_wqt(wqt):
return wqt.ljust(6)[:6]
def format_vmodel(vmodel):
return vmodel.ljust(15)[:15]
'''
1 space
'''
def gap():
    """Return the single-space separator placed between flatfile fields."""
    return " "
| 5,662 | 0 | 2,805 |
9e70df1d2d4dde6b061e90b058d2136c68d8f180 | 15,104 | py | Python | blink/biencoder/eval_entity_discovery.py | parin1995/claim2fact | 9f928a5789b3dc85cfa69395e0ba02b0b84276a8 | [
"MIT"
] | 8 | 2021-09-08T04:39:31.000Z | 2022-02-10T06:28:36.000Z | blink/biencoder/eval_entity_discovery.py | parin1995/claim2fact | 9f928a5789b3dc85cfa69395e0ba02b0b84276a8 | [
"MIT"
] | null | null | null | blink/biencoder/eval_entity_discovery.py | parin1995/claim2fact | 9f928a5789b3dc85cfa69395e0ba02b0b84276a8 | [
"MIT"
] | 2 | 2022-01-04T08:10:37.000Z | 2022-01-28T00:07:19.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) 2021 Dhruv Agarwal and authors of arboEL.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import json
import math
import time
import torch
from torch.utils.data import (DataLoader, SequentialSampler)
import numpy as np
from tqdm import tqdm
import pickle
import faiss
from itertools import compress
from sklearn.cluster import KMeans
from sklearn.metrics.cluster import adjusted_rand_score, normalized_mutual_info_score
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import connected_components
from special_partition.special_partition import cluster_linking_partition
from collections import defaultdict
import blink.biencoder.data_process_mult as data_process
import blink.candidate_ranking.utils as utils
from blink.common.params import BlinkParser
from blink.biencoder.biencoder import BiEncoderRanker
from IPython import embed
if __name__ == "__main__":
parser = BlinkParser(add_model_args=True)
parser.add_eval_args()
args = parser.parse_args()
print(args)
main(args.__dict__)
| 44.952381 | 189 | 0.647577 | # Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) 2021 Dhruv Agarwal and authors of arboEL.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import json
import math
import time
import torch
from torch.utils.data import (DataLoader, SequentialSampler)
import numpy as np
from tqdm import tqdm
import pickle
import faiss
from itertools import compress
from sklearn.cluster import KMeans
from sklearn.metrics.cluster import adjusted_rand_score, normalized_mutual_info_score
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import connected_components
from special_partition.special_partition import cluster_linking_partition
from collections import defaultdict
import blink.biencoder.data_process_mult as data_process
import blink.candidate_ranking.utils as utils
from blink.common.params import BlinkParser
from blink.biencoder.biencoder import BiEncoderRanker
from IPython import embed
def partition_graph(graph, n_entities, directed, return_clusters=False, exclude=None, threshold=None, without_entities=False):
    """Partition a joint mention/entity graph into a sparse matrix and clusters.

    Args:
        graph: dict with 'rows', 'cols', 'data' edge arrays and a 'shape' tuple.
        n_entities: number of entity nodes (mention nodes follow them).
        directed: whether the graph/partitioning is treated as directed.
        return_clusters: if True, also return connected components of size >= 2.
        exclude: node indices whose incident edges are dropped (default: none).
        threshold: minimum edge weight to keep an edge (None keeps all).
        without_entities: if True, skip the special cluster-linking partition
            and only apply the manual de-duplication/exclusion/threshold filter.

    Returns:
        A scipy ``coo_matrix``, or ``(coo_matrix, {component_label: [node, ...]})``
        when ``return_clusters`` is True.
    """
    # Fix: the original signature used a shared mutable default (exclude=set());
    # a None sentinel preserves behavior without that pitfall.
    if exclude is None:
        exclude = set()
    rows, cols, data, shape = graph['rows'], graph['cols'], graph['data'], graph['shape']
    if not without_entities:
        # Enforce cluster-linking constraints via the special partitioner.
        rows, cols, data = cluster_linking_partition(
            rows,
            cols,
            data,
            n_entities,
            directed,
            exclude=exclude,
            threshold=threshold
        )
    else:
        # Manually apply the pre-filtering that the special partition executes:
        # drop duplicate edges, edges touching excluded nodes, and edges whose
        # weight falls below the threshold.
        seen = set()
        duplicated, excluded, thresholded = 0, 0, 0
        _f_row, _f_col, _f_data = [], [], []
        for k in range(len(rows)):
            if (rows[k], cols[k]) in seen:
                duplicated += 1
                continue
            seen.add((rows[k], cols[k]))
            if rows[k] in exclude or cols[k] in exclude:
                excluded += 1
                continue
            if threshold is not None and data[k] < threshold:
                thresholded += 1
                continue
            _f_row.append(rows[k])
            _f_col.append(cols[k])
            _f_data.append(data[k])
        rows, cols, data = list(map(np.array, (_f_row, _f_col, _f_data)))
        if duplicated + excluded + thresholded > 0:
            print(f"""
            Dropped edges during pre-processing:
            Duplicates: {duplicated}
            Excluded: {excluded}
            Thresholded: {thresholded}""")
    # Construct the partitioned graph
    partitioned_graph = coo_matrix(
        (data, (rows, cols)), shape=shape)
    if return_clusters:
        # Label every node with the connected component it belongs to.
        _, cc_labels = connected_components(
            csgraph=partitioned_graph,
            directed=directed,
            return_labels=True)
        # Keep only components containing at least 2 nodes.
        unique_cc_labels, cc_sizes = np.unique(cc_labels, return_counts=True)
        filtered_labels = unique_cc_labels[cc_sizes >= 2]
        clusters = defaultdict(list)
        for i, cc_label in enumerate(cc_labels):
            if cc_label in filtered_labels:
                clusters[cc_label].append(i)
        return partitioned_graph, clusters
    return partitioned_graph
def analyzeClusters(clusters, gold_cluster_labels, n_entities, n_mentions, logger):
    """Score predicted mention clusters against the gold entity labels.

    Mentions not assigned to any predicted cluster keep a unique negative
    sentinel label, so each unassigned mention counts as its own singleton.

    Args:
        clusters: mapping of cluster label -> list of graph node indices
            (entity nodes occupy [0, n_entities); mention nodes follow).
        gold_cluster_labels: gold entity index for each mention.
        n_entities: number of entity nodes (offset subtracted from node indices).
        n_mentions: total number of mentions.
        logger: logger used for progress and result reporting.

    Returns:
        dict with 'rand_index', 'nmi', and their 'average'.
    """
    logger.info("Analyzing clusters...")
    # One unique negative label per mention => unassigned mentions stay singletons.
    predicted_cluster_labels = [-1*i for i in range(1, n_mentions+1)]
    n_predicted = 0
    for cluster in clusters.values():
        # The first node index of a cluster serves as its predicted label.
        cluster_label = cluster[0]
        for i in range(len(cluster)):
            men_idx = cluster[i] - n_entities
            if men_idx < 0:
                # Entity node, not a mention: skip.
                continue
            predicted_cluster_labels[men_idx] = cluster_label
            n_predicted += 1
    debug_no_pred = 0
    for l in predicted_cluster_labels:
        if l < 0:
            debug_no_pred += 1
    # Every mention is either assigned to a cluster or left as a singleton.
    assert n_predicted + debug_no_pred == n_mentions
    # Fix: corrected "singelton" typo in the log message.
    logger.info(f"{n_predicted} mentions assigned to {len(clusters)} clusters; {debug_no_pred} singleton clusters")
    nmi = normalized_mutual_info_score(gold_cluster_labels, predicted_cluster_labels)
    rand_index = adjusted_rand_score(gold_cluster_labels, predicted_cluster_labels)
    result = (nmi + rand_index) / 2
    logger.info(f"NMI={nmi}, rand_index={rand_index} => average={result}")
    return {'rand_index': rand_index, 'nmi': nmi, 'average': result}
def main(params):
    """Run the entity-discovery evaluation end to end.

    Loads the preprocessed entity dictionary, mention data, stored embeddings
    and precomputed joint mention/entity graphs; drops some (or all) entities
    that appear in the mention set; re-links every mention to its nearest
    remaining entity; then partitions each graph (directed and/or undirected,
    at several edge-weight thresholds) and scores the predicted clusters
    against the gold labels. Results are dumped to a timestamped JSON file
    under params["output_path"].
    """
    time_start = time.time()
    output_path = params["output_path"]
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    logger = utils.get_logger(params["output_path"], 'log-discovery')
    # Fall back to the output directory when the embed/pickle paths are unset.
    embed_data_path = params["embed_data_path"]
    if embed_data_path is None or not os.path.exists(embed_data_path):
        embed_data_path = output_path
    pickle_src_path = params["pickle_src_path"]
    if pickle_src_path is None or not os.path.exists(pickle_src_path):
        pickle_src_path = output_path
    # Fixed seed so the randomly dropped entity subset is reproducible.
    rng = np.random.default_rng(seed=17)
    knn = params["knn"]
    use_types = params["use_types"]
    data_split = params["data_split"] # Default = "test"
    graph_mode = params.get('graph_mode', None)
    logger.info(f"Dataset: {data_split.upper()}")
    # Load evaluation data
    entity_dictionary_loaded = False  # NOTE(review): unused
    dictionary_pkl_path = os.path.join(pickle_src_path, 'test_dictionary.pickle')
    tensor_data_pkl_path = os.path.join(pickle_src_path, 'test_tensor_data.pickle')
    mention_data_pkl_path = os.path.join(pickle_src_path, 'test_mention_data.pickle')
    print("Loading stored processed entity dictionary...")
    with open(dictionary_pkl_path, 'rb') as read_handle:
        dictionary = pickle.load(read_handle)
    print("Loading stored processed mention data...")
    with open(tensor_data_pkl_path, 'rb') as read_handle:
        tensor_data = pickle.load(read_handle)
    with open(mention_data_pkl_path, 'rb') as read_handle:
        mention_data = pickle.load(read_handle)
    print("Loading embed data...")
    # Check and load stored embedding data
    embed_data_path = os.path.join(embed_data_path, 'embed_data.t7')
    embed_data = torch.load(embed_data_path)
    # Load stored joint graphs
    graph_path = os.path.join(output_path, 'graphs.pickle')
    print("Loading stored joint graphs...")
    with open(graph_path, 'rb') as read_handle:
        joint_graphs = pickle.load(read_handle)
    n_entities = len(dictionary)
    n_mentions = len(mention_data)
    n_labels = 1 # Zeshel and MedMentions have single gold entity mentions
    # Gold entity index for each mention (single gold label per mention).
    mention_gold_cui_idxs = list(map(lambda x: x['label_idxs'][n_labels - 1], mention_data))
    ents_in_data = np.unique(mention_gold_cui_idxs)
    if params['drop_all_entities']:
        # Remove every entity seen in the mention set: pure discovery setting.
        ent_drop_prop = 1
        n_ents_dropped = len(ents_in_data)
        n_mentions_wo_gold_ents = n_mentions
        logger.info(f"Dropping all {n_ents_dropped} entities found in mention set")
        set_dropped_ent_idxs = set()
    else:
        # Percentage of entities from the mention set to drop
        ent_drop_prop = 0.1
        logger.info(f"Dropping {ent_drop_prop*100}% of {len(ents_in_data)} entities found in mention set")
        # Get entity indices to drop
        n_ents_dropped = int(ent_drop_prop*len(ents_in_data))
        dropped_ent_idxs = rng.choice(ents_in_data, size=n_ents_dropped, replace=False)
        set_dropped_ent_idxs = set(dropped_ent_idxs)
        n_mentions_wo_gold_ents = sum([1 if x in set_dropped_ent_idxs else 0 for x in mention_gold_cui_idxs])
        logger.info(f"Dropped {n_ents_dropped} entities")
        logger.info(f"=> Mentions without gold entities = {n_mentions_wo_gold_ents}")
        # Load embeddings in order to compute new KNN entities after dropping
        print('Computing new dictionary indexes...')
        original_dict_embeds = embed_data['dict_embeds']
        # Mask out dropped entities and remember the new->old index mapping.
        keep_mask = np.ones(len(original_dict_embeds), dtype='bool')
        keep_mask[dropped_ent_idxs] = False
        dict_embeds = original_dict_embeds[keep_mask]
        new_to_old_dict_mapping = []
        for i in range(len(original_dict_embeds)):
            if keep_mask[i]:
                new_to_old_dict_mapping.append(i)
        men_embeds = embed_data['men_embeds']
        if use_types:
            # Build one FAISS index per entity type.
            dict_idxs_by_type = data_process.get_idxs_by_type(list(compress(dictionary, keep_mask)))
            dict_indexes = data_process.get_index_from_embeds(dict_embeds, dict_idxs_by_type, force_exact_search=params['force_exact_search'], probe_mult_factor=params['probe_mult_factor'])
            if 'men_idxs_by_type' in embed_data:
                men_idxs_by_type = embed_data['men_idxs_by_type']
            else:
                men_idxs_by_type = data_process.get_idxs_by_type(mention_data)
        else:
            dict_index = data_process.get_index_from_embeds(dict_embeds, force_exact_search=params['force_exact_search'], probe_mult_factor=params['probe_mult_factor'])
        # Fetch additional KNN entity to make sure every mention has a linked entity after dropping
        extra_entity_knn = []
        if use_types:
            for men_type in men_idxs_by_type:
                dict_index = dict_indexes[men_type]
                dict_type_idx_mapping = dict_idxs_by_type[men_type]
                q_men_embeds = men_embeds[men_idxs_by_type[men_type]] # np.array(list(map(lambda x: men_embeds[x], men_idxs_by_type[men_type])))
                # Exact (flat) indexes need only the top hit; approximate ones probe 16.
                fetch_k = 1 if isinstance(dict_index, faiss.IndexFlatIP) else 16
                _, nn_idxs = dict_index.search(q_men_embeds, fetch_k)
                for i, men_idx in enumerate(men_idxs_by_type[men_type]):
                    # Mention nodes are offset by n_entities in the joint graph.
                    r = n_entities + men_idx
                    q_nn_idxs = dict_type_idx_mapping[nn_idxs[i]]
                    q_nn_embeds = torch.tensor(dict_embeds[q_nn_idxs]).cuda()
                    q_scores = torch.flatten(
                        torch.mm(torch.tensor(q_men_embeds[i:i+1]).cuda(), q_nn_embeds.T)).cpu()
                    c, data = new_to_old_dict_mapping[q_nn_idxs[torch.argmax(q_scores)]], torch.max(q_scores)
                    extra_entity_knn.append((r,c,data))
        else:
            fetch_k = 1 if isinstance(dict_index, faiss.IndexFlatIP) else 16
            _, nn_idxs = dict_index.search(men_embeds, fetch_k)
            for men_idx, men_embed in enumerate(men_embeds):
                r = n_entities + men_idx
                q_nn_idxs = nn_idxs[men_idx]
                q_nn_embeds = torch.tensor(dict_embeds[q_nn_idxs]).cuda()
                q_scores = torch.flatten(
                    torch.mm(torch.tensor(np.expand_dims(men_embed, axis=0)).cuda(), q_nn_embeds.T)).cpu()
                c, data = new_to_old_dict_mapping[q_nn_idxs[torch.argmax(q_scores)]], torch.max(q_scores)
                extra_entity_knn.append((r,c,data))
        # Add the extra mention->entity edges to every stored joint graph.
        for k in joint_graphs:
            rows, cols, data= [], [], []
            for edge in extra_entity_knn:
                rows.append(edge[0])
                cols.append(edge[1])
                data.append(edge[2])
            joint_graphs[k]['rows'] = np.concatenate((joint_graphs[k]['rows'], rows))
            joint_graphs[k]['cols'] = np.concatenate((joint_graphs[k]['cols'], cols))
            joint_graphs[k]['data'] = np.concatenate((joint_graphs[k]['data'], data))
    # Summary statistics stored alongside the per-configuration results.
    results = {
        'data_split': data_split.upper(),
        'n_entities': n_entities,
        'n_mentions': n_mentions,
        'n_entities_dropped': f"{n_ents_dropped} ({ent_drop_prop*100}%)",
        'n_mentions_wo_gold_entities': n_mentions_wo_gold_ents
    }
    if graph_mode is None or graph_mode not in ['directed', 'undirected']:
        graph_mode = ['directed', 'undirected']
    else:
        graph_mode = [graph_mode]
    n_thresholds = params['n_thresholds'] # Default is 10
    exact_threshold = params.get('exact_threshold', None)
    exact_knn = params.get('exact_knn', None)
    # Candidate thresholds = 0 plus the k-means centers of the edge weights.
    kmeans = KMeans(n_clusters=n_thresholds, random_state=17)
    # TODO: Baseline? (without dropping entities)
    for mode in graph_mode:
        best_result = -1.
        best_config = None
        for k in joint_graphs:
            if params['drop_all_entities']:
                # Drop all entities from the graph
                rows, cols, data = joint_graphs[k]['rows'], joint_graphs[k]['cols'], joint_graphs[k]['data']
                _f_row, _f_col, _f_data = [], [], []
                for ki in range(len(joint_graphs[k]['rows'])):
                    if joint_graphs[k]['cols'][ki] < n_entities or joint_graphs[k]['rows'][ki] < n_entities:
                        continue
                    _f_row.append(joint_graphs[k]['rows'][ki])
                    _f_col.append(joint_graphs[k]['cols'][ki])
                    _f_data.append(joint_graphs[k]['data'][ki])
                joint_graphs[k]['rows'], joint_graphs[k]['cols'], joint_graphs[k]['data'] = list(map(np.array, (_f_row, _f_col, _f_data)))
            if (exact_knn is None and k > 0 and k <= knn) or (exact_knn is not None and k == exact_knn):
                if exact_threshold is not None:
                    thresholds = np.array([0, exact_threshold])
                else:
                    thresholds = np.sort(np.concatenate(([0], kmeans.fit(joint_graphs[k]['data'].reshape(-1,1)).cluster_centers_.flatten())))
                for thresh in thresholds:
                    print("\nPartitioning...")
                    logger.info(f"{mode.upper()}, k={k}, threshold={thresh}")
                    # Partition graph based on cluster-linking constraints
                    partitioned_graph, clusters = partition_graph(
                        joint_graphs[k], n_entities, mode == 'directed', return_clusters=True, exclude=set_dropped_ent_idxs, threshold=thresh, without_entities=params['drop_all_entities'])
                    # Analyze cluster against gold clusters
                    result = analyzeClusters(clusters, mention_gold_cui_idxs, n_entities, n_mentions, logger)
                    results[f'({mode}, {k}, {thresh})'] = result
                    # Track the best configuration (threshold 0 is excluded).
                    if thresh != 0 and result['average'] > best_result:
                        best_result = result['average']
                        best_config = (mode, k, thresh)
        results[f'best_{mode}_config'] = best_config
        results[f'best_{mode}_result'] = best_result
    # Store results
    output_file_name = os.path.join(
        output_path, f"{data_split}_eval_discovery_{__import__('calendar').timegm(__import__('time').gmtime())}.json")
    with open(output_file_name, 'w') as f:
        json.dump(results, f, indent=2)
    print(f"\nAnalysis saved at: {output_file_name}")
    execution_time = (time.time() - time_start) / 60
    logger.info(f"\nTotal time taken: {execution_time} minutes\n")
if __name__ == "__main__":
parser = BlinkParser(add_model_args=True)
parser.add_eval_args()
args = parser.parse_args()
print(args)
main(args.__dict__)
| 13,835 | 0 | 69 |
15a695620d5688781ce9dae02f1a6bc734c2c733 | 1,623 | py | Python | cupid/camera.py | iinnovations/iicontrollibs | 94af26a61405f1ad928d36e36602ebb859a2e44f | [
"Apache-2.0"
] | 11 | 2015-06-22T21:38:15.000Z | 2021-03-10T11:24:21.000Z | cupid/camera.py | iinnovations/iicontrollibs | 94af26a61405f1ad928d36e36602ebb859a2e44f | [
"Apache-2.0"
] | null | null | null | cupid/camera.py | iinnovations/iicontrollibs | 94af26a61405f1ad928d36e36602ebb859a2e44f | [
"Apache-2.0"
] | 12 | 2015-03-05T00:19:40.000Z | 2020-12-18T15:21:44.000Z | #!/usr/bin/python3
__author__ = "Colin Reese"
__copyright__ = "Copyright 2016, Interface Innovations"
__credits__ = ["Colin Reese"]
__license__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "Colin Reese"
__email__ = "support@interfaceinnovations.org"
__status__ = "Development"
if __name__ == "__main__":
takesnap() | 33.122449 | 181 | 0.674677 | #!/usr/bin/python3
__author__ = "Colin Reese"
__copyright__ = "Copyright 2016, Interface Innovations"
__credits__ = ["Colin Reese"]
__license__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "Colin Reese"
__email__ = "support@interfaceinnovations.org"
__status__ = "Development"
def takesnap(path='/var/www/webcam/images/', filename='current.jpg', quality=75, width=None, timeout=2000):
    """Capture a still image with the Raspberry Pi camera via `raspistill`.

    Args:
        path: directory to write the image into (must end with a separator).
        filename: image file name appended to `path`.
        quality: JPEG quality passed to raspistill.
        width: optional output width in pixels; the height is derived using a
            4:3 aspect ratio. When None, 2592x1944 is used.
        timeout: raspistill timeout argument, in milliseconds.

    Returns:
        dict with capture timing, output paths, image dimensions, and the
        resulting file size (0 if the file could not be stat'ed).
    """
    import subprocess
    import os
    from iiutilities.datalib import timestringtoseconds
    from iiutilities.datalib import gettimestring
    imagepath = path + filename
    timestamp = gettimestring()
    timestamppath = imagepath + '.timestamp'
    time1 = gettimestring()
    if width:
        # Derive height from width assuming a 4:3 aspect ratio.
        height = int(float(width) / 1.33333)
        subprocess.call(['raspistill','-q', str(quality), '--width', str(width), '--height', str(height), '-t', str(timeout), '-o', imagepath])
    else:
        width = 2592
        height = 1944
        subprocess.call(['raspistill','-q', str(quality), '-t', str(timeout), '-o', imagepath])
    # Record when the snapshot was taken alongside the image.
    # (Fix: removed redundant f.close() inside the `with` block.)
    with open(timestamppath,'w') as f:
        f.write(timestamp)
    time2 = gettimestring()
    elapsedtime = timestringtoseconds(time2) - timestringtoseconds(time1)
    try:
        imagesize = os.path.getsize(imagepath)
    except OSError:
        # Fix: narrowed the bare `except:` — only a missing/unreadable file
        # (e.g. when the capture failed) should yield size 0.
        imagesize = 0
    return {'elapsedtime':elapsedtime, 'imagepath':imagepath, 'timestamp':timestamp, 'timestamppath': timestamppath, 'imageheight':height, 'imagewidth':width, 'imagesize':imagesize}
if __name__ == "__main__":
takesnap() | 1,272 | 0 | 23 |
799cc6dc3e7ff67b281f390e045b4dee11a4e321 | 5,085 | py | Python | Task2/IGNORE_miscellaneousScripts/getPredictions_weakDefenses_old.py | Jacob-L-Vincent/project-athena | d1d300e375941399f116cbaa4678a9ed7c6652db | [
"MIT"
] | 1 | 2020-11-11T19:22:25.000Z | 2020-11-11T19:22:25.000Z | Task2/IGNORE_miscellaneousScripts/getPredictions_weakDefenses_old.py | Jacob-L-Vincent/project-athena | d1d300e375941399f116cbaa4678a9ed7c6652db | [
"MIT"
] | null | null | null | Task2/IGNORE_miscellaneousScripts/getPredictions_weakDefenses_old.py | Jacob-L-Vincent/project-athena | d1d300e375941399f116cbaa4678a9ed7c6652db | [
"MIT"
] | null | null | null | #########################################################
## generate the ensemble predictions for task 2        ##
## created by Isaac Keohane isaackeohane95@gmail.com   ##
#########################################################
# Script: runs an Athena ensemble of weak defenses over benign and
# adversarial MNIST examples and saves both the raw per-defense outputs
# and the fused ensemble outputs to .npy files under `output_dir`.
import os
import sys
# Make ../src importable so the project's utils/scripts packages resolve.
module_path = os.path.abspath(os.path.join('../src'))
if module_path not in sys.path:
    sys.path.append(module_path)
import numpy as np
import os  # NOTE(review): duplicate of the earlier `import os` (harmless)
from matplotlib import pyplot as plt  # NOTE(review): imported but never used here
from utils.file import load_from_json
from scripts.ourFuncs_task2 import generate_subset
from scripts.setup_ensemble import setup_ensemble
# load experiment configurations
trans_configs = load_from_json("../src/configs/demo/athena-mnist.json")
model_configs = load_from_json("../src/configs/demo/model-mnist.json")
data_configs = load_from_json("../src/configs/demo/data-mnist.json")
output_dir = "../ourDataFiles/ensembleOuts"
save_output = True  # when False, nothing is written to disk
verbose = 10        # levels > 5 enable progress printing below
#####################################################
### setup the ensemble pool of weak defenses
# This wdList can be changed to a list of indexes of weak defenses in the
# athena-mnist.json file to get a custom set of weak defenses used in the
# emsemble. Make sure to then set "customList" True and "useActi..." False
# both set to False makes it use all the transformations in trans_configs
wdList = []
useActiveList = False
customList = False
# run setup_ensemble to make an ensemble pool of weak defenses
athena = setup_ensemble(trans_configs=trans_configs,
                        model_configs=model_configs,
                        use_logits=False,
                        useActiveList=useActiveList,
                        customList=customList, wdList=wdList)
######################################################
### generate subset indexes for exmaples and save info file
# define the subset parameters
numberToSubset = 100
doRandom = True
totalNumData = 100  # NOTE(review): the subset is drawn assuming 100 samples;
                    # totalNumData is only updated from the real data *after*
                    # the subset is generated — confirm 100 matches the data.
# generate subset indexes to grab benign samples
subset, subsetElse = generate_subset(totalSize=totalNumData,doSave=True,
                                     number=numberToSubset,doRandom=doRandom,
                                     opath=[r"../ourInfoSaves/ensPred_subset.npy",
                                            r"../ourInfoSaves/ensPred_subsetElse.npy"])
# save info in a text file (records the subset and ensemble configuration)
if save_output:
    info_file = open(r"../ourInfoSaves/infoFile_ensPred.txt","w")
    info_file.write("Info file for ensemble predictions\n\n")
    info_file.write("numberToSubset: {}, doRandom: {}\nsubset:\n".format(
        numberToSubset, doRandom))
    info_file.write("{}\n\n".format(subset))
    info_file.write("useActiveList: {}\ncustomList: {}\nwdList: \n{}\n\n".format(
        str(useActiveList), str(customList), wdList) )
    info_file.write("dimensions of raw npy arrays: wd, input, class")
    info_file.close()
############################################################################
## generate and collect probabilities of benign samples
bs_file = os.path.join(data_configs.get('dir'), data_configs.get('bs_file'))
x_bs = np.load(bs_file)
if(verbose>5): print("\nbenign sample data dimensions: {}\n".format(x_bs.shape))
totalNumData = x_bs.shape[0]
x_bs = [x_bs[i] for i in subset]
# grab predictions
preds = athena.predict(x=x_bs) # raw is False by default
preds_raw = athena.predict(x=x_bs,raw=True)
if(verbose>5): print("\n>>> Shape of benign ensemble predictions: {}\n".format(preds.shape))
if save_output:
    np.save(output_dir+"/"+"ensemPredic_benign_raw.npy",preds_raw)
    np.save(output_dir+"/"+"ensemPredic_benign.npy",preds)
###########################################################################
### generate and collect the probabilities for our advers. examples
ae_dir, ae_files = data_configs.get('ae_dir'), data_configs.get('ae_files')
for ae_file in ae_files:
    ae_file1 = os.path.join(ae_dir, ae_file)
    x_ae = np.load(ae_file1)
    x_ae = [x_ae[i] for i in subset]
    # grab predictions
    preds = athena.predict(x=x_ae) # raw is False by default
    preds_raw = athena.predict(x=x_ae,raw=True)
    if save_output:
        np.save(output_dir+"/"+"ensemPredic_raw_{}".format(ae_file),preds_raw)
        np.save(output_dir+"/"+"ensemPredic_{}".format(ae_file),preds)
    if(verbose>5): print("\n>>> Shape of ae ensemble {} predictions: {}\n".format(ae_file,preds.shape))
##################################################33
# Same prediction pass over AEs generated against individual weak defenses.
# NOTE(review): `dirt` is an absolute, machine-specific path — confirm before reuse.
dirt = '/home/isaac/working_directory/misc/project-athena/data2_genAEs_weakD'
dirs = os.listdir(dirt)
results = []
results += [file for file in dirs]
for filename in results:
    x_ae = np.load(dirt + '/' + filename)
    x_ae = [x_ae[i] for i in subset]
    # grab predictions
    preds = athena.predict(x=x_ae) # raw is False by default
    preds_raw = athena.predict(x=x_ae,raw=True)
    if save_output:
        np.save(output_dir+"/"+"ensemPredic_raw_{}".format(filename),preds_raw)
        np.save(output_dir+"/"+"ensemPredic_{}".format(filename),preds)
    if(verbose>5): print("\n>>> Shape of ae ensemble {} predictions: {}\n".format(filename,preds.shape))
| 37.947761 | 108 | 0.632842 | #########################################################
## generate the ensemble predictions for task 2        ##
## created by Isaac Keohane isaackeohane95@gmail.com   ##
#########################################################
# Script: runs an Athena ensemble of weak defenses over benign and
# adversarial MNIST examples and saves both the raw per-defense outputs
# and the fused ensemble outputs to .npy files under `output_dir`.
import os
import sys
# Make ../src importable so the project's utils/scripts packages resolve.
module_path = os.path.abspath(os.path.join('../src'))
if module_path not in sys.path:
    sys.path.append(module_path)
import numpy as np
import os  # NOTE(review): duplicate of the earlier `import os` (harmless)
from matplotlib import pyplot as plt  # NOTE(review): imported but never used here
from utils.file import load_from_json
from scripts.ourFuncs_task2 import generate_subset
from scripts.setup_ensemble import setup_ensemble
# load experiment configurations
trans_configs = load_from_json("../src/configs/demo/athena-mnist.json")
model_configs = load_from_json("../src/configs/demo/model-mnist.json")
data_configs = load_from_json("../src/configs/demo/data-mnist.json")
output_dir = "../ourDataFiles/ensembleOuts"
save_output = True  # when False, nothing is written to disk
verbose = 10        # levels > 5 enable progress printing below
#####################################################
### setup the ensemble pool of weak defenses
# This wdList can be changed to a list of indexes of weak defenses in the
# athena-mnist.json file to get a custom set of weak defenses used in the
# emsemble. Make sure to then set "customList" True and "useActi..." False
# both set to False makes it use all the transformations in trans_configs
wdList = []
useActiveList = False
customList = False
# run setup_ensemble to make an ensemble pool of weak defenses
athena = setup_ensemble(trans_configs=trans_configs,
                        model_configs=model_configs,
                        use_logits=False,
                        useActiveList=useActiveList,
                        customList=customList, wdList=wdList)
######################################################
### generate subset indexes for exmaples and save info file
# define the subset parameters
numberToSubset = 100
doRandom = True
totalNumData = 100  # NOTE(review): the subset is drawn assuming 100 samples;
                    # totalNumData is only updated from the real data *after*
                    # the subset is generated — confirm 100 matches the data.
# generate subset indexes to grab benign samples
subset, subsetElse = generate_subset(totalSize=totalNumData,doSave=True,
                                     number=numberToSubset,doRandom=doRandom,
                                     opath=[r"../ourInfoSaves/ensPred_subset.npy",
                                            r"../ourInfoSaves/ensPred_subsetElse.npy"])
# save info in a text file (records the subset and ensemble configuration)
if save_output:
    info_file = open(r"../ourInfoSaves/infoFile_ensPred.txt","w")
    info_file.write("Info file for ensemble predictions\n\n")
    info_file.write("numberToSubset: {}, doRandom: {}\nsubset:\n".format(
        numberToSubset, doRandom))
    info_file.write("{}\n\n".format(subset))
    info_file.write("useActiveList: {}\ncustomList: {}\nwdList: \n{}\n\n".format(
        str(useActiveList), str(customList), wdList) )
    info_file.write("dimensions of raw npy arrays: wd, input, class")
    info_file.close()
############################################################################
## generate and collect probabilities of benign samples
bs_file = os.path.join(data_configs.get('dir'), data_configs.get('bs_file'))
x_bs = np.load(bs_file)
if(verbose>5): print("\nbenign sample data dimensions: {}\n".format(x_bs.shape))
totalNumData = x_bs.shape[0]
x_bs = [x_bs[i] for i in subset]
# grab predictions
preds = athena.predict(x=x_bs) # raw is False by default
preds_raw = athena.predict(x=x_bs,raw=True)
if(verbose>5): print("\n>>> Shape of benign ensemble predictions: {}\n".format(preds.shape))
if save_output:
    np.save(output_dir+"/"+"ensemPredic_benign_raw.npy",preds_raw)
    np.save(output_dir+"/"+"ensemPredic_benign.npy",preds)
###########################################################################
### generate and collect the probabilities for our advers. examples
ae_dir, ae_files = data_configs.get('ae_dir'), data_configs.get('ae_files')
for ae_file in ae_files:
    ae_file1 = os.path.join(ae_dir, ae_file)
    x_ae = np.load(ae_file1)
    x_ae = [x_ae[i] for i in subset]
    # grab predictions
    preds = athena.predict(x=x_ae) # raw is False by default
    preds_raw = athena.predict(x=x_ae,raw=True)
    if save_output:
        np.save(output_dir+"/"+"ensemPredic_raw_{}".format(ae_file),preds_raw)
        np.save(output_dir+"/"+"ensemPredic_{}".format(ae_file),preds)
    if(verbose>5): print("\n>>> Shape of ae ensemble {} predictions: {}\n".format(ae_file,preds.shape))
##################################################33
# Same prediction pass over AEs generated against individual weak defenses.
# NOTE(review): `dirt` is an absolute, machine-specific path — confirm before reuse.
dirt = '/home/isaac/working_directory/misc/project-athena/data2_genAEs_weakD'
dirs = os.listdir(dirt)
results = []
results += [file for file in dirs]
for filename in results:
    x_ae = np.load(dirt + '/' + filename)
    x_ae = [x_ae[i] for i in subset]
    # grab predictions
    preds = athena.predict(x=x_ae) # raw is False by default
    preds_raw = athena.predict(x=x_ae,raw=True)
    if save_output:
        np.save(output_dir+"/"+"ensemPredic_raw_{}".format(filename),preds_raw)
        np.save(output_dir+"/"+"ensemPredic_{}".format(filename),preds)
    if(verbose>5): print("\n>>> Shape of ae ensemble {} predictions: {}\n".format(filename,preds.shape))
| 0 | 0 | 0 |
c984f47ed4da7a82fac2d962c224423565a31436 | 1,569 | py | Python | DetectCartype.py | mani-vegupatti/car_detection_in_video | 4cf3b6aae6510e2cbfa5563898884f4ca73826b7 | [
"MIT"
] | null | null | null | DetectCartype.py | mani-vegupatti/car_detection_in_video | 4cf3b6aae6510e2cbfa5563898884f4ca73826b7 | [
"MIT"
] | null | null | null | DetectCartype.py | mani-vegupatti/car_detection_in_video | 4cf3b6aae6510e2cbfa5563898884f4ca73826b7 | [
"MIT"
] | null | null | null | import tensorflow as tf
import keras
from keras.preprocessing import image
from keras.initializers import glorot_uniform
import numpy as np
import cv2
import os
import csv
IMG_SIZE = 224
color = {}
| 34.866667 | 187 | 0.637349 | import tensorflow as tf
import keras
from keras.preprocessing import image
from keras.initializers import glorot_uniform
import numpy as np
import cv2
import os
import csv
IMG_SIZE = 224
color = {}
class DetectCartype():
    """Binary car-body-type classifier (Hatchback vs Sedan).

    Rebuilds a Keras model from a JSON architecture file plus an HDF5
    weights file, then classifies individual image arrays.
    """

    def __init__(self,model_json="./cartype_model_data/cartype_model_v01.json", weights_h5="./cartype_model_data/cartype_weights_v01.h5"):
        # Paths to the serialized architecture (JSON) and weights (HDF5).
        self.model_json = model_json
        self.weights_h5 = weights_h5
        self.model = self.load_model()

    def load_model(self):
        """Reconstruct the Keras model from JSON and attach its weights."""
        with open(self.model_json, 'r') as arch_file:
            arch_json = arch_file.read()
        rebuilt = tf.keras.models.model_from_json(arch_json)
        rebuilt.summary()  # print the layer layout for a sanity check
        rebuilt.load_weights(self.weights_h5)
        return rebuilt

    def predict_cartype(self,in_img):
        """Classify one image array; returns "Hatchback" or "Sedan"."""
        resized = cv2.resize(in_img, (IMG_SIZE, IMG_SIZE))
        # Build a (1, height, width, channels) batch scaled to [0, 1].
        batch = np.expand_dims(image.img_to_array(resized), axis=0)
        batch /= 255.
        score = self.model.predict(batch)
        # Scores below 0.5 map to "Hatchback", 0.5 and above to "Sedan".
        return "Hatchback" if score[0][0] < 0.5 else "Sedan"
| 1,238 | 1 | 106 |
302fb68444e03d57eea86305002642d21bb55592 | 16,964 | py | Python | synarchive/connection.py | aimakerspace/synergos_archive | da68b745982658f50bd40ddc380c3dbc0dcb3b29 | [
"Apache-2.0"
] | null | null | null | synarchive/connection.py | aimakerspace/synergos_archive | da68b745982658f50bd40ddc380c3dbc0dcb3b29 | [
"Apache-2.0"
] | null | null | null | synarchive/connection.py | aimakerspace/synergos_archive | da68b745982658f50bd40ddc380c3dbc0dcb3b29 | [
"Apache-2.0"
] | 1 | 2022-01-21T01:05:46.000Z | 2022-01-21T01:05:46.000Z | #!/usr/bin/env python
####################
# Required Modules #
####################
# Generic/Built-in
import os
from typing import Dict
# Libs
import jsonschema
import tinydb
# Custom
from .base import TopicalRecords, AssociationRecords
from .config import SCHEMAS as schemas
##################
# Configurations #
##################
#############################################
# Data Storage Class - CollaborationRecords #
#############################################
###########
# Helpers #
###########
##################
# Core Functions #
##################
#######################################
# Data Storage Class - ProjectRecords #
#######################################
###########
# Helpers #
###########
##################
# Core Functions #
##################
###########################################
# Data Storage Class - ParticipantRecords #
###########################################
###########
# Helpers #
###########
##################
# Core Functions #
##################
##########################################
# Data Storage Class - ExperimentRecords #
##########################################
###########
# Helpers #
###########
##################
# Core Functions #
##################
###################################
# Data Storage Class - RunRecords #
###################################
###########
# Helpers #
###########
##################
# Core Functions #
##################
#################################################
# Data Storage Association class - Registration #
#################################################
class RegistrationRecords(AssociationRecords):
    """ RegistrationRecords documents associative records as a means to allow
    participants to interact with different projects and vice-versa.
    Note: Associative records DO NOT have user-allocated IDs! They are
    auto-generated to be used as foreign keys in other downstream
    associative records. Registrations are associative records and
    will not have a registration ID as part of its composite key.
    Instead it will exist under the 'link' key.
    """
    # NOTE(review): no methods appear under the section headers below —
    # this class body looks truncated/elided, so behavior currently falls
    # through entirely to AssociationRecords. Confirm against the original.
    ###########
    # Helpers #
    ###########
    ##################
    # Core Functions #
    ##################
###############################################
# Data Storage Association class - TagRecords #
###############################################
class TagRecords(AssociationRecords):
    """ TagRecords documents the child associations of a participant with its
    registered project, archiving data tags used to locate datasets to be
    loaded during FL training.
    Note: Associative records DO NOT have user-allocated IDs! They are
    auto-generated to be used as foreign keys in other downstream
    associative records. Tags are associative records and will not
    have a tag ID as part of its composite key.
    """
    # NOTE(review): no methods appear under the section headers below —
    # this class body looks truncated/elided, so behavior currently falls
    # through entirely to AssociationRecords. Confirm against the original.
    ###########
    # Helpers #
    ###########
    ##################
    # Core Functions #
    ##################
| 27.718954 | 85 | 0.551521 | #!/usr/bin/env python
####################
# Required Modules #
####################
# Generic/Built-in
import os
from typing import Dict
# Libs
import jsonschema
import tinydb
# Custom
from .base import TopicalRecords, AssociationRecords
from .config import SCHEMAS as schemas
##################
# Configurations #
##################
#############################################
# Data Storage Class - CollaborationRecords #
#############################################
class CollaborationRecords(TopicalRecords):
    """Topical archive for collaboration records.

    Each collaboration is keyed solely by its ``collab_id``; the record
    types listed in ``__init__`` are passed through to the TopicalRecords
    base as related subjects.
    """

    def __init__(self, db_path: str):
        related_subjects = [
            "Project",
            "Experiment",
            "Run",
            "Registration",
            "Tag",
            "Alignment",
            "Model",
            "Validation",
            "Prediction",
        ]
        super().__init__("Collaboration", "collab_id", db_path, *related_subjects)

    ###########
    # Helpers #
    ###########

    def __generate_key(self, collab_id: str) -> Dict[str, str]:
        """Compose the archival key identifying one collaboration."""
        return {'collab_id': collab_id}

    ##################
    # Core Functions #
    ##################

    def create(self, collab_id: str, details: dict) -> dict:
        """Validate `details` against the collaboration schema and archive it."""
        jsonschema.validate(details, schemas["collaboration_schema"])
        record = {'key': self.__generate_key(collab_id)}
        record.update(details)
        return super().create(record)

    def read(self, collab_id: str) -> dict:
        """Fetch the archived collaboration, if present."""
        return super().read(self.__generate_key(collab_id))

    def update(self, collab_id: str, updates: dict) -> dict:
        """Apply `updates` to the archived collaboration."""
        return super().update(self.__generate_key(collab_id), updates)

    def delete(self, collab_id: str) -> dict:
        """Remove the archived collaboration."""
        return super().delete(self.__generate_key(collab_id))
#######################################
# Data Storage Class - ProjectRecords #
#######################################
class ProjectRecords(TopicalRecords):
    """Topical archive for project records.

    Projects are keyed by the (collab_id, project_id) pair; the record
    types listed in ``__init__`` are passed through to the TopicalRecords
    base as related subjects.
    """

    def __init__(self, db_path: str):
        related_subjects = [
            "Experiment",
            "Run",
            "Registration",
            "Tag",
            "Alignment",
            "Model",
            "Validation",
            "Prediction",
        ]
        super().__init__("Project", "project_id", db_path, *related_subjects)

    ###########
    # Helpers #
    ###########

    def __generate_key(
        self,
        collab_id: str,
        project_id: str
    ) -> Dict[str, str]:
        """Compose the composite key identifying one project."""
        return dict(collab_id=collab_id, project_id=project_id)

    ##################
    # Core Functions #
    ##################

    def create(
        self,
        collab_id: str,
        project_id: str,
        details: dict
    ) -> dict:
        """Validate `details` against the project schema and archive it."""
        jsonschema.validate(details, schemas["project_schema"])
        record = {'key': self.__generate_key(collab_id, project_id)}
        record.update(details)
        return super().create(record)

    def read(
        self,
        collab_id: str,
        project_id: str
    ) -> dict:
        """Fetch the archived project, if present."""
        return super().read(self.__generate_key(collab_id, project_id))

    def update(
        self,
        collab_id: str,
        project_id: str,
        updates: dict
    ) -> dict:
        """Apply `updates` to the archived project."""
        return super().update(self.__generate_key(collab_id, project_id), updates)

    def delete(
        self,
        collab_id: str,
        project_id: str
    ) -> dict:
        """Remove the archived project."""
        return super().delete(self.__generate_key(collab_id, project_id))
###########################################
# Data Storage Class - ParticipantRecords #
###########################################
class ParticipantRecords(TopicalRecords):
    """Topical archive for participant records, keyed by participant_id."""

    def __init__(self, db_path: str):
        super().__init__(
            "Participant",
            "participant_id",
            db_path,
            *["Registration", "Tag", "Alignment", "Validation", "Prediction"]
        )
    ###########
    # Helpers #
    ###########
    def __generate_key(self, participant_id: str) -> Dict[str, str]:
        # A participant is identified by its ID alone (no collaboration scope).
        return {"participant_id": participant_id}
    ##################
    # Core Functions #
    ##################
    def create(self, participant_id: str, details: dict) -> dict:
        """Validate `details` against the participant schema and archive it."""
        # Check that new details specified conforms to project schema
        jsonschema.validate(details, schemas["participant_schema"])
        # NOTE(review): input validation via `assert` is stripped under
        # `python -O`; consider raising an explicit exception instead
        # (kept as-is here because callers may rely on AssertionError).
        assert participant_id == details["id"]
        participant_key = self.__generate_key(participant_id)
        new_participant = {'key': participant_key}
        new_participant.update(details)
        return super().create(new_participant)
    def read(self, participant_id: str) -> dict:
        """Fetch the archived participant, if present."""
        participant_key = self.__generate_key(participant_id)
        return super().read(participant_key)
    def update(self, participant_id: str, updates: dict) -> dict:
        """Apply `updates` to the archived participant."""
        participant_key = self.__generate_key(participant_id)
        return super().update(participant_key, updates)
    def delete(self, participant_id: str) -> dict:
        """Remove the archived participant."""
        participant_key = self.__generate_key(participant_id)
        return super().delete(participant_key)
##########################################
# Data Storage Class - ExperimentRecords #
##########################################
class ExperimentRecords(TopicalRecords):
    """Topical archive for experiment records.

    Experiments are keyed by the (collab_id, project_id, expt_id) triple;
    the record types listed in ``__init__`` are passed through to the
    TopicalRecords base as related subjects.
    """

    def __init__(self, db_path: str):
        related_subjects = ["Run", "Model", "Validation", "Prediction"]
        super().__init__("Experiment", "expt_id", db_path, *related_subjects)

    ###########
    # Helpers #
    ###########

    def __generate_key(
        self,
        collab_id: str,
        project_id: str,
        expt_id: str
    ) -> Dict[str, str]:
        """Compose the composite key identifying one experiment."""
        return {
            'collab_id': collab_id,
            'project_id': project_id,
            'expt_id': expt_id,
        }

    ##################
    # Core Functions #
    ##################

    def create(
        self,
        collab_id: str,
        project_id: str,
        expt_id: str,
        details: dict
    ) -> dict:
        """Validate `details` against the experiment schema and archive it."""
        jsonschema.validate(details, schemas["experiment_schema"])
        record = {'key': self.__generate_key(collab_id, project_id, expt_id)}
        record.update(details)
        return super().create(record)

    def read(
        self,
        collab_id: str,
        project_id: str,
        expt_id: str
    ) -> dict:
        """Fetch the archived experiment, if present."""
        return super().read(self.__generate_key(collab_id, project_id, expt_id))

    def update(
        self,
        collab_id: str,
        project_id: str,
        expt_id: str,
        updates: dict
    ) -> dict:
        """Apply `updates` to the archived experiment."""
        key = self.__generate_key(collab_id, project_id, expt_id)
        return super().update(key, updates)

    def delete(
        self,
        collab_id: str,
        project_id: str,
        expt_id: str
    ) -> dict:
        """Remove the archived experiment."""
        return super().delete(self.__generate_key(collab_id, project_id, expt_id))
###################################
# Data Storage Class - RunRecords #
###################################
class RunRecords(TopicalRecords):
    """Topical archive for run records.

    Runs are keyed by the (collab_id, project_id, expt_id, run_id)
    quadruple; the record types listed in ``__init__`` are passed through
    to the TopicalRecords base as related subjects.
    """

    def __init__(self, db_path: str):
        related_subjects = ["Model", "Validation", "Prediction"]
        super().__init__("Run", "run_id", db_path, *related_subjects)

    ###########
    # Helpers #
    ###########

    def __generate_key(
        self,
        collab_id: str,
        project_id: str,
        expt_id: str,
        run_id: str
    ) -> Dict[str, str]:
        """Compose the composite key identifying one run."""
        return {
            'collab_id': collab_id,
            'project_id': project_id,
            'expt_id': expt_id,
            'run_id': run_id,
        }

    ##################
    # Core Functions #
    ##################

    def create(
        self,
        collab_id: str,
        project_id: str,
        expt_id: str,
        run_id: str,
        details: dict
    ) -> dict:
        """Validate `details` against the run schema and archive it."""
        jsonschema.validate(details, schemas["run_schema"])
        record = {'key': self.__generate_key(collab_id, project_id, expt_id, run_id)}
        record.update(details)
        return super().create(record)

    def read(
        self,
        collab_id: str,
        project_id: str,
        expt_id: str,
        run_id: str,
    ) -> dict:
        """Fetch the archived run, if present."""
        return super().read(self.__generate_key(collab_id, project_id, expt_id, run_id))

    def update(
        self,
        collab_id: str,
        project_id: str,
        expt_id: str,
        run_id: str,
        updates: dict
    ) -> dict:
        """Apply `updates` to the archived run."""
        key = self.__generate_key(collab_id, project_id, expt_id, run_id)
        return super().update(key, updates)

    def delete(
        self,
        collab_id: str,
        project_id: str,
        expt_id: str,
        run_id: str,
    ) -> dict:
        """Remove the archived run."""
        return super().delete(self.__generate_key(collab_id, project_id, expt_id, run_id))
#################################################
# Data Storage Association class - Registration #
#################################################
class RegistrationRecords(AssociationRecords):
    """ RegistrationRecords documents associative records as a means to allow
    participants to interact with different projects and vice-versa.
    Note: Associative records DO NOT have user-allocated IDs! They are
    auto-generated to be used as foreign keys in other downstream
    associative records. Registrations are associative records and
    will not have a registration ID as part of its composite key.
    Instead it will exist under the 'link' key.
    """
    def __init__(self, db_path: str):
        super().__init__(
            subject="Registration",
            identifier="registration_id",
            db_path=db_path,
            # relations=["Project", "Participant", "Tag", "Alignment"]
            relations=["Tag", "Alignment"]
        ) # no upstream relations
        # Note: Registration has 2 hidden upstream relations
    ###########
    # Helpers #
    ###########
    def __generate_key(
        self,
        collab_id: str,
        project_id: str,
        participant_id: str
    ) -> Dict[str, str]:
        # Composite key: a registration is identified by the
        # (collaboration, project, participant) triple, not by its own ID.
        return {
            'collab_id': collab_id,
            'project_id': project_id,
            'participant_id': participant_id,
        }
    def __cross_link_subjects(
        self,
        collab_id: str,
        project_id: str,
        participant_id: str,
        concise: bool = True
    ):
        # Fetch the three upstream records a registration links together and
        # bundle them under 'collaboration' / 'project' / 'participant'.
        # When `concise` is True, their nested 'relations' entries are
        # stripped to keep the payload small.
        relevant_records = {}
        # Retrieve relevant collaboration using specified collboration ID
        collaboration_records = CollaborationRecords(db_path=self.db_path)
        relevant_collaboration = collaboration_records.read(
            collab_id=collab_id
        )
        # Retrieve relevant project using specified project ID
        project_records = ProjectRecords(db_path=self.db_path)
        relevant_project = project_records.read(
            collab_id=collab_id,
            project_id=project_id
        )
        # Retrieve relevant participants using specified participant ID
        participant_records = ParticipantRecords(db_path=self.db_path)
        relevant_participant = participant_records.read(
            participant_id=participant_id
        )
        # Remove details from internals nesting relations
        if concise:
            relevant_collaboration.pop('relations')
            relevant_project.pop('relations')
            relevant_participant.pop('relations')
        relevant_records['collaboration'] = relevant_collaboration
        relevant_records['project'] = relevant_project
        relevant_records['participant'] = relevant_participant
        return relevant_records
    ##################
    # Core Functions #
    ##################
    def create(
        self,
        collab_id: str,
        project_id: str,
        participant_id: str,
        details: dict
    ) -> dict:
        """Validate `details` against the registration schema and archive it."""
        # Check that new details specified conforms to experiment schema
        jsonschema.validate(details, schemas["registration_schema"])
        registration_key = self.__generate_key(collab_id, project_id, participant_id)
        new_registration = {'key': registration_key}
        new_registration.update(details)
        return super().create(new_registration)
    def read_all(self, filter: dict = {}) -> dict:
        """Fetch all registrations, each cross-linked with its upstream records."""
        # NOTE(review): mutable default `filter={}` is only read here, so it
        # is currently harmless — but confirm the base class never mutates it.
        all_registrations = super().read_all(filter=filter)
        cross_linked_registrations = []
        for registration in all_registrations:
            registration_key = registration['key']
            relevant_records = self.__cross_link_subjects(**registration_key)
            registration.update(relevant_records)
            cross_linked_registrations.append(registration)
        return cross_linked_registrations
    def read(
        self,
        collab_id: str,
        project_id: str,
        participant_id: str
    ) -> dict:
        """Fetch one registration, cross-linked with its upstream records."""
        registration_key = self.__generate_key(collab_id, project_id, participant_id)
        registration = super().read(registration_key)
        # Only cross-link when a record was actually found.
        if registration:
            relevant_records = self.__cross_link_subjects(**registration_key)
            registration.update(relevant_records)
        return registration
    def update(
        self,
        collab_id: str,
        project_id: str,
        participant_id: str,
        updates: dict
    ) -> dict:
        """Apply `updates` to the archived registration."""
        registration_key = self.__generate_key(collab_id, project_id, participant_id)
        return super().update(registration_key, updates)
    def delete(
        self,
        collab_id: str,
        project_id: str,
        participant_id: str
    ) -> dict:
        """Remove the archived registration."""
        registration_key = self.__generate_key(collab_id, project_id, participant_id)
        return super().delete(registration_key)
###############################################
# Data Storage Association class - TagRecords #
###############################################
class TagRecords(AssociationRecords):
    """Associative archive for data tags.

    A tag records which datasets a registered participant contributes to a
    project during FL training. Tags are auto-keyed associations: their
    composite key is the (collab_id, project_id, participant_id) triple
    rather than a user-allocated tag ID.
    """

    def __init__(self, db_path: str):
        super().__init__(
            "Tag",
            "tag_id",
            db_path,
            ["Alignment"],      # downstream relations
            "Registration",     # upstream relations
        )

    ###########
    # Helpers #
    ###########

    def __generate_key(
        self,
        collab_id: str,
        project_id: str,
        participant_id: str
    ) -> Dict[str, str]:
        """Compose the composite key identifying one tag association."""
        return {
            'collab_id': collab_id,
            'project_id': project_id,
            'participant_id': participant_id,
        }

    ##################
    # Core Functions #
    ##################

    def create(
        self,
        collab_id: str,
        project_id: str,
        participant_id: str,
        details: dict
    ) -> dict:
        """Validate `details` against the tag schema and archive it."""
        jsonschema.validate(details, schemas["tag_schema"])
        record = {'key': self.__generate_key(collab_id, project_id, participant_id)}
        record.update(details)
        return super().create(record)

    def read(
        self,
        collab_id: str,
        project_id: str,
        participant_id: str
    ) -> dict:
        """Fetch the archived tag, if present."""
        return super().read(self.__generate_key(collab_id, project_id, participant_id))

    def update(
        self,
        collab_id: str,
        project_id: str,
        participant_id: str,
        updates: dict
    ) -> dict:
        """Apply `updates` to the archived tag."""
        key = self.__generate_key(collab_id, project_id, participant_id)
        return super().update(key, updates)

    def delete(
        self,
        collab_id: str,
        project_id: str,
        participant_id: str
    ) -> dict:
        """Remove the archived tag."""
        return super().delete(self.__generate_key(collab_id, project_id, participant_id))
| 12,330 | 89 | 1,309 |
2892d60bd5cafa38797d03497b75f39a0699992b | 1,368 | py | Python | src/datasets/ConvertLabels.py | kspalm/mlsvm | 808f5b81f2b19d32a1e472bd9b358f7a81e73736 | [
"BSD-2-Clause"
] | 25 | 2016-10-06T18:57:21.000Z | 2022-02-09T20:43:59.000Z | src/datasets/ConvertLabels.py | kspalm/mlsvm | 808f5b81f2b19d32a1e472bd9b358f7a81e73736 | [
"BSD-2-Clause"
] | 7 | 2017-08-22T18:45:19.000Z | 2019-09-20T20:09:35.000Z | src/datasets/ConvertLabels.py | kspalm/mlsvm | 808f5b81f2b19d32a1e472bd9b358f7a81e73736 | [
"BSD-2-Clause"
] | 14 | 2016-06-02T03:47:27.000Z | 2022-03-23T01:36:12.000Z | """
Author: Ehsan Sadrfaridpour
Date: Aug 24, 2018
Purpose: map other labels to -1 and 1 labels,
make sure the number of 1 labels are smaller than the number of -1 labels for MLSVM framework
Usage: define the preferred mapping in the label_map which is a dictionary.
The key is the old/current label(s) in the file which needs to change and the
value(s) are the new labels. For the labels which are ok, you can skip them from
adding them to this dictionary and they will be ignored from conversion.
"""
import pandas as pd
import os

# --- configuration --------------------------------------------------------
ds_path = '/scratch2/esadrfa/mlsvm_data'
in_ds_fname = 'susy.csv'
out_ds_fname = 'susy_fixed_label.csv'

# BUGFIX: the original passed the undefined name `ds_fname` here, raising a
# NameError before any conversion ran; the intended file is `in_ds_fname`.
# NOTE(review): `df` is never used afterwards — the conversion below streams
# the file line by line; confirm whether this eager read is still wanted.
df = pd.read_csv(os.path.join(ds_path, in_ds_fname),
                 header=None, sep=' ', error_bad_lines=False, engine='c')

sep = ' '
# Old label -> new label; labels absent from this dict pass through as-is.
label_map = {'0': '-1'}

out_file = open(os.path.join(ds_path, out_ds_fname), 'w')
with open(os.path.join(ds_path, in_ds_fname), 'r') as in_file:
    for idx, line in enumerate(in_file):
        # Progress marker every 100k lines.
        if not idx % 100000: print(idx, end=',')
        curr_data = line.split(sep)
        # Remap the label (first field) when a mapping is defined for it.
        if curr_data[0] in label_map:
            curr_data[0] = label_map[curr_data[0]]
        for item in curr_data:
            out_file.write(item + sep)
        # out_file.write('\n') # it has the \n already, this cause empty lines
out_file.close()
print('convert is finished successfully!')
| 31.813953 | 94 | 0.682018 | """
Author: Ehsan Sadrfaridpour
Date: Aug 24, 2018
Purpose: map other labels to -1 and 1 labels,
make sure the number of 1 labels are smaller than the number of -1 labels for MLSVM framework
Usage: define the preferred mapping in the label_map which is a dictionary.
The key is the old/current label(s) in the file which needs to change and the
value(s) are the new labels. For the labels which are ok, you can skip them from
adding them to this dictionary and they will be ignored from conversion.
"""
import pandas as pd
import os

# --- configuration --------------------------------------------------------
ds_path = '/scratch2/esadrfa/mlsvm_data'
in_ds_fname = 'susy.csv'
out_ds_fname = 'susy_fixed_label.csv'

# BUGFIX: the original passed the undefined name `ds_fname` here, raising a
# NameError before any conversion ran; the intended file is `in_ds_fname`.
# NOTE(review): `df` is never used afterwards — the conversion below streams
# the file line by line; confirm whether this eager read is still wanted.
df = pd.read_csv(os.path.join(ds_path, in_ds_fname),
                 header=None, sep=' ', error_bad_lines=False, engine='c')

sep = ' '
# Old label -> new label; labels absent from this dict pass through as-is.
label_map = {'0': '-1'}

out_file = open(os.path.join(ds_path, out_ds_fname), 'w')
with open(os.path.join(ds_path, in_ds_fname), 'r') as in_file:
    for idx, line in enumerate(in_file):
        # Progress marker every 100k lines.
        if not idx % 100000: print(idx, end=',')
        curr_data = line.split(sep)
        # Remap the label (first field) when a mapping is defined for it.
        if curr_data[0] in label_map:
            curr_data[0] = label_map[curr_data[0]]
        for item in curr_data:
            out_file.write(item + sep)
        # out_file.write('\n') # it has the \n already, this cause empty lines
out_file.close()
print('convert is finished successfully!')
| 0 | 0 | 0 |
31a56f406c538f49948b501841594e3653dd36d6 | 3,976 | py | Python | django/contrib/auth/tests/context_processors.py | svn2github/django | bcacf4b7f26b9898d21816d8f05cb58f2a3730a3 | [
"BSD-3-Clause"
] | 2 | 2020-11-28T20:04:33.000Z | 2021-07-12T19:42:45.000Z | django/contrib/auth/tests/context_processors.py | akaariai/django-old | 45b80c420d6655ec5b86bea3b3c17b4adaa61291 | [
"BSD-3-Clause"
] | 1 | 2019-02-03T08:41:30.000Z | 2019-02-03T08:41:30.000Z | django/contrib/auth/tests/context_processors.py | akaariai/django-old | 45b80c420d6655ec5b86bea3b3c17b4adaa61291 | [
"BSD-3-Clause"
] | null | null | null | import os
from django.conf import global_settings
from django.contrib.auth import authenticate
from django.db.models import Q
from django.template import context
from django.test import TestCase
from django.test.utils import override_settings
@override_settings(
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(__file__), 'templates'),
),
USE_TZ=False, # required for loading the fixture
)
class AuthContextProcessorTests(TestCase):
"""
Tests for the ``django.contrib.auth.context_processors.auth`` processor
"""
urls = 'django.contrib.auth.tests.urls'
fixtures = ['context-processors-users.xml']
@override_settings(
MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES,
TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS,
)
def test_session_not_accessed(self):
"""
Tests that the session is not accessed simply by including
the auth context processor
"""
response = self.client.get('/auth_processor_no_attr_access/')
self.assertContains(response, "Session not accessed")
@override_settings(
MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES,
TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS,
)
def test_session_is_accessed(self):
"""
Tests that the session is accessed if the auth context processor
is used and relevant attributes accessed.
"""
response = self.client.get('/auth_processor_attr_access/')
self.assertContains(response, "Session accessed")
def test_user_attrs(self):
"""
Test that the lazy objects returned behave just like the wrapped objects.
"""
# These are 'functional' level tests for common use cases. Direct
# testing of the implementation (SimpleLazyObject) is in the 'utils'
# tests.
self.client.login(username='super', password='secret')
user = authenticate(username='super', password='secret')
response = self.client.get('/auth_processor_user/')
self.assertContains(response, "unicode: super")
self.assertContains(response, "id: 100")
self.assertContains(response, "username: super")
# bug #12037 is tested by the {% url %} in the template:
self.assertContains(response, "url: /userpage/super/")
# See if this object can be used for queries where a Q() comparing
# a user can be used with another Q() (in an AND or OR fashion).
# This simulates what a template tag might do with the user from the
# context. Note that we don't need to execute a query, just build it.
#
# The failure case (bug #12049) on Python 2.4 with a LazyObject-wrapped
# User is a fatal TypeError: "function() takes at least 2 arguments
# (0 given)" deep inside deepcopy().
#
# Python 2.5 and 2.6 succeeded, but logged internally caught exception
# spew:
#
# Exception RuntimeError: 'maximum recursion depth exceeded while
# calling a Python object' in <type 'exceptions.AttributeError'>
# ignored"
query = Q(user=response.context['user']) & Q(someflag=True)
# Tests for user equality. This is hard because User defines
# equality in a non-duck-typing way
# See bug #12060
self.assertEqual(response.context['user'], user)
self.assertEqual(user, response.context['user'])
| 41.416667 | 81 | 0.669266 | import os
from django.conf import global_settings
from django.contrib.auth import authenticate
from django.db.models import Q
from django.template import context
from django.test import TestCase
from django.test.utils import override_settings
@override_settings(
    TEMPLATE_DIRS=(
        os.path.join(os.path.dirname(__file__), 'templates'),
    ),
    USE_TZ=False, # required for loading the fixture
)
class AuthContextProcessorTests(TestCase):
    """
    Tests for the ``django.contrib.auth.context_processors.auth`` processor
    """
    urls = 'django.contrib.auth.tests.urls'
    fixtures = ['context-processors-users.xml']
    @override_settings(
        MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES,
        TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS,
    )
    def test_session_not_accessed(self):
        """
        Tests that the session is not accessed simply by including
        the auth context processor
        """
        response = self.client.get('/auth_processor_no_attr_access/')
        self.assertContains(response, "Session not accessed")
    @override_settings(
        MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES,
        TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS,
    )
    def test_session_is_accessed(self):
        """
        Tests that the session is accessed if the auth context processor
        is used and relevant attributes accessed.
        """
        response = self.client.get('/auth_processor_attr_access/')
        self.assertContains(response, "Session accessed")
    def test_perms_attrs(self):
        """
        Tests that the 'perms' context variable reflects the logged-in
        superuser's auth permissions (rendered by the test template).
        """
        self.client.login(username='super', password='secret')
        response = self.client.get('/auth_processor_perms/')
        self.assertContains(response, "Has auth permissions")
    def test_message_attrs(self):
        """
        Tests that messages set for the logged-in user are exposed to the
        template via the context processor.
        """
        self.client.login(username='super', password='secret')
        response = self.client.get('/auth_processor_messages/')
        self.assertContains(response, "Message 1")
    def test_user_attrs(self):
        """
        Test that the lazy objects returned behave just like the wrapped objects.
        """
        # These are 'functional' level tests for common use cases.  Direct
        # testing of the implementation (SimpleLazyObject) is in the 'utils'
        # tests.
        self.client.login(username='super', password='secret')
        user = authenticate(username='super', password='secret')
        response = self.client.get('/auth_processor_user/')
        self.assertContains(response, "unicode: super")
        self.assertContains(response, "id: 100")
        self.assertContains(response, "username: super")
        # bug #12037 is tested by the {% url %} in the template:
        self.assertContains(response, "url: /userpage/super/")
        # See if this object can be used for queries where a Q() comparing
        # a user can be used with another Q() (in an AND or OR fashion).
        # This simulates what a template tag might do with the user from the
        # context. Note that we don't need to execute a query, just build it.
        #
        # The failure case (bug #12049) on Python 2.4 with a LazyObject-wrapped
        # User is a fatal TypeError: "function() takes at least 2 arguments
        # (0 given)" deep inside deepcopy().
        #
        # Python 2.5 and 2.6 succeeded, but logged internally caught exception
        # spew:
        #
        # Exception RuntimeError: 'maximum recursion depth exceeded while
        # calling a Python object' in <type 'exceptions.AttributeError'>
        # ignored"
        query = Q(user=response.context['user']) & Q(someflag=True)
        # Tests for user equality. This is hard because User defines
        # equality in a non-duck-typing way
        # See bug #12060
        self.assertEqual(response.context['user'], user)
        self.assertEqual(user, response.context['user'])
| 378 | 0 | 54 |
7204828998647edf6ac323e972bb569a6bb9580d | 11,218 | py | Python | file_transformer.py | benkehoe/file-transformer | 9c0d9be0048f4c02eb116f1c672cb5049b858106 | [
"Apache-2.0"
] | null | null | null | file_transformer.py | benkehoe/file-transformer | 9c0d9be0048f4c02eb116f1c672cb5049b858106 | [
"Apache-2.0"
] | 1 | 2018-02-08T19:04:07.000Z | 2018-02-08T19:04:07.000Z | file_transformer.py | benkehoe/file-transformer | 9c0d9be0048f4c02eb116f1c672cb5049b858106 | [
"Apache-2.0"
] | null | null | null | """ Utility functions for creating Python scripts that expect to turn one file into another, or use stdin/stdout as part of a pipeline.
Copyright 2018 Ben Kehoe
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__version__ = "1.2.0"
import argparse
import sys
import six
DEFAULT_TO_BINARY_MODE = False
def main(processor,
loader=None,
dumper=None,
parser=None,
args=None,
pre_parse_hook=None,
post_parse_hook=None,
positional_args=None,
parse_known_args=None):
"""Setup the appropriate input and output based on the command line args and
run the given callable processor. The basic arguments allow the program to be
called in the following ways:
prog [-i input_file] [-o output_file]
prog input_file [-o output_file]
prog input_file output_file
The latter two formats can be disabled by specifying positional_args=False
If there is no input or output file given, it will read from stdin or write
to stdout, respectively.
An argparse.ArgumentParser can be provided, as can the arguments to be parsed.
By default, the input is read into a bytestring. If a callable loader is
provided, it is called with the file-like input stream and the parsed args
object and should return the input to pass to the processor.
The processor is called with the input (bytestring or output from loader) and
the parsed args object, and should return the output to write to the file,
normally a bytestring.
If the output of the processor can't be directly written to the output stream,
a callable dumper can be provided, which takes the output from processor, the
output stream, and the parsed args object.
By default, the files are opened in text mode. If binary is desired,
the module field DEFAULT_TO_BINARY_MODE can be set to true. If processor,
loader, or dumper have an attribute named binary, that will be used instead.
Errors are printed to stdout unless the -q flag is given.
"""
xformer = _FileTransformer(
parser=parser,
args_to_parse=args,
pre_parse_hook=pre_parse_hook,
post_parse_hook=post_parse_hook,
positional_args=positional_args,
parse_known_args=parse_known_args)
return xformer.run(processor, loader=loader, dumper=dumper)
def streaming_main(processor,
parser=None,
args=None,
pre_parse_hook=None,
post_parse_hook=None,
positional_args=None,
parse_known_args=None):
"""Identical to main(), but the processor takes as input the file-like
input stream and output stream, and the parsed args object."""
xformer = _FileTransformer(
parser=parser,
args_to_parse=args,
pre_parse_hook=pre_parse_hook,
post_parse_hook=post_parse_hook,
positional_args=positional_args,
parse_known_args=parse_known_args)
return xformer.stream(processor)
def get_io_functions_from_lib(lib, load_func_name='load', dump_func_name='dump', load_kwargs={}, dump_kwargs={}):
"""Helper to create loader and dumper functions for libraries"""
return loader, dumper
def get_pickle_io(load_kwargs={}, dump_kwargs={}, picklelib=None):
"""Returns a loader and dumper for Pickle files"""
return get_io_functions_from_lib(_get_lib(picklelib, 'pickle'), 'load', 'dump', load_kwargs=load_kwargs, dump_kwargs=dump_kwargs)
def get_json_io(load_kwargs={}, dump_kwargs={}, jsonlib=None):
"""Returns a loader and dumper for JSON"""
return get_io_functions_from_lib(_get_lib(jsonlib, 'json'), 'load', 'dump', load_kwargs=load_kwargs, dump_kwargs=dump_kwargs)
def get_yaml_io(load_kwargs={}, dump_kwargs={}, safe=False, yamllib=None):
"""Returns a loader and dumper for YAML"""
load_func_name = 'safe_load' if safe else 'load'
dump_func_name = 'safe_dump' if safe else 'dump'
loader, dumper = get_io_functions_from_lib(_get_lib(yamllib, 'yaml'), load_func_name, dump_func_name, load_kwargs=load_kwargs, dump_kwargs=dump_kwargs)
dumper.binary = False
return loader, dumper
| 36.660131 | 155 | 0.643876 | """ Utility functions for creating Python scripts that expect to turn one file into another, or use stdin/stdout as part of a pipeline.
Copyright 2018 Ben Kehoe
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__version__ = "1.2.0"
import argparse
import sys
import six
DEFAULT_TO_BINARY_MODE = False
class _FileTransformer(object):
    """Resolves where input comes from and where output goes.

    Parses the command-line arguments (positional files, -i/-o options, or
    stdin/stdout fallbacks) and exposes run()/stream() to execute a
    user-supplied processor between the two streams.
    """
    @classmethod
    def description(cls):
        """The description to pass to the ArgumentParser"""
        return None
    def __init__(self,
                 parser=None,
                 args_to_parse=None,
                 pre_parse_hook=None,
                 post_parse_hook=None,
                 positional_args=None,
                 parse_known_args=None,
                 ):
        self.parser = parser or argparse.ArgumentParser(description=self.description())
        self.args_to_parse = args_to_parse
        if parse_known_args is None:
            parse_known_args = False
        # Positional 'files' arguments default to enabled unless we are
        # forwarding unknown args (they would be ambiguous in that case).
        if positional_args is None:
            positional_args = not parse_known_args
        if positional_args:
            self.parser.add_argument('files', nargs='*')
        self.parser.add_argument('-i', '--input', metavar='FILE')
        self.parser.add_argument('-o', '--output', metavar='FILE')
        self.parser.add_argument('-q', '--quiet', action='store_true', help="Suppress error messages")
        if pre_parse_hook:
            pre_parse_hook(self.parser)
        if parse_known_args:
            # Unrecognized args are preserved for the caller on args.remaining_args.
            self.args, remaining_args = self.parser.parse_known_args(args=args_to_parse)
            self.args.remaining_args = remaining_args
        else:
            self.args = self.parser.parse_args(args=args_to_parse)
        if not positional_args:
            self.args.files = []
        self.input = self.args.input
        self.output = self.args.output
        self.files = self.args.files
        if post_parse_hook:
            post_parse_hook(self.parser, self.args)
        self.verbose = not self.args.quiet
        # At most two positional files (input, output); mixing positionals
        # with -i/-o would be ambiguous, so it is rejected.
        if len(self.files) >= 3:
            self.exit(1, "Too many inputs!")
        if self.files and (self.input or self.output):
            self.exit(1, "Can't specify both args and options")
    def exit(self, code, message=None):
        """Exit via the parser; the message is suppressed under -q/--quiet."""
        if not self.verbose or not message:
            message = None
        else:
            message = message + '\n'
        self.parser.exit(code, message)
    def _open_file(self, name, mode):
        # buffering=1 requests line buffering.  NOTE(review): for the 'rb'/'wb'
        # modes Python treats buffering=1 as unsupported and warns -- confirm
        # whether line buffering was intended for the binary case.
        try:
            return open(name, mode, 1)
        except Exception as e:
            self.exit(2, "Could not open file {}: {}".format(name, e))
    def _open_input_stream(self, binary=None):
        """Open the chosen input: -i FILE, first positional file, or stdin."""
        if binary is None:
            binary = DEFAULT_TO_BINARY_MODE
        mode = 'rb' if binary else 'r'
        if self.input:
            input_stream = self._open_file(self.input, mode)
        elif len(self.files) >= 1:
            input_stream = self._open_file(self.files[0],mode)
        else:
            input_stream = sys.stdin
        return input_stream
    def _open_output_stream(self, binary=None):
        """Open the chosen output: -o FILE, second positional file, or stdout."""
        if binary is None:
            binary = DEFAULT_TO_BINARY_MODE
        mode = 'wb' if binary else 'w'
        if self.output:
            output_stream = self._open_file(self.output, mode)
        elif len(self.files) == 2:
            output_stream = self._open_file(self.files[1], mode)
        else:
            output_stream = sys.stdout
        return output_stream
    def run(self,
            processor,
            loader=None,
            dumper=None):
        """Read input, run processor(input, args), and write its result.

        Optional loader/dumper callables override the raw read/write; a
        ``binary`` attribute on processor/loader/dumper selects stream mode.
        """
        input_binary = DEFAULT_TO_BINARY_MODE
        output_binary = DEFAULT_TO_BINARY_MODE
        # Most specific wins: loader/dumper binary flags override processor's.
        if hasattr(processor, 'binary'):
            input_binary = getattr(processor, 'binary')
            output_binary = getattr(processor, 'binary')
        if hasattr(loader, 'binary'):
            input_binary = getattr(loader, 'binary')
        if hasattr(dumper, 'binary'):
            output_binary = getattr(dumper, 'binary')
        try:
            with self._open_input_stream(binary=input_binary) as input_stream:
                if loader:
                    input = loader(input_stream, self.args)
                else:
                    input = input_stream.read()
                output = processor(input, self.args)
            with self._open_output_stream(binary=output_binary) as output_stream:
                if dumper:
                    dumper(output, output_stream, self.args)
                else:
                    output_stream.write(output)
        except Exception as e:
            # Any processing failure exits with code 3; traceback only when verbose.
            if self.verbose:
                import traceback
                traceback.print_exception(*sys.exc_info())
            self.exit(3, str(e))
        self.exit(0)
    def stream(self, processor):
        """Run processor(input_stream, output_stream, args) with both streams open."""
        binary = getattr(processor, 'binary', DEFAULT_TO_BINARY_MODE)
        try:
            with self._open_input_stream(binary=binary) as input_stream, self._open_output_stream(binary=binary) as output_stream:
                processor(input_stream, output_stream, self.args)
        except Exception as e:
            if self.verbose:
                import traceback
                traceback.print_exception(*sys.exc_info())
            self.exit(3, str(e))
        self.exit(0)
def main(processor,
         loader=None,
         dumper=None,
         parser=None,
         args=None,
         pre_parse_hook=None,
         post_parse_hook=None,
         positional_args=None,
         parse_known_args=None):
    """Setup the appropriate input and output based on the command line args and
    run the given callable processor. The basic arguments allow the program to be
    called in the following ways:
    prog [-i input_file] [-o output_file]
    prog input_file [-o output_file]
    prog input_file output_file
    The latter two formats can be disabled by specifying positional_args=False
    If there is no input or output file given, it will read from stdin or write
    to stdout, respectively.
    An argparse.ArgumentParser can be provided, as can the arguments to be parsed.
    By default, the input is read into a bytestring. If a callable loader is
    provided, it is called with the file-like input stream and the parsed args
    object and should return the input to pass to the processor.
    The processor is called with the input (bytestring or output from loader) and
    the parsed args object, and should return the output to write to the file,
    normally a bytestring.
    If the output of the processor can't be directly written to the output stream,
    a callable dumper can be provided, which takes the output from processor, the
    output stream, and the parsed args object.
    By default, the files are opened in text mode. If binary is desired,
    the module field DEFAULT_TO_BINARY_MODE can be set to true. If processor,
    loader, or dumper have an attribute named binary, that will be used instead.
    Errors are printed to stdout unless the -q flag is given.
    """
    # All argument/stream handling lives in _FileTransformer; this wrapper
    # only constructs it and delegates.
    xformer = _FileTransformer(
        parser=parser,
        args_to_parse=args,
        pre_parse_hook=pre_parse_hook,
        post_parse_hook=post_parse_hook,
        positional_args=positional_args,
        parse_known_args=parse_known_args)
    return xformer.run(processor, loader=loader, dumper=dumper)
def streaming_main(processor,
                   parser=None,
                   args=None,
                   pre_parse_hook=None,
                   post_parse_hook=None,
                   positional_args=None,
                   parse_known_args=None):
    """Identical to main(), but the processor takes as input the file-like
    input stream and output stream, and the parsed args object."""
    # Same construction as main(), but dispatches to stream() so the
    # processor drives the I/O itself instead of receiving a loaded value.
    xformer = _FileTransformer(
        parser=parser,
        args_to_parse=args,
        pre_parse_hook=pre_parse_hook,
        post_parse_hook=post_parse_hook,
        positional_args=positional_args,
        parse_known_args=parse_known_args)
    return xformer.stream(processor)
def _get_lib(lib, default_lib_name):
if lib:
return lib
import importlib
return importlib.import_module(default_lib_name)
def get_io_functions_from_lib(lib, load_func_name='load', dump_func_name='dump', load_kwargs={}, dump_kwargs={}):
    """Build a (loader, dumper) pair that delegates to *lib*'s load/dump
    functions, matching the signatures expected by main(): the loader gets
    (input_stream, args) and the dumper gets (output, output_stream, args);
    *args* is ignored by both."""
    def _load(stream, args):
        load_fn = getattr(lib, load_func_name)
        return load_fn(stream, **load_kwargs)
    def _dump(obj, stream, args):
        dump_fn = getattr(lib, dump_func_name)
        return dump_fn(obj, stream, **dump_kwargs)
    return _load, _dump
def get_pickle_io(load_kwargs={}, dump_kwargs={}, picklelib=None):
    """Returns a loader and dumper for Pickle files"""
    # NOTE(review): pickle streams are binary, but no ``binary`` attribute is
    # set on the returned callables, so DEFAULT_TO_BINARY_MODE governs the
    # stream mode -- confirm callers enable binary mode for pickle.
    return get_io_functions_from_lib(_get_lib(picklelib, 'pickle'), 'load', 'dump', load_kwargs=load_kwargs, dump_kwargs=dump_kwargs)
def get_json_io(load_kwargs={}, dump_kwargs={}, jsonlib=None):
    """Returns a loader and dumper for JSON"""
    # load_kwargs/dump_kwargs are forwarded verbatim to json.load/json.dump;
    # jsonlib may substitute a drop-in replacement module.
    return get_io_functions_from_lib(_get_lib(jsonlib, 'json'), 'load', 'dump', load_kwargs=load_kwargs, dump_kwargs=dump_kwargs)
def get_yaml_io(load_kwargs={}, dump_kwargs={}, safe=False, yamllib=None):
    """Returns a loader and dumper for YAML"""
    # safe=True selects yaml.safe_load/safe_dump, which restrict the tag set.
    load_func_name = 'safe_load' if safe else 'load'
    dump_func_name = 'safe_dump' if safe else 'dump'
    loader, dumper = get_io_functions_from_lib(_get_lib(yamllib, 'yaml'), load_func_name, dump_func_name, load_kwargs=load_kwargs, dump_kwargs=dump_kwargs)
    # Force a text-mode output stream (run() inspects this attribute).
    dumper.binary = False
    return loader, dumper
def get_ordered_yaml_io(safe=False, yamllib=None, OrderedDict=None):
    """Returns a loader and dumper for YAML that preserve mapping order.

    Mappings are loaded as OrderedDict, and OrderedDict instances are dumped
    as plain YAML mappings, so round-tripping keeps key order.

    safe: use yaml's Safe loader/dumper variants.
    yamllib: drop-in replacement for the ``yaml`` module.
    OrderedDict: replacement OrderedDict class (defaults to collections').
    """
    if not OrderedDict:
        from collections import OrderedDict
    yaml = _get_lib(yamllib, 'yaml')
    Loader = yaml.SafeLoader if safe else yaml.Loader
    Dumper = yaml.SafeDumper if safe else yaml.Dumper
    def loader(input_stream, args):
        class OrderedLoader(Loader):
            pass
        def constructor(loader, node):
            loader.flatten_mapping(node)
            return OrderedDict(loader.construct_pairs(node))
        OrderedLoader.add_constructor(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
            constructor)
        return yaml.load(input_stream, OrderedLoader)
    def dumper(output, output_stream, args):
        class OrderedDumper(Dumper):
            pass
        def representer(dumper, data):
            return dumper.represent_mapping(
                yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                data.items())
        OrderedDumper.add_representer(OrderedDict, representer)
        return yaml.dump(output, output_stream, OrderedDumper)
    # Consistency fix: match get_yaml_io().  yaml.dump emits str, so ask for a
    # text-mode output stream (run() reads this attribute); without it the
    # stream would be binary whenever DEFAULT_TO_BINARY_MODE is True.
    dumper.binary = False
    return loader, dumper
| 6,094 | 355 | 121 |
da1566932d6442875c94f95faaa598b3b1adcdab | 5,015 | py | Python | magicclass/widgets/pywidgets/dict.py | hanjinliu/magic-class | 2a9d8af3d385ec3870ebcade9f2dbc03115bed22 | [
"BSD-3-Clause"
] | 15 | 2021-09-07T10:18:59.000Z | 2022-03-23T14:55:45.000Z | magicclass/widgets/pywidgets/dict.py | hanjinliu/magic-class | 2a9d8af3d385ec3870ebcade9f2dbc03115bed22 | [
"BSD-3-Clause"
] | 12 | 2021-09-10T08:54:43.000Z | 2022-03-31T02:43:50.000Z | magicclass/widgets/pywidgets/dict.py | hanjinliu/magic-class | 2a9d8af3d385ec3870ebcade9f2dbc03115bed22 | [
"BSD-3-Clause"
] | 1 | 2022-02-13T15:51:51.000Z | 2022-02-13T15:51:51.000Z | from __future__ import annotations
from typing import Any, Iterable, MutableMapping
from qtpy.QtWidgets import QTableWidget, QTableWidgetItem
from .object import BaseWidget, ContextMenuMixin, PyObjectBound | 32.993421 | 87 | 0.577069 | from __future__ import annotations
from typing import Any, Iterable, MutableMapping
from qtpy.QtWidgets import QTableWidget, QTableWidgetItem
from .object import BaseWidget, ContextMenuMixin, PyObjectBound
class DictWidget(BaseWidget, MutableMapping):
    """A MutableMapping backed by a Qt table widget.

    Keys (str) appear as vertical-header labels; each value is wrapped in a
    PyTableWidgetItem stored in column 0.  ``self._dict`` tracks which row
    currently holds each key.  Double-clicking a cell runs the callbacks
    registered (on BaseWidget) for the value's type.
    """

    def __init__(self, value=None, **kwargs):
        super().__init__(**kwargs)
        self._tablewidget = PyTableWidget(self.native)
        self._tablewidget.setParentWidget(self)
        # Cells are read-only in the GUI; mutation goes through the mapping API.
        self._tablewidget.setEditTriggers(QTableWidget.NoEditTriggers)
        self._tablewidget.verticalHeader().setDefaultSectionSize(30)
        self._dict: dict[str, int] = {}  # mapping from key to row
        self.set_widget(self._tablewidget)

        @self._tablewidget.itemDoubleClicked.connect
        def _(item: PyTableWidgetItem):
            # Fire the callbacks registered for this object's type.  Prefer
            # the (obj, row) signature; fall back to (obj,) for callbacks
            # that only accept the object.
            type_ = type(item.obj)
            callbacks = self._callbacks.get(type_, [])
            self.running = True
            try:
                for callback in callbacks:
                    try:
                        callback(item.obj, self._tablewidget.row(item))
                    except TypeError:
                        callback(item.obj)
            finally:
                self.running = False

        if value is not None:
            self.update(dict(value))

    def __len__(self) -> int:
        return self._tablewidget.rowCount()

    @property
    def value(self) -> dict[str, Any]:
        """Snapshot of the contents as a plain dict of Python objects."""
        # BUGFIX: the original iterated ``self._dict`` directly (which yields
        # only keys, so unpacking ``k, row`` raised) and returned the Qt item
        # instead of the wrapped Python object.
        return {k: self._tablewidget.item(row, 0).obj
                for k, row in self._dict.items()}

    def __getitem__(self, k: str) -> Any:
        row = self._dict[k]
        return self._tablewidget.item(row, 0).obj

    def __setitem__(self, k: str, obj: Any) -> None:
        if not isinstance(k, str):
            raise ValueError("Can only use str type as keys.")
        if k in self._dict.keys():
            # Existing key: overwrite its row in place.
            row = self._dict[k]
        else:
            # New key: append a row; the single value column is created lazily.
            row = len(self)
            self._dict[k] = row
            self._tablewidget.insertRow(row)
            if row == 0:
                self._tablewidget.insertColumn(0)
                self._tablewidget.setHorizontalHeaderItem(0, QTableWidgetItem("value"))
        key_item = QTableWidgetItem(k)
        self._tablewidget.setVerticalHeaderItem(row, key_item)
        # Display text and tooltip are produced by per-type factories
        # registered on BaseWidget; str() is the fallback.
        name = self._delegates.get(type(obj), str)(obj)
        value_item = PyTableWidgetItem(obj, name)
        tooltip = self._tooltip.get(type(obj), str)(obj)
        value_item.setToolTip(tooltip)
        self._tablewidget.setItem(row, 0, value_item)

    def __delitem__(self, k: str) -> None:
        row = self._dict.pop(k)
        self._tablewidget.removeRow(row)
        # BUGFIX: removing a row shifts every later row up by one; re-index
        # the key -> row bookkeeping so subsequent lookups stay correct.
        for key, r in self._dict.items():
            if r > row:
                self._dict[key] = r - 1

    def __iter__(self) -> Iterable[str]:
        return iter(self._dict)

    def keys(self):
        """
        Return the view of dictionary keys.
        """
        return self._dict.keys()

    def values(self) -> DictValueView:
        """
        Return the view of dictionary values as Python objects.
        """
        return DictValueView(self._tablewidget)

    def items(self) -> DictItemView:
        """
        Return the view of dictionary keys and values as strings and Python objects.
        """
        return DictItemView(self._tablewidget)

    def update(self, d: dict[str, Any]):
        """
        Update the dictionary contents.
        """
        for k, v in d.items():
            self[k] = v

    def clear(self) -> None:
        """
        Clear dictionary contents.
        """
        self._tablewidget.clear()
        self._dict.clear()

    def pop(self, k: str):
        """
        Remove key *k* and return the Python object stored under it.
        """
        # Route through __delitem__ so row re-indexing happens in one place.
        out = self[k]
        del self[k]
        return out

    def get(self, k: str, default=None):
        """
        Return the object stored under *k*, or *default* when absent.
        """
        # BUGFIX: the original had no ``return`` statement (always yielded
        # None) and would have produced the internal row index, not the object.
        try:
            return self[k]
        except KeyError:
            return default
class PyTableWidget(ContextMenuMixin, QTableWidget):
    """QTableWidget whose cells hold PyTableWidgetItem and that installs the
    shared context menu from ContextMenuMixin."""
    # Typed overrides only: they narrow the return type for tooling; the
    # behavior is inherited unchanged.
    def item(self, row: int, column: int) -> PyTableWidgetItem:
        return super().item(row, column)
    def itemAt(self, *p) -> PyTableWidgetItem:
        return super().itemAt(*p)
    def __init__(self, parent: None) -> None:
        super().__init__(parent=parent)
        self.setContextMenu()
class PyTableWidgetItem(PyObjectBound, QTableWidgetItem):
    """Table item that carries an arbitrary Python object (via PyObjectBound)
    alongside its display name."""
    def __init__(self, obj=None, name=None):
        super().__init__()
        self.setObject(obj, name)
class DictValueView:
    """Iterable view over the Python objects stored in a PyTableWidget,
    in row order (analogous to dict.values())."""
    def __init__(self, widget: PyTableWidget):
        self.widget = widget
    def __iter__(self):
        for row in range(self.widget.rowCount()):
            yield self.widget.item(row, 0).obj
class DictItemView:
    """Iterable view over (key, object) pairs of a PyTableWidget, in row
    order; keys come from the vertical header (analogous to dict.items())."""
    def __init__(self, widget: PyTableWidget):
        self.widget = widget
    def __iter__(self):
        for row in range(self.widget.rowCount()):
            key = self.widget.verticalHeaderItem(row).text()
            value = self.widget.item(row, 0).obj
            yield key, value
56c5e20da2cd5885b05e167a228b34272f42dc84 | 667 | py | Python | anormbookmarker/cli/visualization/sa_display.py | jakeogh/anormbookmarker | d8516cc47dd7e5a5484eb9c1e8f44155b7663897 | [
"MIT"
] | 2 | 2017-05-08T04:44:56.000Z | 2017-08-21T06:41:05.000Z | anormbookmarker/cli/visualization/sa_display.py | jakeogh/anormbookmarker | d8516cc47dd7e5a5484eb9c1e8f44155b7663897 | [
"MIT"
] | null | null | null | anormbookmarker/cli/visualization/sa_display.py | jakeogh/anormbookmarker | d8516cc47dd7e5a5484eb9c1e8f44155b7663897 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import click
from anormbookmarker.model.__model__ import *
from anormbookmarker.model.BookmarkClassConstructor import tagbookmarks_table
from anormbookmarker.model.Word import WordMisSpelling
from anormbookmarker.test.test_enviroment import Tag
from anormbookmarker.test.test_enviroment import Bookmark
from kcl.sqlalchemy.model.Filename import Filename
#from kcl.sqlalchemy.model.FileRecord import Filename
#from kcl.sqlalchemy.model.FileRecord import Path
from kcl.sqlalchemy.visualization.sa_display import sa_display as kcl_sa_display
@click.command()
| 35.105263 | 80 | 0.841079 | #!/usr/bin/env python3
import click
from anormbookmarker.model.__model__ import *
from anormbookmarker.model.BookmarkClassConstructor import tagbookmarks_table
from anormbookmarker.model.Word import WordMisSpelling
from anormbookmarker.test.test_enviroment import Tag
from anormbookmarker.test.test_enviroment import Bookmark
from kcl.sqlalchemy.model.Filename import Filename
#from kcl.sqlalchemy.model.FileRecord import Filename
#from kcl.sqlalchemy.model.FileRecord import Path
from kcl.sqlalchemy.visualization.sa_display import sa_display as kcl_sa_display
@click.command()
def sa_display():
    """CLI entry point: run kcl's SQLAlchemy visualization over every model
    imported into this module's globals()."""
    #import IPython; IPython.embed()
    kcl_sa_display(globals())
| 63 | 0 | 22 |
9b5abae420a1b31c6db257935b5a0ebcff7174af | 144 | py | Python | post-setup.py | TechWritingWhiz/sovrin-client | b5633dd7767b4aaf08f622181f3937a104b290fb | [
"Apache-2.0"
] | 13 | 2017-02-16T11:45:50.000Z | 2017-06-13T20:07:51.000Z | post-setup.py | TechWritingWhiz/sovrin-client | b5633dd7767b4aaf08f622181f3937a104b290fb | [
"Apache-2.0"
] | 20 | 2017-01-27T14:37:07.000Z | 2017-06-28T10:41:39.000Z | post-setup.py | TechWritingWhiz/sovrin-client | b5633dd7767b4aaf08f622181f3937a104b290fb | [
"Apache-2.0"
] | 57 | 2017-01-21T22:29:27.000Z | 2017-06-29T10:24:19.000Z | import os
from sovrin_common.setup_util import Setup
BASE_DIR = os.path.join(os.path.expanduser("~"), ".sovrin")
Setup(BASE_DIR).setupClient() | 24 | 59 | 0.763889 | import os
from sovrin_common.setup_util import Setup
BASE_DIR = os.path.join(os.path.expanduser("~"), ".sovrin")
Setup(BASE_DIR).setupClient() | 0 | 0 | 0 |
bdd459bfa527d1b8bc8e4ac5b06b6b88f6c4f534 | 9,573 | py | Python | utils/decode_can_frame.py | FutureInLogistics/moteus | 604799baf4fd6e7a9c9f9eab556596c98c112dc0 | [
"Apache-2.0"
] | null | null | null | utils/decode_can_frame.py | FutureInLogistics/moteus | 604799baf4fd6e7a9c9f9eab556596c98c112dc0 | [
"Apache-2.0"
] | null | null | null | utils/decode_can_frame.py | FutureInLogistics/moteus | 604799baf4fd6e7a9c9f9eab556596c98c112dc0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3 -B
# Copyright 2022 Josh Pieper, jjp@pobox.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import enum
import moteus
import struct
SCALE_TYPES = [
ScaleType([moteus.Register.POSITION,
moteus.Register.ABS_POSITION,
moteus.Register.COMMAND_POSITION,
moteus.Register.COMMAND_STOP_POSITION,
moteus.Register.COMMAND_WITHIN_LOWER_BOUND],
0.01, 0.0001, 0.00001),
ScaleType([moteus.Register.VELOCITY,
moteus.Register.COMMAND_VELOCITY,
moteus.Register.COMMAND_VELOCITY_LIMIT],
0.1, 0.00025, 0.00001),
ScaleType([moteus.Register.TORQUE,
moteus.Register.COMMAND_FEEDFORWARD_TORQUE,
moteus.Register.COMMAND_POSITION_MAX_TORQUE,
moteus.Register.POSITION_FEEDFORWARD,
moteus.Register.POSITION_COMMAND,
moteus.Register.COMMAND_WITHIN_FEEDFORWARD_TORQUE,
moteus.Register.COMMAND_WITHIN_MAX_TORQUE],
0.5, 0.01, 0.001),
ScaleType([moteus.Register.Q_CURRENT,
moteus.Register.D_CURRENT,
moteus.Register.COMMAND_Q_CURRENT,
moteus.Register.COMMAND_D_CURRENT],
1.0, 0.1, 0.001),
ScaleType([moteus.Register.VOLTAGE,
moteus.Register.VOLTAGE_PHASE_A,
moteus.Register.VOLTAGE_PHASE_B,
moteus.Register.VOLTAGE_PHASE_C,
moteus.Register.VFOC_VOLTAGE,
moteus.Register.VOLTAGEDQ_D,
moteus.Register.VOLTAGEDQ_Q],
0.5, 0.1, 0.001),
ScaleType([moteus.Register.TEMPERATURE],
1.0, 0.1, 0.001),
ScaleType([moteus.Register.PWM_PHASE_A,
moteus.Register.PWM_PHASE_B,
moteus.Register.PWM_PHASE_C,
moteus.Register.COMMAND_KP_SCALE,
moteus.Register.COMMAND_KD_SCALE,
moteus.Register.COMMAND_WITHIN_KP_SCALE,
moteus.Register.COMMAND_WITHIN_KD_SCALE],
1.0 / 127.0, 1.0 / 32767.0, 1.0 / 2147483647.0),
ScaleType([moteus.Register.COMMAND_ACCEL_LIMIT],
0.05, 0.001, 0.00001),
ScaleType([moteus.Register.COMMAND_TIMEOUT,
moteus.Register.COMMAND_WITHIN_TIMEOUT],
0.01, 0.001, 0.000001),
]
if __name__ == '__main__':
main()
| 35.194853 | 114 | 0.590306 | #!/usr/bin/python3 -B
# Copyright 2022 Josh Pieper, jjp@pobox.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import enum
import moteus
import struct
class Command(enum.IntEnum):
    """Subframe command opcodes of the moteus multiplex register protocol.

    NOTE(review): values mirror the moteus register-protocol spec; the
    0x40-0x42 group carries the tunnelled serial stream.
    """
    WRITE_REGISTERS = 0x00
    READ_REGISTERS = 0x10
    REPLY = 0x20
    READ_ERROR = 0x30
    WRITE_ERROR = 0x31
    STREAM_CLIENT_TO_SERVER = 0x40
    STREAM_SERVER_TO_CLIENT = 0x41
    STREAM_CLIENT_POLL_SERVER = 0x42
    NOP = 0x50
class Type(enum.IntEnum):
    """Wire encodings for register values (dispatched on by Stream.read_type)."""
    INT8 = 0
    INT16 = 1
    INT32 = 2
    F32 = 3
class Stream:
    """Consumes a bytes buffer front-to-back, one field at a time.

    Every read_* method returns a ``(raw_bytes, value)`` pair so callers can
    report both the consumed bytes and their decoded value.
    """
    def __init__(self, data):
        self.data = data
    def remaining(self):
        """Number of bytes not yet consumed."""
        return len(self.data)
    def _read_byte(self):
        # Consume exactly one byte and return it as an int.
        first = self.data[0]
        self.data = self.data[1:]
        return first
    def _read_value(self, size):
        # Consume *size* bytes and return them as a bytes object.
        chunk = self.data[:size]
        self.data = self.data[size:]
        return chunk
    def read_struct(self, fmt):
        """Consume one struct of layout *fmt*; return (raw, unpacked value)."""
        layout = struct.Struct(fmt)
        raw = self._read_value(layout.size)
        return raw, layout.unpack(raw)[0]
    def read_int8(self):
        return self.read_struct('<b')
    def read_int16(self):
        return self.read_struct('<h')
    def read_int32(self):
        return self.read_struct('<i')
    def read_f32(self):
        return self.read_struct('<f')
    def read_varuint(self):
        """Consume a little-endian base-128 varuint (at most 5 bytes)."""
        value = 0
        consumed = b''
        for shift in range(0, 35, 7):
            byte = self._read_byte()
            consumed += bytes([byte])
            value |= (byte & 0x7f) << shift
            if not (byte & 0x80):
                # High bit clear terminates the varuint.
                return consumed, value
        raise RuntimeError(f'Invalid varuint {consumed.hex()}')
    def read_type(self, typecode):
        """Consume one value encoded per *typecode* (see Type)."""
        readers = {
            int(Type.INT8): self.read_int8,
            int(Type.INT16): self.read_int16,
            int(Type.INT32): self.read_int32,
            int(Type.F32): self.read_f32,
        }
        reader = readers.get(typecode)
        if reader is None:
            raise RuntimeError(f'Unknown type: {typecode}')
        return reader()
def format_reg(reg):
    """Render a register number as hex, appending its symbolic name when known."""
    try:
        typedreg = moteus.Register(reg)
        return f'0x{reg:03x}({typedreg.name})'
    except (ValueError, TypeError):
        # BUGFIX: an enum lookup of an unknown value raises ValueError, not
        # TypeError, so unknown registers previously crashed the decoder
        # instead of falling back to plain hex.  TypeError is kept for
        # non-integer inputs.
        return f'0x{reg:03x}'
def IsNan(typecode, value):
    """Return True when `value` is the NaN sentinel for integer `typecode`.

    The moteus protocol uses the most negative representable integer of
    each width as its "not a number" marker.
    """
    sentinels = {
        Type.INT8: -(2 ** 7),
        Type.INT16: -(2 ** 15),
        Type.INT32: -(2 ** 31),
    }
    # Types without a sentinel (e.g. F32) map to None and never match.
    return sentinels.get(typecode) == value
def format_scaled(typecode, value, int8_scale, int16_scale, int32_scale):
    """Render a raw register value together with its scaled interpretation.

    Picks the scale factor matching the wire type; NaN sentinels are
    labelled instead of scaled, and unscalable types print the raw value.
    """
    if IsNan(typecode, value):
        return f'{value} (NaN)'
    scale_for_type = {
        Type.INT8: int8_scale,
        Type.INT16: int16_scale,
        Type.INT32: int32_scale,
    }
    scale = scale_for_type.get(typecode)
    if scale is None:
        # F32 (or anything unexpected) is shown unscaled.
        return f'{value}'
    return f'{value} ({value * scale})'
class ScaleType:
    """Associates a group of registers with the scale factor to apply for
    each integer wire width (int8/int16/int32)."""

    def __init__(self, registers, int8_scale, int16_scale, int32_scale):
        self.registers = registers
        (self.int8_scale,
         self.int16_scale,
         self.int32_scale) = (int8_scale, int16_scale, int32_scale)
# Table mapping each group of moteus registers to the multipliers used to
# turn raw int8/int16/int32 wire values into engineering units (consumed by
# format_value via format_scaled).
SCALE_TYPES = [
    ScaleType([moteus.Register.POSITION,
               moteus.Register.ABS_POSITION,
               moteus.Register.COMMAND_POSITION,
               moteus.Register.COMMAND_STOP_POSITION,
               moteus.Register.COMMAND_WITHIN_LOWER_BOUND],
              0.01, 0.0001, 0.00001),
    ScaleType([moteus.Register.VELOCITY,
               moteus.Register.COMMAND_VELOCITY,
               moteus.Register.COMMAND_VELOCITY_LIMIT],
              0.1, 0.00025, 0.00001),
    ScaleType([moteus.Register.TORQUE,
               moteus.Register.COMMAND_FEEDFORWARD_TORQUE,
               moteus.Register.COMMAND_POSITION_MAX_TORQUE,
               moteus.Register.POSITION_FEEDFORWARD,
               moteus.Register.POSITION_COMMAND,
               moteus.Register.COMMAND_WITHIN_FEEDFORWARD_TORQUE,
               moteus.Register.COMMAND_WITHIN_MAX_TORQUE],
              0.5, 0.01, 0.001),
    ScaleType([moteus.Register.Q_CURRENT,
               moteus.Register.D_CURRENT,
               moteus.Register.COMMAND_Q_CURRENT,
               moteus.Register.COMMAND_D_CURRENT],
              1.0, 0.1, 0.001),
    ScaleType([moteus.Register.VOLTAGE,
               moteus.Register.VOLTAGE_PHASE_A,
               moteus.Register.VOLTAGE_PHASE_B,
               moteus.Register.VOLTAGE_PHASE_C,
               moteus.Register.VFOC_VOLTAGE,
               moteus.Register.VOLTAGEDQ_D,
               moteus.Register.VOLTAGEDQ_Q],
              0.5, 0.1, 0.001),
    ScaleType([moteus.Register.TEMPERATURE],
              1.0, 0.1, 0.001),
    ScaleType([moteus.Register.PWM_PHASE_A,
               moteus.Register.PWM_PHASE_B,
               moteus.Register.PWM_PHASE_C,
               moteus.Register.COMMAND_KP_SCALE,
               moteus.Register.COMMAND_KD_SCALE,
               moteus.Register.COMMAND_WITHIN_KP_SCALE,
               moteus.Register.COMMAND_WITHIN_KD_SCALE],
              1.0 / 127.0, 1.0 / 32767.0, 1.0 / 2147483647.0),
    ScaleType([moteus.Register.COMMAND_ACCEL_LIMIT],
              0.05, 0.001, 0.00001),
    ScaleType([moteus.Register.COMMAND_TIMEOUT,
               moteus.Register.COMMAND_WITHIN_TIMEOUT],
              0.01, 0.001, 0.000001),
]
def format_value(reg, typecode, value):
    """Format a register's raw value for display.

    Register 0 (the mode register) is decoded to its symbolic mode name;
    other registers are scaled via SCALE_TYPES when a mapping exists,
    otherwise the raw value is printed as-is.
    """
    if reg == 0:
        try:
            return f'{value}({moteus.Mode(value).name})'
        except (TypeError, ValueError):
            # An unknown mode number makes the enum lookup raise ValueError
            # (the original code only caught TypeError, so unknown modes
            # crashed instead of printing the raw value).
            return f'{value}'
    for scale_type in SCALE_TYPES:
        if reg in scale_type.registers:
            return format_scaled(typecode, value,
                                 scale_type.int8_scale,
                                 scale_type.int16_scale,
                                 scale_type.int32_scale)
    return f'{value}'
def main():
    """Decode a hex-encoded moteus CAN frame payload and print a readable trace.

    Consumes one command byte at a time from the frame, dispatches on the
    multiplex protocol command encoded in it, and consumes any arguments
    that follow.  Each output line echoes the raw bytes next to their
    decoded meaning.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('hexcan', nargs='?',
                        help='Hex encoded CAN frame')
    args = parser.parse_args()
    if args.hexcan is None:
        # bytes.fromhex(None) would raise an opaque TypeError; fail clearly.
        parser.error('a hex encoded CAN frame is required')
    stream = Stream(bytes.fromhex(args.hexcan))
    while stream.remaining():
        data, cmd = stream.read_int8()
        print(f'{data.hex()} - ', end='')
        # Register commands pack extra fields into the command byte: the
        # upper nibble is the command, bits 2-3 the value type, and bits
        # 0-1 an inline register count (0 means a varuint count follows).
        upper = cmd & 0xf0
        maybe_type = (cmd & 0b00001100) >> 2
        maybe_num = cmd & 0b00000011
        if upper == int(Command.READ_REGISTERS):
            print(f'READ_REGISTERS - {Type(maybe_type).name} ', end='')
            if maybe_num > 0:
                length = maybe_num
                print(f'{length} registers')
            else:
                length_data, length = stream.read_varuint()
                print(f'\n  {length_data.hex()} - {length} registers')
            start_reg_data, start_reg = stream.read_varuint()
            print(f'  {start_reg_data.hex()} - Starting at reg {format_reg(start_reg)}')
        elif (upper == int(Command.WRITE_REGISTERS) or
              upper == int(Command.REPLY)):
            # Use Type(...).name for consistency with the READ_REGISTERS
            # branch (previously printed str(Type(...)), i.e. "Type.INT8"
            # here versus "INT8" there).
            print(f'{Command(upper).name} - {Type(maybe_type).name} ', end='')
            if maybe_num > 0:
                length = maybe_num
                print(f'{length} registers')
            else:
                length_data, length = stream.read_varuint()
                print(f'\n  {length_data.hex()} - {length} registers')
            start_reg_data, start_reg = stream.read_varuint()
            print(f'  {start_reg_data.hex()} - Starting at reg {format_reg(start_reg)}')
            cur_reg = start_reg
            for i in range(length):
                data, value = stream.read_type(maybe_type)
                print(f'  {data.hex()} - Reg {format_reg(cur_reg)} = {format_value(cur_reg, maybe_type, value)}')
                cur_reg += 1
        elif (cmd == Command.READ_ERROR or
              cmd == Command.WRITE_ERROR):
            print(f'{Command(cmd).name}')
            data, value = stream.read_varuint()
            print(f'  {data.hex()} - register {format_reg(value)}')
            data, value = stream.read_varuint()
            print(f'  {data.hex()} - error {value}')
        elif (cmd == Command.STREAM_CLIENT_TO_SERVER or
              cmd == Command.STREAM_SERVER_TO_CLIENT):
            print(f'{Command(cmd).name}')
            channel_data, channel = stream.read_varuint()
            print(f'  {channel_data.hex()} - channel {channel}')
            nbytes_data, nbytes = stream.read_varuint()
            print(f'  {nbytes_data.hex()} - {nbytes} bytes')
            data = stream._read_value(nbytes)
            print(f'  {data.hex()} - {data}')
        elif cmd == Command.STREAM_CLIENT_POLL_SERVER:
            print(f'{Command(cmd).name}')
            channel_data, channel = stream.read_varuint()
            print(f'  {channel_data.hex()} - channel {channel}')
            nbytes_data, nbytes = stream.read_varuint()
            print(f'  {nbytes_data.hex()} - at most {nbytes} bytes')
        elif cmd == Command.NOP:
            print(f'{Command(cmd).name}')
        else:
            # An unrecognized command byte means the framing of everything
            # that follows cannot be trusted (previously this fell through
            # silently and misparsed the remaining bytes as commands).
            print(f'UNKNOWN COMMAND 0x{cmd & 0xff:02x}')
            break
# Run the decoder only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| 5,850 | 288 | 529 |
1d3abc324462f5f95eba1671bf3a643a598d1332 | 1,147 | py | Python | setup.py | danielfeloiola/dpckan | 9aea7aa1d7137dca5adf7ad95d8a6d148ab337e5 | [
"MIT"
] | null | null | null | setup.py | danielfeloiola/dpckan | 9aea7aa1d7137dca5adf7ad95d8a6d148ab337e5 | [
"MIT"
] | null | null | null | setup.py | danielfeloiola/dpckan | 9aea7aa1d7137dca5adf7ad95d8a6d148ab337e5 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
import codecs
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import package_information
if __name__ == '__main__':
# Setting up
setup(
name=package_information.name,
version=package_information.version,
author=package_information.author,
author_email=package_information.email_author,
description=package_information.description,
long_description_content_type="text/markdown",
long_description=open('README.md').read() + '\n\n' + open('CHANGELOG.md').read(),
url="https://github.com/dados-mg/dpkgckanmg",
packages=find_packages(),
install_requires=open('requirements.txt').read(),
keywords=['python', 'ckan'],
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
],
entry_points="""
[console_scripts]
dpckan=dpckan.cli:cli
"""
)
| 32.771429 | 87 | 0.646905 | from setuptools import setup, find_packages
import codecs
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import package_information
if __name__ == '__main__':
# Setting up
setup(
name=package_information.name,
version=package_information.version,
author=package_information.author,
author_email=package_information.email_author,
description=package_information.description,
long_description_content_type="text/markdown",
long_description=open('README.md').read() + '\n\n' + open('CHANGELOG.md').read(),
url="https://github.com/dados-mg/dpkgckanmg",
packages=find_packages(),
install_requires=open('requirements.txt').read(),
keywords=['python', 'ckan'],
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
],
entry_points="""
[console_scripts]
dpckan=dpckan.cli:cli
"""
)
| 0 | 0 | 0 |
d041bf5d099ae907896e7e3de2df467e151b4282 | 9,392 | py | Python | scripts/main.py | anuprulez/similar_galaxy_workflow | b9771e58ad4e6e5b61909b284d4c7d2645626525 | [
"MIT"
] | 1 | 2020-05-31T14:09:58.000Z | 2020-05-31T14:09:58.000Z | scripts/main.py | anuprulez/similar_galaxy_workflow | b9771e58ad4e6e5b61909b284d4c7d2645626525 | [
"MIT"
] | 3 | 2018-04-23T21:02:21.000Z | 2019-10-18T09:05:02.000Z | scripts/main.py | anuprulez/similar_galaxy_workflow | b9771e58ad4e6e5b61909b284d4c7d2645626525 | [
"MIT"
] | 3 | 2018-03-22T08:54:07.000Z | 2019-07-04T17:00:03.000Z | """
Predict next tools in the Galaxy workflows
using machine learning (recurrent neural network)
"""
import numpy as np
import argparse
import time
# machine learning library
import tensorflow as tf
from keras import backend as K
import keras.callbacks as callbacks
import extract_workflow_connections
import prepare_data
import optimise_hyperparameters
import utils
if __name__ == "__main__":
start_time = time.time()
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-wf", "--workflow_file", required=True, help="workflows tabular file")
arg_parser.add_argument("-tu", "--tool_usage_file", required=True, help="tool usage file")
arg_parser.add_argument("-om", "--output_model", required=True, help="trained model file")
# data parameters
arg_parser.add_argument("-cd", "--cutoff_date", required=True, help="earliest date for taking tool usage")
arg_parser.add_argument("-pl", "--maximum_path_length", required=True, help="maximum length of tool path")
arg_parser.add_argument("-ep", "--n_epochs", required=True, help="number of iterations to run to create model")
arg_parser.add_argument("-oe", "--optimize_n_epochs", required=True, help="number of iterations to run to find best model parameters")
arg_parser.add_argument("-me", "--max_evals", required=True, help="maximum number of configuration evaluations")
arg_parser.add_argument("-ts", "--test_share", required=True, help="share of data to be used for testing")
arg_parser.add_argument("-vs", "--validation_share", required=True, help="share of data to be used for validation")
# neural network parameters
arg_parser.add_argument("-bs", "--batch_size", required=True, help="size of the tranining batch i.e. the number of samples per batch")
arg_parser.add_argument("-ut", "--units", required=True, help="number of hidden recurrent units")
arg_parser.add_argument("-es", "--embedding_size", required=True, help="size of the fixed vector learned for each tool")
arg_parser.add_argument("-dt", "--dropout", required=True, help="percentage of neurons to be dropped")
arg_parser.add_argument("-sd", "--spatial_dropout", required=True, help="1d dropout used for embedding layer")
arg_parser.add_argument("-rd", "--recurrent_dropout", required=True, help="dropout for the recurrent layers")
arg_parser.add_argument("-lr", "--learning_rate", required=True, help="learning rate")
arg_parser.add_argument("-ar", "--activation_recurrent", required=True, help="activation function for recurrent layers")
arg_parser.add_argument("-ao", "--activation_output", required=True, help="activation function for output layers")
arg_parser.add_argument("-cpus", "--num_cpus", required=True, help="number of cpus for parallelism")
# get argument values
args = vars(arg_parser.parse_args())
tool_usage_path = args["tool_usage_file"]
workflows_path = args["workflow_file"]
cutoff_date = args["cutoff_date"]
maximum_path_length = int(args["maximum_path_length"])
trained_model_path = args["output_model"]
n_epochs = int(args["n_epochs"])
optimize_n_epochs = int(args["optimize_n_epochs"])
max_evals = int(args["max_evals"])
test_share = float(args["test_share"])
validation_share = float(args["validation_share"])
batch_size = args["batch_size"]
units = args["units"]
embedding_size = args["embedding_size"]
dropout = args["dropout"]
spatial_dropout = args["spatial_dropout"]
recurrent_dropout = args["recurrent_dropout"]
learning_rate = args["learning_rate"]
activation_recurrent = args["activation_recurrent"]
activation_output = args["activation_output"]
num_cpus = int(args["num_cpus"])
config = {
'cutoff_date': cutoff_date,
'maximum_path_length': maximum_path_length,
'n_epochs': n_epochs,
'optimize_n_epochs': optimize_n_epochs,
'max_evals': max_evals,
'test_share': test_share,
'validation_share': validation_share,
'batch_size': batch_size,
'units': units,
'embedding_size': embedding_size,
'dropout': dropout,
'spatial_dropout': spatial_dropout,
'recurrent_dropout': recurrent_dropout,
'learning_rate': learning_rate,
'activation_recurrent': activation_recurrent,
'activation_output': activation_output
}
# Extract and process workflows
connections = extract_workflow_connections.ExtractWorkflowConnections()
workflow_paths, compatible_next_tools = connections.read_tabular_file(workflows_path)
# Process the paths from workflows
print("Dividing data...")
data = prepare_data.PrepareData(maximum_path_length, test_share)
train_data, train_labels, test_data, test_labels, data_dictionary, reverse_dictionary, class_weights, usage_pred = data.get_data_labels_matrices(workflow_paths, tool_usage_path, cutoff_date, compatible_next_tools)
# find the best model and start training
predict_tool = PredictTool(num_cpus)
# start training with weighted classes
print("Training with weighted classes and samples ...")
results_weighted = predict_tool.find_train_best_network(config, reverse_dictionary, train_data, train_labels, test_data, test_labels, n_epochs, class_weights, usage_pred, compatible_next_tools)
print()
print("Best parameters \n")
print(results_weighted["best_parameters"])
print()
utils.save_model(results_weighted, data_dictionary, compatible_next_tools, trained_model_path, class_weights)
end_time = time.time()
print()
print("Program finished in %s seconds" % str(end_time - start_time))
| 48.412371 | 217 | 0.701874 | """
Predict next tools in the Galaxy workflows
using machine learning (recurrent neural network)
"""
import numpy as np
import argparse
import time
# machine learning library
import tensorflow as tf
from keras import backend as K
import keras.callbacks as callbacks
import extract_workflow_connections
import prepare_data
import optimise_hyperparameters
import utils
class PredictTool:
@classmethod
def __init__(self, num_cpus):
""" Init method. """
# set the number of cpus
cpu_config = tf.ConfigProto(
device_count={"CPU": num_cpus},
intra_op_parallelism_threads=num_cpus,
inter_op_parallelism_threads=num_cpus,
allow_soft_placement=True
)
K.set_session(tf.Session(config=cpu_config))
@classmethod
def find_train_best_network(self, network_config, reverse_dictionary, train_data, train_labels, test_data, test_labels, n_epochs, class_weights, usage_pred, compatible_next_tools):
"""
Define recurrent neural network and train sequential data
"""
print("Start hyperparameter optimisation...")
hyper_opt = optimise_hyperparameters.HyperparameterOptimisation()
best_params, best_model = hyper_opt.train_model(network_config, reverse_dictionary, train_data, train_labels, class_weights)
# define callbacks
early_stopping = callbacks.EarlyStopping(monitor='loss', mode='min', verbose=1, min_delta=1e-4, restore_best_weights=True)
predict_callback_test = PredictCallback(test_data, test_labels, reverse_dictionary, n_epochs, compatible_next_tools, usage_pred)
callbacks_list = [predict_callback_test, early_stopping]
print("Start training on the best model...")
train_performance = dict()
if len(test_data) > 0:
trained_model = best_model.fit(
train_data,
train_labels,
batch_size=int(best_params["batch_size"]),
epochs=n_epochs,
verbose=2,
callbacks=callbacks_list,
shuffle="batch",
validation_data=(test_data, test_labels)
)
train_performance["validation_loss"] = np.array(trained_model.history["val_loss"])
train_performance["precision"] = predict_callback_test.precision
train_performance["usage_weights"] = predict_callback_test.usage_weights
else:
trained_model = best_model.fit(
train_data,
train_labels,
batch_size=int(best_params["batch_size"]),
epochs=n_epochs,
verbose=2,
callbacks=callbacks_list,
shuffle="batch"
)
train_performance["train_loss"] = np.array(trained_model.history["loss"])
train_performance["model"] = best_model
train_performance["best_parameters"] = best_params
return train_performance
class PredictCallback(callbacks.Callback):
def __init__(self, test_data, test_labels, reverse_data_dictionary, n_epochs, next_compatible_tools, usg_scores):
self.test_data = test_data
self.test_labels = test_labels
self.reverse_data_dictionary = reverse_data_dictionary
self.precision = list()
self.usage_weights = list()
self.n_epochs = n_epochs
self.next_compatible_tools = next_compatible_tools
self.pred_usage_scores = usg_scores
def on_epoch_end(self, epoch, logs={}):
"""
Compute absolute and compatible precision for test data
"""
if len(self.test_data) > 0:
precision, usage_weights = utils.verify_model(self.model, self.test_data, self.test_labels, self.reverse_data_dictionary, self.next_compatible_tools, self.pred_usage_scores)
self.precision.append(precision)
self.usage_weights.append(usage_weights)
print("Epoch %d precision: %s" % (epoch + 1, precision))
print("Epoch %d usage weights: %s" % (epoch + 1, usage_weights))
if __name__ == "__main__":
start_time = time.time()
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-wf", "--workflow_file", required=True, help="workflows tabular file")
arg_parser.add_argument("-tu", "--tool_usage_file", required=True, help="tool usage file")
arg_parser.add_argument("-om", "--output_model", required=True, help="trained model file")
# data parameters
arg_parser.add_argument("-cd", "--cutoff_date", required=True, help="earliest date for taking tool usage")
arg_parser.add_argument("-pl", "--maximum_path_length", required=True, help="maximum length of tool path")
arg_parser.add_argument("-ep", "--n_epochs", required=True, help="number of iterations to run to create model")
arg_parser.add_argument("-oe", "--optimize_n_epochs", required=True, help="number of iterations to run to find best model parameters")
arg_parser.add_argument("-me", "--max_evals", required=True, help="maximum number of configuration evaluations")
arg_parser.add_argument("-ts", "--test_share", required=True, help="share of data to be used for testing")
arg_parser.add_argument("-vs", "--validation_share", required=True, help="share of data to be used for validation")
# neural network parameters
arg_parser.add_argument("-bs", "--batch_size", required=True, help="size of the tranining batch i.e. the number of samples per batch")
arg_parser.add_argument("-ut", "--units", required=True, help="number of hidden recurrent units")
arg_parser.add_argument("-es", "--embedding_size", required=True, help="size of the fixed vector learned for each tool")
arg_parser.add_argument("-dt", "--dropout", required=True, help="percentage of neurons to be dropped")
arg_parser.add_argument("-sd", "--spatial_dropout", required=True, help="1d dropout used for embedding layer")
arg_parser.add_argument("-rd", "--recurrent_dropout", required=True, help="dropout for the recurrent layers")
arg_parser.add_argument("-lr", "--learning_rate", required=True, help="learning rate")
arg_parser.add_argument("-ar", "--activation_recurrent", required=True, help="activation function for recurrent layers")
arg_parser.add_argument("-ao", "--activation_output", required=True, help="activation function for output layers")
arg_parser.add_argument("-cpus", "--num_cpus", required=True, help="number of cpus for parallelism")
# get argument values
args = vars(arg_parser.parse_args())
tool_usage_path = args["tool_usage_file"]
workflows_path = args["workflow_file"]
cutoff_date = args["cutoff_date"]
maximum_path_length = int(args["maximum_path_length"])
trained_model_path = args["output_model"]
n_epochs = int(args["n_epochs"])
optimize_n_epochs = int(args["optimize_n_epochs"])
max_evals = int(args["max_evals"])
test_share = float(args["test_share"])
validation_share = float(args["validation_share"])
batch_size = args["batch_size"]
units = args["units"]
embedding_size = args["embedding_size"]
dropout = args["dropout"]
spatial_dropout = args["spatial_dropout"]
recurrent_dropout = args["recurrent_dropout"]
learning_rate = args["learning_rate"]
activation_recurrent = args["activation_recurrent"]
activation_output = args["activation_output"]
num_cpus = int(args["num_cpus"])
config = {
'cutoff_date': cutoff_date,
'maximum_path_length': maximum_path_length,
'n_epochs': n_epochs,
'optimize_n_epochs': optimize_n_epochs,
'max_evals': max_evals,
'test_share': test_share,
'validation_share': validation_share,
'batch_size': batch_size,
'units': units,
'embedding_size': embedding_size,
'dropout': dropout,
'spatial_dropout': spatial_dropout,
'recurrent_dropout': recurrent_dropout,
'learning_rate': learning_rate,
'activation_recurrent': activation_recurrent,
'activation_output': activation_output
}
# Extract and process workflows
connections = extract_workflow_connections.ExtractWorkflowConnections()
workflow_paths, compatible_next_tools = connections.read_tabular_file(workflows_path)
# Process the paths from workflows
print("Dividing data...")
data = prepare_data.PrepareData(maximum_path_length, test_share)
train_data, train_labels, test_data, test_labels, data_dictionary, reverse_dictionary, class_weights, usage_pred = data.get_data_labels_matrices(workflow_paths, tool_usage_path, cutoff_date, compatible_next_tools)
# find the best model and start training
predict_tool = PredictTool(num_cpus)
# start training with weighted classes
print("Training with weighted classes and samples ...")
results_weighted = predict_tool.find_train_best_network(config, reverse_dictionary, train_data, train_labels, test_data, test_labels, n_epochs, class_weights, usage_pred, compatible_next_tools)
print()
print("Best parameters \n")
print(results_weighted["best_parameters"])
print()
utils.save_model(results_weighted, data_dictionary, compatible_next_tools, trained_model_path, class_weights)
end_time = time.time()
print()
print("Program finished in %s seconds" % str(end_time - start_time))
| 433 | 3,246 | 46 |
7a29ff1e2d77b9832f3d48ceb56b6ac1578a9d19 | 4,935 | py | Python | faces/challenge.py | paskino/ML-exercises | 6f6586595583c9d85be72944ce8a1a97fdcd0d91 | [
"BSD-3-Clause"
] | null | null | null | faces/challenge.py | paskino/ML-exercises | 6f6586595583c9d85be72944ce8a1a97fdcd0d91 | [
"BSD-3-Clause"
] | 2 | 2018-10-21T15:59:43.000Z | 2019-01-27T15:30:52.000Z | faces/challenge.py | paskino/ML-exercises | 6f6586595583c9d85be72944ce8a1a97fdcd0d91 | [
"BSD-3-Clause"
] | null | null | null | '''
Facial recognition data challenge
Data from Evgueni Ovtchinnikov: https://www.dropbox.com/sh/a62wxyw9fpzwt95/AABJE0CEAtqOuLXKo_sOTFMVa?dl=0
https://github.com/evgueni-ovtchinnikov
1. clean the dataset: select only images with more than one face
2. select 70% train 30% cross validation
'''
import numpy
from functools import reduce
import matplotlib.pyplot as plt
import pickle
__version__ = '0.1.0'
#from docopt import docopt
'''
args = docopt(__doc__, version=__version__)
file = args['<images>']
pref = args['<prefix>']
path = args['--path']
print('loading images from %s...' % (path + '/' + file))
images = numpy.load(file)
ni, ny, nx = images.shape
'''
# link images numbers to names
names = []
num = []
#index = numpy.ndarray((ni,), dtype = numpy.int16)
# the number following the name indicates at which index the images of
# the person start.
off = []
count = 0
with open('lfw_names.txt') as fp:
line = fp.readline()
while line:
theline = line.split(' ')
names.append(theline[0])
num.append(int(theline[1]))
line = fp.readline()
# PCA matrix
u = numpy.load("lfwfp1140eigim.npy")
# coordinates
v = numpy.load("lfwfp1140coord.npy")
# total number of images
ni = v.shape[1]
count = count_img(num)
# correct the last count
if ni - num[-1] > 1:
count.append(ni-num[-1])
names_repeat = []
index_repeat = []
name_index = {}
min_num_pics = 40
for i in range (len(count)):
if count[i] >= min_num_pics:
for j in range(count[i]):
names_repeat.append(names[i])
index_repeat.append(num[i] + j)
select = 'Bill_Clinton'
select = 'Vladimir_Putin'
i = 0
while (names_repeat[i] != select):
i+=1
nselect = reduce(lambda x,y: x + 1 if y == select else x, names_repeat,0)
# the selected person will be in the range [i, i-nselect-1]
nc, ny, nx = u.shape
index = index_repeat[i]
PCA_image = numpy.dot(u.T, v.T[index])
PCA_image = numpy.reshape(PCA_image, (ny, nx))
plt.figure()
plt.title('PCA approximation of the image %d' % i)
plt.imshow(PCA_image.T, cmap = 'gray')
plt.show()
#n = nx*ny
#u = numpy.reshape(u, (nc, n))
# create the test set and cross validation set
# if a person has:
# n pics, train/cross validation split
# 2 , 1-1
# 3 , 2-1
# 4 , 3-1
# 5 , 3-2
# 6 , 4-2
# 7 , 5-2
# 8 , 5-3
# 9 , 6-3
# 10 , 70%-30%
# 11 , idem
# 12 ,
training_set_indices = []
cv_set_indices = []
face_index = 0
for select in names:
if select in names_repeat:
i = 0
while (i < len(names_repeat) and names_repeat[i] != select):
i+=1
#print (select, i)
nselect = reduce(lambda x,y: x + 1 if y == select else x, names_repeat,0)
#print ("{0}, found {1} images".format(select, nselect))
if nselect == 2:
nts = 1
ncv = 1
elif nselect == 3:
nts = 2
ncv = 1
elif nselect == 4:
nts = 3
ncv = 1
elif nselect == 5:
nts = 3
ncv = 2
elif nselect == 6:
nts = 4
ncv = 2
elif nselect == 7:
nts = 5
ncv = 2
elif nselect == 8:
nts = 5
ncv = 3
elif nselect == 9:
nts = 6
ncv = 3
else:
nts = int(nselect * 0.7)
ncv = nselect - nts
#print (" Number of images in training set {0}".format(nts))
#print (" Number of images in cross validation set {0}".format(ncv))
for n in range(nts):
training_set_indices.append((select, index_repeat[i+n], face_index))
for n in range(ncv):
cv_set_indices.append((select, index_repeat[i+nts+n], face_index))
face_index += 1
neig = v.shape[0]
training_set = numpy.zeros((len(training_set_indices), neig), dtype=v.dtype)
cv_set = numpy.zeros((len(cv_set_indices), neig), dtype=v.dtype)
for i,face in enumerate(training_set_indices):
faceindex = face[1]
training_set[i][:] = v.T[faceindex]
for i,face in enumerate(cv_set_indices):
faceindex = face[1]
cv_set[i][:] = v.T[faceindex]
# show that we are doing well
select = 'Vladimir_Putin'
select = 'Colin_Powell'
index = 0
while (not select == training_set_indices[index][0]):
index += 1
PCA_image = numpy.dot(u.T, training_set[index])
PCA_image = numpy.reshape(PCA_image, (ny, nx))
plt.figure()
plt.title('PCA approximation of the image {}'.format(training_set_indices[index][0]))
plt.imshow(PCA_image.T, cmap = 'gray')
plt.show()
# save description of dataset
pickle.dump(training_set_indices, open("training_set_indices.pkl", "wb"))
pickle.dump(cv_set_indices, open("cv_set_indices.pkl", "wb"))
| 25.837696 | 105 | 0.593516 | '''
Facial recognition data challenge
Data from Evgueni Ovtchinnikov: https://www.dropbox.com/sh/a62wxyw9fpzwt95/AABJE0CEAtqOuLXKo_sOTFMVa?dl=0
https://github.com/evgueni-ovtchinnikov
1. clean the dataset: select only images with more than one face
2. select 70% train 30% cross validation
'''
import numpy
from functools import reduce
import matplotlib.pyplot as plt
import pickle
__version__ = '0.1.0'
#from docopt import docopt
'''
args = docopt(__doc__, version=__version__)
file = args['<images>']
pref = args['<prefix>']
path = args['--path']
print('loading images from %s...' % (path + '/' + file))
images = numpy.load(file)
ni, ny, nx = images.shape
'''
# link images numbers to names
names = []
num = []
#index = numpy.ndarray((ni,), dtype = numpy.int16)
# the number following the name indicates at which index the images of
# the person start.
off = []
count = 0
with open('lfw_names.txt') as fp:
line = fp.readline()
while line:
theline = line.split(' ')
names.append(theline[0])
num.append(int(theline[1]))
line = fp.readline()
def count_img(num):
return [num[i+1] - num[i] for i in range(len(num)) if i < len(num)-1]
# PCA matrix
u = numpy.load("lfwfp1140eigim.npy")
# coordinates
v = numpy.load("lfwfp1140coord.npy")
# total number of images
ni = v.shape[1]
count = count_img(num)
# correct the last count
if ni - num[-1] > 1:
count.append(ni-num[-1])
names_repeat = []
index_repeat = []
name_index = {}
min_num_pics = 40
for i in range (len(count)):
if count[i] >= min_num_pics:
for j in range(count[i]):
names_repeat.append(names[i])
index_repeat.append(num[i] + j)
select = 'Bill_Clinton'
select = 'Vladimir_Putin'
i = 0
while (names_repeat[i] != select):
i+=1
nselect = reduce(lambda x,y: x + 1 if y == select else x, names_repeat,0)
# the selected person will be in the range [i, i-nselect-1]
nc, ny, nx = u.shape
index = index_repeat[i]
PCA_image = numpy.dot(u.T, v.T[index])
PCA_image = numpy.reshape(PCA_image, (ny, nx))
plt.figure()
plt.title('PCA approximation of the image %d' % i)
plt.imshow(PCA_image.T, cmap = 'gray')
plt.show()
#n = nx*ny
#u = numpy.reshape(u, (nc, n))
# create the test set and cross validation set
# if a person has:
# n pics, train/cross validation split
# 2 , 1-1
# 3 , 2-1
# 4 , 3-1
# 5 , 3-2
# 6 , 4-2
# 7 , 5-2
# 8 , 5-3
# 9 , 6-3
# 10 , 70%-30%
# 11 , idem
# 12 ,
training_set_indices = []
cv_set_indices = []
face_index = 0
for select in names:
if select in names_repeat:
i = 0
while (i < len(names_repeat) and names_repeat[i] != select):
i+=1
#print (select, i)
nselect = reduce(lambda x,y: x + 1 if y == select else x, names_repeat,0)
#print ("{0}, found {1} images".format(select, nselect))
if nselect == 2:
nts = 1
ncv = 1
elif nselect == 3:
nts = 2
ncv = 1
elif nselect == 4:
nts = 3
ncv = 1
elif nselect == 5:
nts = 3
ncv = 2
elif nselect == 6:
nts = 4
ncv = 2
elif nselect == 7:
nts = 5
ncv = 2
elif nselect == 8:
nts = 5
ncv = 3
elif nselect == 9:
nts = 6
ncv = 3
else:
nts = int(nselect * 0.7)
ncv = nselect - nts
#print (" Number of images in training set {0}".format(nts))
#print (" Number of images in cross validation set {0}".format(ncv))
for n in range(nts):
training_set_indices.append((select, index_repeat[i+n], face_index))
for n in range(ncv):
cv_set_indices.append((select, index_repeat[i+nts+n], face_index))
face_index += 1
neig = v.shape[0]
training_set = numpy.zeros((len(training_set_indices), neig), dtype=v.dtype)
cv_set = numpy.zeros((len(cv_set_indices), neig), dtype=v.dtype)
for i,face in enumerate(training_set_indices):
faceindex = face[1]
training_set[i][:] = v.T[faceindex]
for i,face in enumerate(cv_set_indices):
faceindex = face[1]
cv_set[i][:] = v.T[faceindex]
# show that we are doing well
select = 'Vladimir_Putin'
select = 'Colin_Powell'
index = 0
while (not select == training_set_indices[index][0]):
index += 1
PCA_image = numpy.dot(u.T, training_set[index])
PCA_image = numpy.reshape(PCA_image, (ny, nx))
plt.figure()
plt.title('PCA approximation of the image {}'.format(training_set_indices[index][0]))
plt.imshow(PCA_image.T, cmap = 'gray')
plt.show()
# save description of dataset
pickle.dump(training_set_indices, open("training_set_indices.pkl", "wb"))
pickle.dump(cv_set_indices, open("cv_set_indices.pkl", "wb"))
| 72 | 0 | 24 |
0566a1f6687a7cb51f4fa8c2b41d58aca8e69435 | 1,458 | py | Python | Cartwheel/lib/Python26/Lib/site-packages/OpenGL/raw/GL/EXT/gpu_program_parameters.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | Cartwheel/lib/Python26/Lib/site-packages/OpenGL/raw/GL/EXT/gpu_program_parameters.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | Cartwheel/lib/Python26/Lib/site-packages/OpenGL/raw/GL/EXT/gpu_program_parameters.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | '''OpenGL extension EXT.gpu_program_parameters
The official definition of this extension is available here:
http://oss.sgi.com/projects/ogl-sample/registry/EXT/gpu_program_parameters.txt
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
# Canonical OpenGL extension string checked by glInitGpuProgramParametersEXT().
EXTENSION_NAME = 'GL_EXT_gpu_program_parameters'
# ctypes binding: upload `count` 4-float vectors of program *environment*
# parameters starting at `index` (resolved lazily from the GL driver).
glProgramEnvParameters4fvEXT = platform.createExtensionFunction(
'glProgramEnvParameters4fvEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLuint, constants.GLsizei, arrays.GLfloatArray,),
doc = 'glProgramEnvParameters4fvEXT( GLenum(target), GLuint(index), GLsizei(count), GLfloatArray(params) ) -> None',
argNames = ('target', 'index', 'count', 'params',),
)
# Same binding for program *local* parameters.
glProgramLocalParameters4fvEXT = platform.createExtensionFunction(
'glProgramLocalParameters4fvEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLuint, constants.GLsizei, arrays.GLfloatArray,),
doc = 'glProgramLocalParameters4fvEXT( GLenum(target), GLuint(index), GLsizei(count), GLfloatArray(params) ) -> None',
argNames = ('target', 'index', 'count', 'params',),
)
def glInitGpuProgramParametersEXT():
    """Check whether the GL_EXT_gpu_program_parameters extension is available."""
    is_available = extensions.hasGLExtension(EXTENSION_NAME)
    return is_available
| 40.5 | 119 | 0.793553 | '''OpenGL extension EXT.gpu_program_parameters
The official definition of this extension is available here:
http://oss.sgi.com/projects/ogl-sample/registry/EXT/gpu_program_parameters.txt
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
# Canonical OpenGL extension string checked by glInitGpuProgramParametersEXT().
EXTENSION_NAME = 'GL_EXT_gpu_program_parameters'
# ctypes binding: upload `count` 4-float vectors of program *environment*
# parameters starting at `index` (resolved lazily from the GL driver).
glProgramEnvParameters4fvEXT = platform.createExtensionFunction(
'glProgramEnvParameters4fvEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLuint, constants.GLsizei, arrays.GLfloatArray,),
doc = 'glProgramEnvParameters4fvEXT( GLenum(target), GLuint(index), GLsizei(count), GLfloatArray(params) ) -> None',
argNames = ('target', 'index', 'count', 'params',),
)
# Same binding for program *local* parameters.
glProgramLocalParameters4fvEXT = platform.createExtensionFunction(
'glProgramLocalParameters4fvEXT', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLuint, constants.GLsizei, arrays.GLfloatArray,),
doc = 'glProgramLocalParameters4fvEXT( GLenum(target), GLuint(index), GLsizei(count), GLfloatArray(params) ) -> None',
argNames = ('target', 'index', 'count', 'params',),
)
def glInitGpuProgramParametersEXT():
    """Query the running GL context for GL_EXT_gpu_program_parameters support."""
    name = EXTENSION_NAME
    return extensions.hasGLExtension(name)
| 0 | 0 | 0 |
661a072690780c03ca4e1a7c8536ff9fd3e6b78c | 3,295 | py | Python | unit_tests.py | galnegus/smiley-sentiment | 05210b5a521a10f6346b0a2f7787810c75aeab46 | [
"MIT"
] | null | null | null | unit_tests.py | galnegus/smiley-sentiment | 05210b5a521a10f6346b0a2f7787810c75aeab46 | [
"MIT"
] | null | null | null | unit_tests.py | galnegus/smiley-sentiment | 05210b5a521a10f6346b0a2f7787810c75aeab46 | [
"MIT"
] | null | null | null | import re, unittest
from feature_reduction import *
# user pattern tests
# url pattern tests
# more url tests (if needed)
# https://mathiasbynens.be/demo/url-regex
# repeating pattern tests
# reduce tests
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 42.792208 | 158 | 0.750228 | import re, unittest
from feature_reduction import *
class UnitTests(unittest.TestCase):
    """Tests for the tweet feature-reduction regexes and the reduce() pipeline.

    Exercises user_pattern / url_pattern / repeating_pattern (and their
    replacement tokens) imported from feature_reduction.
    """
    # user pattern tests
    def test_user_pattern_token(self):
        """@-mentions are replaced by user_token; e-mail-like text is left alone."""
        replace_test = user_pattern.sub(user_token, '@lololol, h@h@h@ ipsum@dolor.co \'@aaa\'')
        replace_answer = user_token + ', h@h@h@ ipsum@dolor.co \'' + user_token + '\''
        self.assertEqual(replace_test, replace_answer)
    def test_user_pattern_match(self):
        """Valid @handles (letters/digits/underscores) fully match."""
        self.assertTrue(user_pattern.fullmatch('@abc'))
        self.assertTrue(user_pattern.fullmatch('@a2____224'))
        self.assertTrue(user_pattern.fullmatch('@108'))
    def test_user_pattern_fail(self):
        """Embedded '@', dots, leading quote, >15 chars, and empty string are rejected."""
        self.assertFalse(user_pattern.fullmatch('fuu@bar'))
        self.assertFalse(user_pattern.fullmatch('@abc.de'))
        self.assertFalse(user_pattern.fullmatch('\'@kek'))
        self.assertFalse(user_pattern.fullmatch('@1234567890123456'))
        self.assertFalse(user_pattern.fullmatch(''))
    # url pattern tests
    def test_url_pattern_token(self):
        """URLs are replaced by url_token while surrounding text is preserved."""
        replace_test = url_pattern.sub(url_token, 'http://foo.com/blah_blah test https://142.42.1.1/')
        replace_answer = url_token + ' test ' + url_token
        self.assertEqual(replace_test, replace_answer)
    def test_url_pattern_match(self):
        """http/https URLs, bare IPs, and query strings fully match."""
        self.assertTrue(url_pattern.fullmatch("http://www.com"))
        self.assertTrue(url_pattern.fullmatch("https://142.42.1.1/"))
        self.assertTrue(url_pattern.fullmatch("https://www.example.com/foo/?bar=baz&inga=42&quux"))
    def test_url_pattern_fail(self):
        """Malformed or space-containing URLs and empty string are rejected."""
        self.assertFalse(url_pattern.fullmatch('http:/'))
        self.assertFalse(url_pattern.fullmatch('abc'))
        self.assertFalse(url_pattern.fullmatch('http://www .website .com'))
        self.assertFalse(url_pattern.fullmatch(''))
    # more url tests (if needed)
    # https://mathiasbynens.be/demo/url-regex
    # repeating pattern tests
    def test_repeating_pattern(self):
        """Runs of >= 3 identical characters collapse to exactly two."""
        replace_test = repeating_pattern.sub(repeating_token, 'yyyyeeeeeeaaaaaaahhhhhhhh....... fooootball')
        replace_answer = 'yyeeaahh.. football'
        self.assertEqual(replace_test, replace_answer)
    def test_repeating_match(self):
        """Runs of three or more identical characters fully match."""
        self.assertTrue(repeating_pattern.fullmatch('ooooooooo'))
        self.assertTrue(repeating_pattern.fullmatch('......'))
        self.assertTrue(repeating_pattern.fullmatch('111'))
    def test_repeating_fail(self):
        """Pairs, alternating runs shorter than 3, and empty string are rejected."""
        self.assertFalse(repeating_pattern.fullmatch('oo'))
        self.assertFalse(repeating_pattern.fullmatch('11112222'))
        self.assertFalse(repeating_pattern.fullmatch(''))
    # reduce tests
    def test_reduce(self):
        """End-to-end reduce(): mentions/URLs tokenized and repeats collapsed together."""
        test_1 = reduce('@angelicbiscuit http://twitpic.com/7pf62 - not a "get lost in melbourne" ad rip off we r sydney :p')
        answer_1 = user_token + ' ' + url_token + ' - not a "get lost in melbourne" ad rip off we r sydney :p'
        self.assertEqual(test_1, answer_1)
        test_2 = reduce('im soooooooooooooo booooooooooooooooooorrrrrrrrrrrrred an i havent even been finished exams for a day damn you xbox live. damn u to hell')
        answer_2 = 'im soo boorred an i havent even been finished exams for a day damn you xbox live. damn u to hell'
        self.assertEqual(test_2, answer_2)
        test_3 = reduce('i\'ve only been in sydney for 3 hrs but I miss my friends especially @ktjade!!!')
        answer_3 = 'i\'ve only been in sydney for 3 hrs but I miss my friends especially ' + user_token + '!!'
        self.assertEqual(test_3, answer_3)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 2,757 | 14 | 259 |
a47c3f87741ef98e7a475ca34e8ea398aab075b3 | 8,051 | py | Python | lisalike.py | geoffrey4444/numerical-orbits | 7b27e9e7e39c4fd38468ce638aa081013da9b19a | [
"MIT"
] | null | null | null | lisalike.py | geoffrey4444/numerical-orbits | 7b27e9e7e39c4fd38468ce638aa081013da9b19a | [
"MIT"
] | null | null | null | lisalike.py | geoffrey4444/numerical-orbits | 7b27e9e7e39c4fd38468ce638aa081013da9b19a | [
"MIT"
] | null | null | null | # Functions relevant for LISA-like orbits
#
# A LISA-like orbit has 3 satellites in a triangular configuration.
# Eccentricity and inclination combine to make the triangle
# tumble with a minimal amount of variation in the arm lengths
# See, e.g., K Rajesh Nayak et al, Class. Quantum Grav. 23, 1763 (2006).
import math
from constants import constants as k
import numpy as np
from scipy.optimize import newton
# Defaults: orbit average radius = 1.0 AU
# orbit angualr velocity average = omegaEarthPerDay1Body (assume only Sun)
# delta = 0.0
# Sigma is a phase that depends on whichSatellite = 1,2,3
# Returns a function of eccentric anomaly
# Numerically find the root of this function to get the eccentric anomaly
# $\Omega t - \sigma_k - \psi_k = e \sin\psi_k$
# $\Omega - \frac{d\psi_k}{dt} = e \cos\psi_k \frac{d\psi_k}{dt}$
# $\frac{d\psi_k}{dt} = \Omega / (1 + e \cos\psi_k)$
# Note: whichSatellite = 1,2,3 for the 3 satellites
| 58.34058 | 199 | 0.607378 | # Functions relevant for LISA-like orbits
#
# A LISA-like orbit has 3 satellites in a triangular configuration.
# Eccentricity and inclination combine to make the triangle
# tumble with a minimal amount of variation in the arm lengths
# See, e.g., K Rajesh Nayak et al, Class. Quantum Grav. 23, 1763 (2006).
import math
from constants import constants as k
import numpy as np
from scipy.optimize import newton
class lisalike:
    """Analytic orbit model for a LISA-like three-satellite constellation.

    Each satellite follows a slightly eccentric, slightly inclined
    heliocentric orbit; the eccentricity/inclination combination makes the
    triangle tumble with minimal arm-length variation.  Geometry follows
    K. Rajesh Nayak et al., Class. Quantum Grav. 23, 1763 (2006), Eq. (1).

    Lengths are in units of ``orbitRadius`` (AU by default); times are in
    the unit implied by ``orbitFrequency``.  ``whichSatellite`` is 1, 2 or 3
    throughout; ``x0`` is the initial guess handed to the Newton solver.
    """

    # Defaults: orbit average radius = 1.0 AU
    # orbit angular velocity average = omegaEarthPerDay1Body (assume only Sun)
    # delta = 0.0
    def __init__(self, triangleSideLength=k.rLISAm/k.mPerAU, orbitRadius=1.0, orbitFrequency=k.omegaEarthPerDay1Body, delta=0.0):
        """Precompute the constellation geometry (alpha, inclination, eccentricity)."""
        self.triangleSideLength = triangleSideLength
        self.orbitRadius = orbitRadius
        self.orbitFrequency = orbitFrequency
        # delta is a (small) correction to the orbital plane tilt, as in
        # Fig. 1 of Nayak (2006).
        self.delta = delta
        # alpha = (arm length) / (2 * orbit radius), the small expansion parameter.
        self.alpha = self.triangleSideLength / (2.0 * orbitRadius)
        # Eq. (1). of Nayak (2006)
        tanInclinationNumerator = (2.0*self.alpha/math.sqrt(3)) * math.sin(math.pi/3.0 + self.delta)
        tanInclinationDenominator = (1.0 + (2.0 * self.alpha / math.sqrt(3)) * math.cos(math.pi/3.0 + self.delta))
        self.inclination = math.atan2(tanInclinationNumerator, tanInclinationDenominator)
        self.eccentricity = math.sqrt(1+(4.0/3.0)*self.alpha*self.alpha + (4.0/math.sqrt(3.0))*self.alpha * math.cos(math.pi/3.0 + self.delta)) - 1.0
        # NOTE(review): no method below reads or updates this attribute; it is
        # kept only so existing callers that inspect it keep working.
        self.lastEccentricAnomaly = 0.0

    # Sigma is a phase that depends on whichSatellite = 1,2,3
    def sigma(self, whichSatellite):
        """Return the orbital phase offset (0, 2*pi/3, 4*pi/3) for satellite 1, 2, 3."""
        return float(whichSatellite - 1) * 2.0 * math.pi / 3.0

    # Returns a function of eccentric anomaly
    # Numerically find the root of this function to get the eccentric anomaly
    def getEccentricAnomalyFunction(self, time, whichSatellite):
        """Return f(psi) whose root is the eccentric anomaly at ``time``.

        Implements psi + e*sin(psi) = Omega*t - sigma_k (the sign convention
        used in the comments below, from Nayak (2006); note this is not the
        standard Kepler-equation sign).
        """
        def eccentricAnomalyFunction(eccentricAnomaly):
            return eccentricAnomaly + self.eccentricity * math.sin(eccentricAnomaly) - self.orbitFrequency * time + self.sigma(whichSatellite)
        return eccentricAnomalyFunction

    def getEccentricAnomaly(self, time, whichSatellite, x0=0.0):
        """Solve for the eccentric anomaly with Newton's method; x0 is the initial guess."""
        return newton(self.getEccentricAnomalyFunction(time, whichSatellite), x0, tol=1.e-15, maxiter=int(1e9))

    # $\Omega t - \sigma_k - \psi_k = e \sin\psi_k$
    # $\Omega - \frac{d\psi_k}{dt} = e \cos\psi_k \frac{d\psi_k}{dt}$
    # $\frac{d\psi_k}{dt} = \Omega / (1 + e \cos\psi_k)$
    def dtEccentricAnomaly(self, time, whichSatellite, x0=0.0):
        """First time derivative of the eccentric anomaly (see identity above)."""
        eccentricAnomaly = self.getEccentricAnomaly(time, whichSatellite, x0)
        return self.orbitFrequency / (1.0 + self.eccentricity * math.cos(eccentricAnomaly))

    def ddtEccentricAnomaly(self, time, whichSatellite, x0=0.0):
        """Second time derivative of the eccentric anomaly (chain rule on dt form)."""
        eccentricAnomaly = self.getEccentricAnomaly(time, whichSatellite, x0)
        dtEccentricAnomaly = self.dtEccentricAnomaly(time, whichSatellite, x0)
        return self.orbitFrequency * self.eccentricity * math.sin(eccentricAnomaly) * dtEccentricAnomaly / (1.0 + self.eccentricity * math.cos(eccentricAnomaly))**2

    def position1(self, time, whichSatellite, x0=0.0):
        """Position in the frame of satellite 1 (before the sigma rotation)."""
        eccentricAnomaly = self.getEccentricAnomaly(time, whichSatellite, x0)
        temp = (math.cos(eccentricAnomaly)+self.eccentricity)
        position1 = np.array([self.orbitRadius * temp * math.cos(self.inclination),
                              self.orbitRadius * math.sin(eccentricAnomaly)*math.sqrt(1.0-self.eccentricity**2),
                              self.orbitRadius * temp * math.sin(self.inclination)])
        return position1

    # Note: whichSatellite = 1,2,3 for the 3 satellites
    def position(self, time, whichSatellite, x0=0.0):
        """Heliocentric position: position1 rotated about z by the phase sigma_k."""
        position1 = self.position1(time, whichSatellite, x0)
        sigma = self.sigma(whichSatellite)
        position = np.array([position1[0] * math.cos(sigma) - position1[1] * math.sin(sigma),
                             position1[0] * math.sin(sigma) + position1[1] * math.cos(sigma),
                             position1[2]])
        return position

    def relativePosition(self, time, whichSatellite, x0=0.0):
        """Position relative to the guiding center (circular orbit of radius orbitRadius)."""
        position = self.position(time, whichSatellite, x0)
        return position - self.orbitRadius * np.array([math.cos(self.orbitFrequency * time),
                                                       math.sin(self.orbitFrequency * time),
                                                       0.0])

    def velocity1(self, time, whichSatellite, x0=0.0):
        """Time derivative of position1 (chain rule through the eccentric anomaly)."""
        eccentricAnomaly = self.getEccentricAnomaly(time, whichSatellite, x0)
        velocity1 = self.orbitRadius * np.array([-1.0 * math.sin(eccentricAnomaly) * math.cos(self.inclination),
                                                 math.cos(eccentricAnomaly)*math.sqrt(1.0-self.eccentricity**2),
                                                 -1.0 * math.sin(eccentricAnomaly) * math.sin(self.inclination)
                                                 ])
        velocity1 *= self.dtEccentricAnomaly(time, whichSatellite, x0)
        return velocity1

    def velocity(self, time, whichSatellite, x0=0.0):
        """Heliocentric velocity: velocity1 rotated about z by the phase sigma_k."""
        velocity1 = self.velocity1(time, whichSatellite, x0)
        sigma = self.sigma(whichSatellite)
        velocity = np.array([velocity1[0] * math.cos(sigma) - velocity1[1] * math.sin(sigma),
                             velocity1[0] * math.sin(sigma) + velocity1[1] * math.cos(sigma),
                             velocity1[2]])
        return velocity

    def relativeVelocity(self, time, whichSatellite, x0=0.0):
        """Velocity relative to the guiding center.

        Bug fix: the caller-supplied initial guess ``x0`` is now forwarded to
        velocity() -- it was previously hard-coded to 0.0 and silently
        ignored (relativePosition() already forwarded it correctly).
        """
        velocity = self.velocity(time, whichSatellite, x0)
        return velocity - self.orbitRadius * self.orbitFrequency * np.array([-1.0* math.sin(self.orbitFrequency * time),
                                                                             math.cos(self.orbitFrequency * time),
                                                                             0.0])

    def acceleration1(self, time, whichSatellite, x0=0.0):
        """Second time derivative of position1 (product rule: psi-dot^2 and psi-ddot terms)."""
        eccentricAnomaly = self.getEccentricAnomaly(time, whichSatellite, x0)
        acceleration1 = self.orbitRadius * np.array([-1.0 * math.cos(eccentricAnomaly) * math.cos(self.inclination),
                                                     -1.0 * math.sin(eccentricAnomaly)*math.sqrt(1.0-self.eccentricity**2),
                                                     -1.0 * math.cos(eccentricAnomaly) * math.sin(self.inclination)
                                                     ])
        acceleration1 *= self.dtEccentricAnomaly(time, whichSatellite, x0) * self.dtEccentricAnomaly(time, whichSatellite, x0)
        acceleration1 += self.orbitRadius * np.array([-1.0 * math.sin(eccentricAnomaly) * math.cos(self.inclination),
                                                      math.cos(eccentricAnomaly)*math.sqrt(1.0-self.eccentricity**2),
                                                      -1.0 * math.sin(eccentricAnomaly) * math.sin(self.inclination)
                                                      ]) * self.ddtEccentricAnomaly(time, whichSatellite, x0)
        return acceleration1

    def acceleration(self, time, whichSatellite, x0=0.0):
        """Heliocentric acceleration: acceleration1 rotated about z by sigma_k."""
        acceleration1 = self.acceleration1(time, whichSatellite, x0)
        sigma = self.sigma(whichSatellite)
        acceleration = np.array([acceleration1[0] * math.cos(sigma) - acceleration1[1] * math.sin(sigma),
                                 acceleration1[0] * math.sin(sigma) + acceleration1[1] * math.cos(sigma),
                                 acceleration1[2]])
        return acceleration

    def relativeAcceleration(self, time, whichSatellite, x0=0.0):
        """Acceleration relative to the guiding center.

        Bug fix: ``x0`` is now forwarded to acceleration() instead of being
        overridden with 0.0 (same defect as relativeVelocity had).
        """
        acceleration = self.acceleration(time, whichSatellite, x0)
        return acceleration - self.orbitRadius * self.orbitFrequency * self.orbitFrequency * np.array([-1.0 * math.cos(self.orbitFrequency * time), -1.0 * math.sin(self.orbitFrequency * time), 0.0])
ac6c1fb93062e7b35775300860d24ca6f86363e2 | 2,297 | py | Python | src/MLP/Train_Ann.py | NaskIII/skin-cancer-detection | 74352b4f83531abf87d38b59683b4b3e5e630963 | [
"MIT"
] | 1 | 2021-09-03T21:49:18.000Z | 2021-09-03T21:49:18.000Z | src/MLP/Train_Ann.py | NaskIII/skin-cancer-detection | 74352b4f83531abf87d38b59683b4b3e5e630963 | [
"MIT"
] | null | null | null | src/MLP/Train_Ann.py | NaskIII/skin-cancer-detection | 74352b4f83531abf87d38b59683b4b3e5e630963 | [
"MIT"
] | null | null | null | from Neural_Network import Neural_Network
# Script entry point: train the network, then optionally persist it.
if __name__ == '__main__':
    ann, model = train_network()
    save_ann(ann, model)
| 26.709302 | 121 | 0.56552 | from Neural_Network import Neural_Network
def train_network():
    """Train the skin-melanoma MLP and print its confusion-matrix metrics.

    Loads the normalized ISIC dataset (column 0 = label, columns 1-9 =
    features), trains for 200 epochs, evaluates on the held-out split,
    prints specificity/sensitivity/precision/accuracy (labels in
    Portuguese), plots the ROC curve, and returns (ann, model).
    Note: the dataset path is an absolute Windows path -- machine-specific.
    """
    ann = Neural_Network()
    # 165 is presumably the size of the held-out test split -- TODO confirm
    # against Neural_Network.load_data.
    datasetTrain, datasetTest = ann.load_data(
        r'C:\Users\Raphael Nascimento\PycharmProjects\skin-melanoma-detector\src\Data\Dataset_ISIC_Normalizado.csv', 165)
    # Column 0 is the class label (0 = benign, 1 = melanoma, inferred from
    # the TN/TP bookkeeping below); columns 1..9 are the features.
    xTrain = datasetTrain[:, 1:10]
    yTrain = datasetTrain[:, 0]
    model = ann.createModel()
    history = ann.trainModel(model, xTrain, yTrain, 200)
    print(ann.evaluateModel(model, xTrain, yTrain))
    xTest = datasetTest[:, 1:10]
    yTest = datasetTest[:, 0]
    print(ann.evaluateModel(model, xTest, yTest))
    predictions = ann.makePredictions_classes(model, xTest)
    # Confusion-matrix tallies over the test split.
    correct = 0
    incorrect = 0
    totalSpec = 0   # number of negative (class 0) samples
    totalSensi = 0  # number of positive (class 1) samples
    TP = 0
    FP = 0
    TN = 0
    FN = 0
    for i in yTest:
        if i == 0:
            totalSpec += 1
        else:
            totalSensi += 1
    # Classify each prediction into TP/TN/FP/FN.
    for i in range(len(xTest)):
        if predictions[i] == yTest[i]:
            correct += 1
            if predictions[i] == 0:
                TN += 1
            elif predictions[i] == 1:
                TP += 1
        else:
            incorrect += 1
            if predictions[i] == 0:
                FN += 1
            elif predictions[i] == 1:
                FP += 1
    print('''Corretas: %s;
    Incorretas: %s;
    Especificidade: %s;
    Sensibilidade: %s;
    ''' %
          (correct, incorrect, TN, TP))
    print('Total Especificidade: %s; Total Sensibilidade: %s;' % (totalSpec, totalSensi))
    # totalSpec == TN + FP and totalSensi == TP + FN, so these percentages
    # match the explicit formulas printed below.
    print('''Porcentagem Especificidade: %s;
    Porcentagem Sensibilidade: %s;
    Porcentagem Acurácia: %s;
    ''' %
          (TN / totalSpec * 100, TP / totalSensi * 100, correct / len(xTest) * 100))
    print('Especificidade: %s' % ((TN / (TN + FP)) * 100))
    print('Sensibilidade: %s' % ((TP / (TP + FN)) * 100))
    print('Precisão: %s' % ((TP / (TP + FP)) * 100))
    print('Acurácia: %s' % (((TP + TN) / (TP + TN + FP + FN)) * 100))
    predictions_ravel = ann.make_predictions_ravel(model, xTest)
    ann.roc_curve(predictions_ravel, yTest)
    return ann, model
def save_ann(ann, model):
    """Ask the user (prompt in Portuguese) whether to export the trained model.

    Entering 1 exports the model via ann.export_model; anything else prints
    'Finish' and returns.
    """
    choice = int(input("Salvar a Rede: "))
    if choice != 1:
        print('Finish')
        return
    print(ann.export_model(model))
# Script entry point: train the network, then optionally persist it.
if __name__ == '__main__':
    ann, model = train_network()
    save_ann(ann, model)
| 2,123 | 0 | 46 |
a13c3c541bbc416cbb9283faa27af5c499b9e5cf | 38,027 | py | Python | 0900-hp/hplip-3.21.12/setup.py | rgfaber/dev-toolkit | b7e6b1e35a4bfb8ca9ba75e5556917cc49b88f7f | [
"Apache-2.0"
] | null | null | null | 0900-hp/hplip-3.21.12/setup.py | rgfaber/dev-toolkit | b7e6b1e35a4bfb8ca9ba75e5556917cc49b88f7f | [
"Apache-2.0"
] | null | null | null | 0900-hp/hplip-3.21.12/setup.py | rgfaber/dev-toolkit | b7e6b1e35a4bfb8ca9ba75e5556917cc49b88f7f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# (c) Copyright 2003-2015 HP Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Don Welch
#
# Tool metadata consumed by the HPLIP `module` framework (usage/help text,
# version banner) -- see module.Module(...) further down in this file.
__version__ = '9.0'
__title__ = 'Printer/Fax Setup Utility'
__mod__ = 'hp-setup'
__doc__ = "Installs HPLIP printers and faxes in the CUPS spooler. Tries to automatically determine the correct PPD file to use. Allows the printing of a testpage. Performs basic fax parameter setup."
# Std Lib
import sys
import getopt
import time
import os.path
import re
import os
import gzip
try:
import readline
except ImportError:
pass
# Local
from base.g import *
from base import device, utils, tui, models, module, services, os_utils
from prnt import cups
from base.sixext.moves import input
from base.sixext import to_unicode, from_unicode_to_str
try:
from importlib import import_module
except ImportError as e:
log.debug(e)
from base.utils import dyn_import_mod as import_module
# Module-global placeholder (presumably a progress/plugin manager assigned
# later in the script; never set in the visible portion -- TODO confirm).
pm = None
# Extracts the model nickname from a PPD file, e.g.:  *NickName: "HP ..."
nickname_pat = re.compile(r'''\*NickName:\s*\"(.*)"''', re.MULTILINE)
USAGE = [ (__doc__, "", "name", True),
("Usage: %s [MODE] [OPTIONS] [SERIAL NO.|USB bus:device|IP|DEVNODE]" % __mod__, "", "summary", True),
utils.USAGE_MODE,
utils.USAGE_GUI_MODE,
utils.USAGE_INTERACTIVE_MODE,
utils.USAGE_SPACE,
utils.USAGE_OPTIONS,
("Automatic mode:", "-a or --auto (-i mode only)", "option", False),
("To specify the port on a multi-port JetDirect:", "--port=<port> (Valid values are 1\*, 2, and 3. \*default)", "option", False),
("No testpage in automatic mode:", "-x (-i mode only)", "option", False),
("To specify a CUPS printer queue name:", "-p<printer> or --printer=<printer> (-i mode only)", "option", False),
("To specify a CUPS fax queue name:", "-f<fax> or --fax=<fax> (-i mode only)", "option", False),
("Type of queue(s) to install:", "-t<typelist> or --type=<typelist>. <typelist>: print*, fax\* (\*default) (-i mode only)", "option", False),
("To specify the device URI to install:", "-d<device> or --device=<device> (--qt4 mode only)", "option", False),
("Remove printers or faxes instead of setting-up:", "-r or --rm or --remove", "option", False),
utils.USAGE_LANGUAGE,
utils.USAGE_LOGGING1, utils.USAGE_LOGGING2, utils.USAGE_LOGGING3,
utils.USAGE_HELP,
("[SERIAL NO.|USB ID|IP|DEVNODE]", "", "heading", False),
("USB bus:device (usb only):", """"xxx:yyy" where 'xxx' is the USB bus and 'yyy' is the USB device. (Note: The ':' and all leading zeros must be present.)""", 'option', False),
("", "Use the 'lsusb' command to obtain this information.", "option", False),
("IPs (network only):", 'IPv4 address "a.b.c.d" or "hostname"', "option", False),
("DEVNODE (parallel only):", '"/dev/parportX", X=0,1,2,...', "option", False),
("SERIAL NO. (usb and parallel only):", '"serial no."', "option", True),
utils.USAGE_EXAMPLES,
("Setup using GUI mode:", "$ hp-setup", "example", False),
("Setup using GUI mode, specifying usb:", "$ hp-setup -b usb", "example", False),
("Setup using GUI mode, specifying an IP:", "$ hp-setup 192.168.0.101", "example", False),
("One USB printer attached, automatic:", "$ hp-setup -i -a", "example", False),
("USB, IDs specified:", "$ hp-setup -i 001:002", "example", False),
("Network:", "$ hp-setup -i 66.35.250.209", "example", False),
("Network, Jetdirect port 2:", "$ hp-setup -i --port=2 66.35.250.209", "example", False),
("Parallel:", "$ hp-setup -i /dev/parport0", "example", False),
("USB or parallel, using serial number:", "$ hp-setup -i US12345678A", "example", False),
("USB, automatic:", "$ hp-setup -i --auto 001:002", "example", False),
("Parallel, automatic, no testpage:", "$ hp-setup -i -a -x /dev/parport0", "example", False),
("Parallel, choose device:", "$ hp-setup -i -b par", "example", False),
utils.USAGE_SPACE,
utils.USAGE_NOTES,
("1. If no serial number, USB ID, IP, or device node is specified, the USB and parallel busses will be probed for devices.", "", 'note', False),
("2. Using 'lsusb' to obtain USB IDs: (example)", "", 'note', False),
(" $ lsusb", "", 'note', False),
(" Bus 003 Device 011: ID 03f0:c202 Hewlett-Packard", "", 'note', False),
(" $ hp-setup --auto 003:011", "", 'note', False),
(" (Note: You may have to run 'lsusb' from /sbin or another location. Use '$ locate lsusb' to determine this.)", "", 'note', True),
("3. Parameters -a, -f, -p, or -t are not valid in GUI (-u) mode.", "", 'note', True),
utils.USAGE_SPACE,
utils.USAGE_SEEALSO,
("hp-makeuri", "", "seealso", False),
("hp-probe", "", "seealso", False),
]
# Build the standard HPLIP command-line module: registers the USAGE text,
# the supported run modes and Qt toolkits; root execution is allowed.
mod = module.Module(__mod__, __title__, __version__, __doc__, USAGE,
                    (INTERACTIVE_MODE, GUI_MODE),
                    (UI_TOOLKIT_QT3, UI_TOOLKIT_QT4, UI_TOOLKIT_QT5),
                    run_as_root_ok=True)
# Parse the common HPLIP options plus this tool's short/long extras; device
# and printer arguments are handled manually below (handle_device_printer=False).
opts, device_uri, printer_name, mode, ui_toolkit, loc = \
    mod.parseStdOpts('axp:P:f:t:b:d:rq',
                     ['ttl=', 'filter=', 'search=', 'find=',
                      'method=', 'time-out=', 'timeout=',
                      'printer=', 'fax=', 'type=', 'port=',
                      'auto', 'device=', 'rm', 'remove'],
                     handle_device_printer=False)
# Defaults for all command-line-driven settings.
selected_device_name = None
printer_name = None
fax_name = None
bus = None
setup_print = True
setup_fax = True
makeuri = None
auto = False
testpage_in_auto_mode = True
jd_port = 1
remove = False
ignore_plugin_check = False
# Translate the parsed (option, argument) pairs into the flags above.
for o, a in opts:
    if o == '-x':
        testpage_in_auto_mode = False
    elif o in ('-P', '-p', '--printer'):
        printer_name = a
    elif o in ('-f', '--fax'):
        fax_name = a
    elif o in ('-d', '--device'):
        device_uri = a
    elif o in ('-b', '--bus'):
        bus = [x.lower().strip() for x in a.split(',')]
        if not device.validateBusList(bus, False):
            mod.usage(error_msg=['Invalid bus name'])
    elif o in ('-t', '--type'):
        # -t replaces the defaults: only the listed queue types are set up.
        setup_fax, setup_print = False, False
        a = a.strip().lower()
        for aa in a.split(','):
            if aa.strip() not in ('print', 'fax'):
                mod.usage(error_msg=['Invalid type.'])
            if aa.strip() == 'print':
                setup_print = True
            elif aa.strip() == 'fax':
                if not prop.fax_build:
                    log.error("Cannot enable fax setup - HPLIP not built with fax enabled.")
                else:
                    setup_fax = True
    elif o == '--port':
        try:
            jd_port = int(a)
        except ValueError:
            #log.error("Invalid port number. Must be between 1 and 3 inclusive.")
            mod.usage(error_msg=['Invalid port number. Must be between 1 and 3 inclusive.'])
    elif o in ('-a', '--auto'):
        auto = True
    elif o in ('-r', '--rm', '--remove'):
        remove = True
    # NOTE(review): ('-q') is a plain string, not a tuple, so this is a
    # substring test; it works for the exact option '-q' but ('-q',) was
    # probably intended.
    elif o in ('-q'):
        ignore_plugin_check = True
# First positional argument: serial no. / USB bus:device / IP / devnode.
try:
    param = mod.args[0]
except IndexError:
    param = ''
log.debug("param=%s" % param)
# -p takes precedence over -f when choosing the queue to operate on.
if printer_name is not None:
    selected_device_name = printer_name
else:
    if fax_name is not None:
        selected_device_name = fax_name
log.debug("selected_device_name=%s" % selected_device_name)
# GUI prerequisites: -p/-f are interactive-only, and the selected Qt toolkit
# must actually be importable before entering GUI mode.
if mode == GUI_MODE:
    if selected_device_name is not None:
        log.warning("-p or -f option is not supported")
    if ui_toolkit == 'qt3':
        if not utils.canEnterGUIMode():
            log.error("%s requires GUI support (try running with --qt4). Also, try using interactive (-i) mode." % __mod__)
            clean_exit(1)
    else:
        if not utils.canEnterGUIMode4():
            log.error("%s requires GUI support (try running with --qt3). Also, try using interactive (-i) mode." % __mod__)
            clean_exit(1)
# Launch the setup GUI (legacy Qt3 path, or Qt4/Qt5 via utils.import_dialog).
if mode == GUI_MODE:
    if ui_toolkit == 'qt3':
        try:
            from qt import *
            from ui import setupform
        except ImportError:
            log.error("Unable to load Qt3 support. Is it installed?")
            clean_exit(1)
        if remove:
            log.warn("-r/--rm/--remove not supported in qt3 mode.")
        app = QApplication(sys.argv)
        QObject.connect(app, SIGNAL("lastWindowClosed()"), app, SLOT("quit()"))
        # Resolve the UI locale: explicit option > user config > system.
        if loc is None:
            loc = user_conf.get('ui', 'loc', 'system')
            if loc.lower() == 'system':
                loc = str(QTextCodec.locale())
                log.debug("Using system locale: %s" % loc)
        if loc.lower() != 'c':
            # Force a utf8 encoding suffix, then try to load the matching
            # hplip_<lang>.qm translation file; fall back to 'c' on failure.
            e = 'utf8'
            try:
                l, x = loc.split('.')
                loc = '.'.join([l, e])
            except ValueError:
                l = loc
                loc = '.'.join([loc, e])
            log.debug("Trying to load .qm file for %s locale." % loc)
            trans = QTranslator(None)
            qm_file = 'hplip_%s.qm' % l
            log.debug("Name of .qm file: %s" % qm_file)
            loaded = trans.load(qm_file, prop.localization_dir)
            if loaded:
                app.installTranslator(trans)
            else:
                loc = 'c'
        if loc == 'c':
            log.debug("Using default 'C' locale")
        else:
            log.debug("Using locale: %s" % loc)
            QLocale.setDefault(QLocale(loc))
            prop.locale = loc
            try:
                locale.setlocale(locale.LC_ALL, locale.normalize(loc))
            except locale.Error:
                pass
        try:
            w = setupform.SetupForm(bus, param, jd_port)
        except Error:
            log.error("Unable to connect to HPLIP I/O. Please (re)start HPLIP and try again.")
            clean_exit(1)
        app.setMainWidget(w)
        w.show()
        app.exec_loop()
        cups.releaseCupsInstance()
    else: # qt4
        # if utils.ui_status[1] == "PyQt4":
        #     try:
        #         from PyQt4.QtGui import QApplication, QMessageBox
        #         from ui4.setupdialog import SetupDialog
        #     except ImportError as e:
        #         log.error(e)
        #         clean_exit(1)
        # elif utils.ui_status[1] == "PyQt5":
        #     try:
        #         from PyQt5.QtWidgets import QApplication, QMessageBox
        #         from ui5.setupdialog import SetupDialog
        #     except ImportError as e:
        #         log.error(e)
        #         clean_exit(1)
        # else:
        #     log.error("Unable to load Qt support. Is it installed?")
        #     clean_exit(1)
        # import_dialog picks PyQt4 or PyQt5 at runtime and returns the
        # QApplication class plus the matching ui package name (ui4/ui5).
        QApplication, ui_package = utils.import_dialog(ui_toolkit)
        ui = import_module(ui_package + ".setupdialog")
        app = QApplication(sys.argv)
        log.debug("Sys.argv=%s printer_name=%s param=%s jd_port=%s device_uri=%s remove=%s" % (sys.argv, printer_name, param, jd_port, device_uri, remove))
        dlg = ui.SetupDialog(None, param, jd_port, device_uri, remove)
        dlg.show()
        try:
            log.debug("Starting GUI Event Loop...")
            app.exec_()
        except KeyboardInterrupt:
            clean_exit(0)
else: # INTERACTIVE_MODE
try:
try:
from base import password
except ImportError:
log.warn("Failed to import Password Object")
else:
cups.setPasswordCallback(password.showPasswordPrompt)
#Removing Queue
if remove:
tui.header("REMOVING PRINT/FAX QUEUE")
sts, printer_name, device_uri = mod.getPrinterName(selected_device_name,None,['hp','hpfax'])
selected_device_name = printer_name
log.info (log.bold("Removing '%s : %s' Queue"%(printer_name, device_uri)))
status, status_str = cups.cups_operation(cups.delPrinter, INTERACTIVE_MODE, '', None, selected_device_name)
if cups.IPP_OK == status:
log.info("Successfully deleted %s Print/Fax queue"%selected_device_name)
utils.sendEvent(EVENT_CUPS_QUEUES_REMOVED,device_uri, printer_name)
clean_exit(0)
else:
log.error("Failed to delete %s Print/Fax queue. Error : %s"%(selected_device_name,status_str))
clean_exit(1)
if not auto:
log.info("(Note: Defaults for each question are maked with a '*'. Press <enter> to accept the default.)")
log.info("")
# ******************************* MAKEURI
if param:
device_uri, sane_uri, fax_uri = device.makeURI(param, jd_port)
# ******************************* CONNECTION TYPE CHOOSER
if not device_uri and bus is None:
bus = tui.connection_table()
if bus is None:
clean_exit(0)
log.info("\nUsing connection type: %s" % bus[0])
log.info("")
# ******************************* DEVICE CHOOSER
if not device_uri:
log.debug("\nDEVICE CHOOSER setup_fax=%s, setup_print=%s" % (setup_fax, setup_print))
device_uri = mod.getDeviceUri(devices = device.probeDevices(bus))
if not device_uri:
clean_exit(0)
# ******************************* QUERY MODEL AND COLLECT PPDS
log.info(log.bold("\nSetting up device: %s\n" % device_uri))
log.info("")
print_uri = device_uri.replace("hpfax:", "hp:")
fax_uri = device_uri.replace("hp:", "hpfax:")
back_end, is_hp, bus, model, \
serial, dev_file, host, zc, port = \
device.parseDeviceURI(device_uri)
log.debug("Model=%s" % model)
mq = device.queryModelByURI(device_uri)
if not mq or mq.get('support-type', SUPPORT_TYPE_NONE) == SUPPORT_TYPE_NONE:
log.error("Unsupported printer model.")
clean_exit(1)
if mq.get('fax-type', FAX_TYPE_NONE) in (FAX_TYPE_NONE, FAX_TYPE_NOT_SUPPORTED) and setup_fax:
#log.warning("Cannot setup fax - device does not have fax feature.")
setup_fax = False
# ******************************* PLUGIN
norm_model = models.normalizeModelName(model).lower()
plugin = mq.get('plugin', PLUGIN_NONE)
if ignore_plugin_check is False and plugin > PLUGIN_NONE:
from installer import pluginhandler
pluginObj = pluginhandler.PluginHandle()
plugin_sts = pluginObj.getStatus()
if plugin_sts != pluginhandler.PLUGIN_INSTALLED:
if plugin_sts == pluginhandler.PLUGIN_VERSION_MISMATCH:
tui.header("UPDATING PLUGIN")
else:
tui.header("PLUG-IN INSTALLATION")
hp_plugin = utils.which('hp-plugin')
if hp_plugin:
cmd = "hp-plugin -i"
if os_utils.execute(cmd) != 0:
log.error("Failed to install Plugin.")
log.error("The device you are trying to setup requires a binary plug-in. Some functionalities may not work as expected without plug-ins. Please run 'hp-plugin' as normal user to install plug-ins.Visit http://hplipopensource.com for more infomation.")
clean_exit(1)
ppds = cups.getSystemPPDs()
default_model = utils.xstrip(model.replace('series', '').replace('Series', ''), '_')
installed_print_devices = device.getSupportedCUPSDevices(['hp'])
for d in list(installed_print_devices.keys()):
for p in installed_print_devices[d]:
log.debug("found print queue '%s'" % p)
installed_fax_devices = device.getSupportedCUPSDevices(['hpfax'])
for d in list(installed_fax_devices.keys()):
for f in installed_fax_devices[d]:
log.debug("found fax queue '%s'" % f)
# ******************************* PRINT QUEUE SETUP
if setup_print:
tui.header("PRINT QUEUE SETUP")
if not auto and print_uri in installed_print_devices:
log.warning("One or more print queues already exist for this device: %s." %
', '.join(installed_print_devices[print_uri]))
ok, setup_print = tui.enter_yes_no("\nWould you like to install another print queue for this device", 'n')
if not ok: clean_exit(0)
if setup_print:
if auto:
printer_name = default_model
printer_default_model = default_model
installed_printer_names = device.getSupportedCUPSPrinterNames(['hp'])
# Check for duplicate names
if (device_uri in installed_print_devices and printer_default_model in installed_print_devices[device_uri]) \
or (printer_default_model in installed_printer_names):
i = 2
while True:
t = printer_default_model + "_%d" % i
if (t not in installed_printer_names) and(device_uri not in installed_print_devices or t not in installed_print_devices[device_uri]):
printer_default_model += "_%d" % i
break
i += 1
if not auto:
if printer_name is None:
while True:
printer_name = input(log.bold("\nPlease enter a name for this print queue (m=use model name:'%s'*, q=quit) ?" % printer_default_model))
if printer_name.lower().strip() == 'q':
log.info("OK, done.")
clean_exit(0)
if not printer_name or printer_name.lower().strip() == 'm':
printer_name = printer_default_model
name_ok = True
for d in list(installed_print_devices.keys()):
for p in installed_print_devices[d]:
if printer_name == p:
log.error("A print queue with that name already exists. Please enter a different name.")
name_ok = False
break
for d in list(installed_fax_devices.keys()):
for f in installed_fax_devices[d]:
if printer_name == f:
log.error("A fax queue with that name already exists. Please enter a different name.")
name_ok = False
break
for c in printer_name:
if c in cups.INVALID_PRINTER_NAME_CHARS:
log.error("Invalid character '%s' in printer name. Please enter a name that does not contain this character." % c)
name_ok = False
if name_ok:
break
else:
printer_name = printer_default_model
log.info("Using queue name: %s" % printer_name)
default_model = utils.xstrip(model.replace('series', '').replace('Series', ''), '_')
log.info("Locating PPD file... Please wait.")
print_ppd = cups.getPPDFile2(mq, default_model, ppds)
enter_ppd = False
if print_ppd is None:
enter_ppd = True
log.error("Unable to find an appropriate PPD file.")
else:
print_ppd, desc = print_ppd
log.info("\nFound PPD file: %s" % print_ppd)
log.info("Description: %s" % desc)
#
if not auto:
log.info("\nNote: The model number may vary slightly from the actual model number on the device.")
ok, ans = tui.enter_yes_no("\nDoes this PPD file appear to be the correct one")
if not ok: clean_exit(0)
if not ans: enter_ppd = True
if enter_ppd:
enter_ppd = False
ok, enter_ppd = tui.enter_yes_no("\nWould you like to specify the path to the correct PPD file to use", 'n')
if not ok: clean_exit(0)
if enter_ppd:
ok = False
while True:
user_input = input(log.bold("\nPlease enter the full filesystem path to the PPD file to use (q=quit) :"))
if user_input.lower().strip() == 'q':
log.info("OK, done.")
clean_exit(0)
file_path = user_input
if os.path.exists(file_path) and os.path.isfile(file_path):
if file_path.endswith('.gz'):
nickname = gzip.GzipFile(file_path, 'r').read(4096)
else:
nickname = open(file_path, 'r').read(4096)
try:
desc = nickname_pat.search(nickname).group(1)
except AttributeError:
desc = ''
if desc:
log.info("Description for the file: %s" % desc)
else:
log.error("No PPD 'NickName' found. This file may not be a valid PPD file.")
ok, ans = tui.enter_yes_no("\nUse this file")
if not ok: clean_exit(0)
if ans: print_ppd = file_path
else:
log.error("File not found or not an appropriate (PPD) file.")
if ok:
break
else:
log.error("PPD file required. Setup cannot continue. Exiting.")
clean_exit(1)
if auto:
location, info = '', '%s Device (Automatically setup by HPLIP)'%(default_model.replace('_',' '))
else:
while True:
location = input(log.bold("Enter a location description for this printer (q=quit) ?"))
if location.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
# TODO: Validate chars
break
while True:
info = input(log.bold("Enter additonal information or notes for this printer (q=quit) ?"))
if info.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
# TODO: Validate chars
break
log.info(log.bold("\nAdding print queue to CUPS:"))
log.info("Device URI: %s" % print_uri)
log.info("Queue name: %s" % printer_name)
log.info("PPD file: %s" % print_ppd)
log.info("Location: %s" % location)
log.info("Information: %s" % info)
if not os.path.exists(print_ppd): # assume foomatic: or some such
add_prnt_args = (printer_name, print_uri, location, '', print_ppd, info)
else:
add_prnt_args = (printer_name, print_uri, location, print_ppd, '', info)
status, status_str = cups.cups_operation(cups.addPrinter, INTERACTIVE_MODE, '', None, *add_prnt_args)
log.debug("addPrinter() returned (%d, %s)" % (status, status_str))
log.debug(device.getSupportedCUPSDevices(['hp']))
if status != cups.IPP_OK:
log.error("Printer queue setup failed. Error : %s "%status_str)
clean_exit(1)
else:
# sending Event to add this device in hp-systray
utils.sendEvent(EVENT_CUPS_QUEUES_ADDED,print_uri, printer_name)
# Updating firmware download for supported devices.
if ignore_plugin_check is False and mq.get('fw-download', False):
try:
d = device.Device(print_uri)
except Error:
log.error("Error opening device. Firmware download is Failed.")
else:
if d.downloadFirmware():
log.info("Firmware download successful.\n")
else:
log.error("Firmware download is Failed.")
d.close()
# ******************************* FAX QUEUE SETUP
if setup_fax and not prop.fax_build:
log.error("Cannot setup fax - HPLIP not built with fax enabled.")
setup_fax = False
if setup_fax:
try:
from fax import fax
except ImportError:
# This can fail on Python < 2.3 due to the datetime module
setup_fax = False
log.warning("Fax setup disabled - Python 2.3+ required.")
log.info("")
if setup_fax:
tui.header("FAX QUEUE SETUP")
if not auto and fax_uri in installed_fax_devices:
log.warning("One or more fax queues already exist for this device: %s." % ', '.join(installed_fax_devices[fax_uri]))
ok, setup_fax = tui.enter_yes_no("\nWould you like to install another fax queue for this device", 'n')
if not ok: clean_exit(0)
if setup_fax:
if auto: # or fax_name is None:
fax_name = default_model + '_fax'
fax_default_model = default_model + '_fax'
installed_fax_names = device.getSupportedCUPSPrinterNames(['hpfax'])
# Check for duplicate names
if (fax_uri in installed_fax_devices and fax_default_model in installed_fax_devices[fax_uri]) \
or (fax_default_model in installed_fax_names):
i = 2
while True:
t = fax_default_model + "_%d" % i
if (t not in installed_fax_names) and (fax_uri not in installed_fax_devices or t not in installed_fax_devices[fax_uri]):
fax_default_model += "_%d" % i
break
i += 1
if not auto:
if fax_name is None:
while True:
fax_name = input(log.bold("\nPlease enter a name for this fax queue (m=use model name:'%s'*, q=quit) ?" % fax_default_model))
if fax_name.lower().strip() == 'q':
log.info("OK, done.")
clean_exit(0)
if not fax_name or fax_name.lower().strip() == 'm':
fax_name = fax_default_model
name_ok = True
for d in list(installed_print_devices.keys()):
for p in installed_print_devices[d]:
if fax_name == p:
log.error("A print queue with that name already exists. Please enter a different name.")
name_ok = False
break
for d in list(installed_fax_devices.keys()):
for f in installed_fax_devices[d]:
if fax_name == f:
log.error("A fax queue with that name already exists. Please enter a different name.")
name_ok = False
break
for c in fax_name:
if c in (' ', '#', '/', '%'):
log.error("Invalid character '%s' in fax name. Please enter a name that does not contain this character." % c)
name_ok = False
if name_ok:
break
else:
fax_name = fax_default_model
log.info("Using queue name: %s" % fax_name)
fax_ppd,fax_ppd_type,nick = cups.getFaxPPDFile(mq, fax_name)
if not fax_ppd:
log.error("Unable to find HP fax PPD file! Please check you HPLIP installation and try again.")
clean_exit(1)
if auto:
location, info = '', '%s Fax Device (Automatically setup by HPLIP)'%(default_model.replace('_',' '))
else:
while True:
location = input(log.bold("Enter a location description for this printer (q=quit) ?"))
if location.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
# TODO: Validate chars
break
while True:
info = input(log.bold("Enter additonal information or notes for this printer (q=quit) ?"))
if info.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
# TODO: Validate chars
break
log.info(log.bold("\nAdding fax queue to CUPS:"))
log.info("Device URI: %s" % fax_uri)
log.info("Queue name: %s" % fax_name)
log.info("PPD file: %s" % fax_ppd)
log.info("Location: %s" % location)
log.info("Information: %s" % info)
cups.setPasswordPrompt("You do not have permission to add a fax device.")
if not os.path.exists(fax_ppd): # assume foomatic: or some such
status, status_str = cups.addPrinter(fax_name, fax_uri,
location, '', fax_ppd, info)
else:
status, status_str = cups.addPrinter(fax_name, fax_uri,
location, fax_ppd, '', info)
log.debug("addPrinter() returned (%d, %s)" % (status, status_str))
log.debug(device.getSupportedCUPSDevices(['hpfax']))
if status != cups.IPP_OK:
log.error("Fax queue setup failed. Error : %s"%status_str)
clean_exit(1)
else:
# sending Event to add this device in hp-systray
utils.sendEvent(EVENT_CUPS_QUEUES_ADDED,fax_uri, fax_name)
# ******************************* FAX HEADER SETUP
tui.header("FAX HEADER SETUP")
if auto:
setup_fax = False
else:
while True:
user_input = input(log.bold("\nWould you like to perform fax header setup (y=yes*, n=no, q=quit) ?")).strip().lower()
if user_input == 'q':
log.info("OK, done.")
clean_exit(0)
if not user_input:
user_input = 'y'
setup_fax = (user_input == 'y')
if user_input in ('y', 'n', 'q'):
break
log.error("Please enter 'y' or 'n'")
if setup_fax:
d = fax.getFaxDevice(fax_uri, disable_dbus=True)
try:
d.open()
except Error:
log.error("Unable to communicate with the device. Please check the device and try again.")
else:
try:
tries = 0
ok = True
while True:
tries += 1
try:
current_phone_num = str(d.getPhoneNum())
current_station_name = to_unicode(d.getStationName())
except Error:
log.error("Could not communicate with device. Device may be busy. Please wait for retry...")
time.sleep(5)
ok = False
if tries > 12:
break
else:
ok = True
break
if ok:
while True:
if current_phone_num:
phone_num = input(log.bold("\nEnter the fax phone number for this device (c=use current:'%s'*, q=quit) ?" % current_phone_num))
else:
phone_num = input(log.bold("\nEnter the fax phone number for this device (q=quit) ?"))
if phone_num.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
if current_phone_num and (not phone_num or phone_num.strip().lower() == 'c'):
phone_num = current_phone_num
if len(phone_num) > 50:
log.error("Phone number length is too long (>50 characters). Please enter a shorter number.")
continue
ok = True
for x in phone_num:
if x not in '0123456789-(+) ':
log.error("Invalid characters in phone number. Please only use 0-9, -, (, +, and )")
ok = False
break
if not ok:
continue
break
while True:
if current_station_name:
station_name = input(log.bold("\nEnter the name and/or company for this device (c=use current:'%s'*, q=quit) ?"%from_unicode_to_str(current_station_name)))
else:
station_name = input(log.bold("\nEnter the name and/or company for this device (q=quit) ?"))
if station_name.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
if current_station_name and (not station_name or station_name.strip().lower() == 'c'):
station_name = current_station_name
### Here station_name can be unicode or utf-8 sequence.
### making sure to convert data to unicode for all the cases.
try:
station_name.encode('utf-8')
except (UnicodeEncodeError,UnicodeDecodeError):
station_name = station_name.decode('utf-8')
if len(station_name) > 50:
log.error("Name/company length is too long (>50 characters). Please enter a shorter name/company.")
continue
break
try:
d.setStationName(station_name)
d.setPhoneNum(phone_num)
except Error:
log.error("Could not communicate with device. Device may be busy.")
else:
log.info("\nParameters sent to device.")
finally:
d.close()
# ******************************* TEST PAGE
if setup_print:
print_test_page = False
tui.header("PRINTER TEST PAGE")
if auto:
if testpage_in_auto_mode:
print_test_page = True
else:
ok, print_test_page = tui.enter_yes_no("\nWould you like to print a test page")
if not ok: clean_exit(0)
if print_test_page:
path = utils.which('hp-testpage')
if printer_name:
param = "-p%s" % printer_name
else:
param = "-d%s" % print_uri
if len(path) > 0:
cmd = 'hp-testpage -i %s' % param
else:
cmd = 'python ./testpage.py -i %s' % param
os_utils.execute(cmd)
except KeyboardInterrupt:
log.error("User exit")
cups.releaseCupsInstance()
log.info("")
log.info("Done.")
| 40.757771 | 274 | 0.50401 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# (c) Copyright 2003-2015 HP Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Don Welch
#
# Module identification strings: consumed by the HPLIP module framework
# (base.module.Module) for --help/--version output and logging.
__version__ = '9.0'
__title__ = 'Printer/Fax Setup Utility'
__mod__ = 'hp-setup'
__doc__ = "Installs HPLIP printers and faxes in the CUPS spooler. Tries to automatically determine the correct PPD file to use. Allows the printing of a testpage. Performs basic fax parameter setup."
# Std Lib
import sys
import getopt
import time
import os.path
import re
import os
import gzip
try:
import readline
except ImportError:
pass
# Local
from base.g import *
from base import device, utils, tui, models, module, services, os_utils
from prnt import cups
from base.sixext.moves import input
from base.sixext import to_unicode, from_unicode_to_str
try:
from importlib import import_module
except ImportError as e:
log.debug(e)
from base.utils import dyn_import_mod as import_module
pm = None
def plugin_download_callback(c, s, t):
    """Progress callback for the binary plug-in download.

    c -- number of chunks transferred so far (presumably; reporthook-style
         callback — confirm against the caller)
    t -- total size in bytes; may be 0/unknown
    s -- size of one chunk in bytes

    Updates the module-level progress meter ``pm`` with the percentage
    complete and a human-readable count of bytes received.
    """
    received = c * s
    # Guard against ZeroDivisionError when the server did not report a
    # total size (t == 0); show 0% rather than crashing mid-download.
    percent = int(100 * received / t) if t else 0
    pm.update(percent, utils.format_bytes(received))
def clean_exit(code = 0):
    """Release the cached CUPS connection, then terminate the process.

    code -- process exit status (default 0).

    The release call is wrapped in try/finally so the process still exits
    with the requested status even if releasing the CUPS instance raises;
    previously an exception there would skip sys.exit() entirely.
    """
    try:
        cups.releaseCupsInstance()
    finally:
        sys.exit(code)
nickname_pat = re.compile(r'''\*NickName:\s*\"(.*)"''', re.MULTILINE)
# Usage/help rows consumed by base.module.Module / base.utils formatting.
# Each row is (text, option-text, style, trailing-space) — presumably; the
# tuple layout is defined by the utils usage formatter, confirm there.
USAGE = [(__doc__, "", "name", True),
         ("Usage: %s [MODE] [OPTIONS] [SERIAL NO.|USB bus:device|IP|DEVNODE]" % __mod__, "", "summary", True),
         utils.USAGE_MODE,
         utils.USAGE_GUI_MODE,
         utils.USAGE_INTERACTIVE_MODE,
         utils.USAGE_SPACE,
         utils.USAGE_OPTIONS,
         ("Automatic mode:", "-a or --auto (-i mode only)", "option", False),
         ("To specify the port on a multi-port JetDirect:", "--port=<port> (Valid values are 1\*, 2, and 3. \*default)", "option", False),
         ("No testpage in automatic mode:", "-x (-i mode only)", "option", False),
         ("To specify a CUPS printer queue name:", "-p<printer> or --printer=<printer> (-i mode only)", "option", False),
         ("To specify a CUPS fax queue name:", "-f<fax> or --fax=<fax> (-i mode only)", "option", False),
         ("Type of queue(s) to install:", "-t<typelist> or --type=<typelist>. <typelist>: print*, fax\* (\*default) (-i mode only)", "option", False),
         ("To specify the device URI to install:", "-d<device> or --device=<device> (--qt4 mode only)", "option", False),
         ("Remove printers or faxes instead of setting-up:", "-r or --rm or --remove", "option", False),
         utils.USAGE_LANGUAGE,
         utils.USAGE_LOGGING1, utils.USAGE_LOGGING2, utils.USAGE_LOGGING3,
         utils.USAGE_HELP,
         ("[SERIAL NO.|USB ID|IP|DEVNODE]", "", "heading", False),
         ("USB bus:device (usb only):", """"xxx:yyy" where 'xxx' is the USB bus and 'yyy' is the USB device. (Note: The ':' and all leading zeros must be present.)""", 'option', False),
         ("", "Use the 'lsusb' command to obtain this information.", "option", False),
         ("IPs (network only):", 'IPv4 address "a.b.c.d" or "hostname"', "option", False),
         ("DEVNODE (parallel only):", '"/dev/parportX", X=0,1,2,...', "option", False),
         ("SERIAL NO. (usb and parallel only):", '"serial no."', "option", True),
         utils.USAGE_EXAMPLES,
         ("Setup using GUI mode:", "$ hp-setup", "example", False),
         ("Setup using GUI mode, specifying usb:", "$ hp-setup -b usb", "example", False),
         ("Setup using GUI mode, specifying an IP:", "$ hp-setup 192.168.0.101", "example", False),
         ("One USB printer attached, automatic:", "$ hp-setup -i -a", "example", False),
         ("USB, IDs specified:", "$ hp-setup -i 001:002", "example", False),
         ("Network:", "$ hp-setup -i 66.35.250.209", "example", False),
         ("Network, Jetdirect port 2:", "$ hp-setup -i --port=2 66.35.250.209", "example", False),
         ("Parallel:", "$ hp-setup -i /dev/parport0", "example", False),
         ("USB or parallel, using serial number:", "$ hp-setup -i US12345678A", "example", False),
         ("USB, automatic:", "$ hp-setup -i --auto 001:002", "example", False),
         ("Parallel, automatic, no testpage:", "$ hp-setup -i -a -x /dev/parport0", "example", False),
         ("Parallel, choose device:", "$ hp-setup -i -b par", "example", False),
         utils.USAGE_SPACE,
         utils.USAGE_NOTES,
         ("1. If no serial number, USB ID, IP, or device node is specified, the USB and parallel busses will be probed for devices.", "", 'note', False),
         ("2. Using 'lsusb' to obtain USB IDs: (example)", "", 'note', False),
         (" $ lsusb", "", 'note', False),
         (" Bus 003 Device 011: ID 03f0:c202 Hewlett-Packard", "", 'note', False),
         (" $ hp-setup --auto 003:011", "", 'note', False),
         (" (Note: You may have to run 'lsusb' from /sbin or another location. Use '$ locate lsusb' to determine this.)", "", 'note', True),
         ("3. Parameters -a, -f, -p, or -t are not valid in GUI (-u) mode.", "", 'note', True),
         utils.USAGE_SPACE,
         utils.USAGE_SEEALSO,
         ("hp-makeuri", "", "seealso", False),
         ("hp-probe", "", "seealso", False),
         ]
# Build the standard HPLIP module object: wires up usage text, logging,
# localization, and the common command-line machinery for both the
# interactive (-i) and GUI (-u) modes.
mod = module.Module(__mod__, __title__, __version__, __doc__, USAGE,
                    (INTERACTIVE_MODE, GUI_MODE),
                    (UI_TOOLKIT_QT3, UI_TOOLKIT_QT4, UI_TOOLKIT_QT5),
                    run_as_root_ok=True)

# Parse the shared + setup-specific options. Device/printer selection is
# handled manually further below, hence handle_device_printer=False.
opts, device_uri, printer_name, mode, ui_toolkit, loc = \
    mod.parseStdOpts('axp:P:f:t:b:d:rq',
                     ['ttl=', 'filter=', 'search=', 'find=',
                      'method=', 'time-out=', 'timeout=',
                      'printer=', 'fax=', 'type=', 'port=',
                      'auto', 'device=', 'rm', 'remove'],
                     handle_device_printer=False)
# Defaults for the setup state; each is overridden by the matching
# command-line option in the loop below.
selected_device_name = None    # queue name targeted for removal (-p/-f + -r)
printer_name = None            # CUPS print queue name (-p/--printer)
fax_name = None                # CUPS fax queue name (-f/--fax)
bus = None                     # connection-type filter list (-b/--bus)
setup_print = True             # install a print queue (-t print)
setup_fax = True               # install a fax queue (-t fax)
makeuri = None                 # NOTE(review): unused in the visible code
auto = False                   # non-interactive automatic mode (-a/--auto)
testpage_in_auto_mode = True   # print a test page in auto mode (-x disables)
jd_port = 1                    # JetDirect port number (--port)
remove = False                 # remove queues instead of adding (-r/--rm)
ignore_plugin_check = False    # skip the binary plug-in check (-q)
# Apply each parsed command-line option to the setup state above.
for o, a in opts:
    if o == '-x':
        testpage_in_auto_mode = False
    elif o in ('-P', '-p', '--printer'):
        printer_name = a
    elif o in ('-f', '--fax'):
        fax_name = a
    elif o in ('-d', '--device'):
        device_uri = a
    elif o in ('-b', '--bus'):
        bus = [x.lower().strip() for x in a.split(',')]
        if not device.validateBusList(bus, False):
            mod.usage(error_msg=['Invalid bus name'])
    elif o in ('-t', '--type'):
        # -t takes a comma-separated list; start from "install nothing"
        # and enable only the requested queue types.
        setup_fax, setup_print = False, False
        a = a.strip().lower()
        for aa in a.split(','):
            if aa.strip() not in ('print', 'fax'):
                mod.usage(error_msg=['Invalid type.'])
            if aa.strip() == 'print':
                setup_print = True
            elif aa.strip() == 'fax':
                if not prop.fax_build:
                    log.error("Cannot enable fax setup - HPLIP not built with fax enabled.")
                else:
                    setup_fax = True
    elif o == '--port':
        try:
            jd_port = int(a)
        except ValueError:
            mod.usage(error_msg=['Invalid port number. Must be between 1 and 3 inclusive.'])
    elif o in ('-a', '--auto'):
        auto = True
    elif o in ('-r', '--rm', '--remove'):
        remove = True
    # Fixed: the original tested ``o in ('-q')`` — a parenthesized string,
    # not a tuple — i.e. a substring check, not membership.
    elif o == '-q':
        ignore_plugin_check = True
# First positional argument: the device identifier (serial number, USB
# "bus:device" pair, IP/hostname, or /dev/parportX). Empty string means
# "probe the busses" later on.
try:
    param = mod.args[0]
except IndexError:
    param = ''
log.debug("param=%s" % param)

# For queue removal, a -p (printer) name takes precedence over -f (fax)
# as the queue selected for the operation.
if printer_name is not None:
    selected_device_name = printer_name
else:
    if fax_name is not None:
        selected_device_name = fax_name
log.debug("selected_device_name=%s" % selected_device_name)
# GUI-mode sanity checks: -p/-f are interactive-only options, and the
# requested Qt toolkit must actually be usable before entering GUI mode.
if mode == GUI_MODE:
    if selected_device_name is not None:
        log.warning("-p or -f option is not supported")
    if ui_toolkit == 'qt3':
        if not utils.canEnterGUIMode():
            log.error("%s requires GUI support (try running with --qt4). Also, try using interactive (-i) mode." % __mod__)
            clean_exit(1)
    else:
        if not utils.canEnterGUIMode4():
            log.error("%s requires GUI support (try running with --qt3). Also, try using interactive (-i) mode." % __mod__)
            clean_exit(1)
if mode == GUI_MODE:
if ui_toolkit == 'qt3':
try:
from qt import *
from ui import setupform
except ImportError:
log.error("Unable to load Qt3 support. Is it installed?")
clean_exit(1)
if remove:
log.warn("-r/--rm/--remove not supported in qt3 mode.")
app = QApplication(sys.argv)
QObject.connect(app, SIGNAL("lastWindowClosed()"), app, SLOT("quit()"))
if loc is None:
loc = user_conf.get('ui', 'loc', 'system')
if loc.lower() == 'system':
loc = str(QTextCodec.locale())
log.debug("Using system locale: %s" % loc)
if loc.lower() != 'c':
e = 'utf8'
try:
l, x = loc.split('.')
loc = '.'.join([l, e])
except ValueError:
l = loc
loc = '.'.join([loc, e])
log.debug("Trying to load .qm file for %s locale." % loc)
trans = QTranslator(None)
qm_file = 'hplip_%s.qm' % l
log.debug("Name of .qm file: %s" % qm_file)
loaded = trans.load(qm_file, prop.localization_dir)
if loaded:
app.installTranslator(trans)
else:
loc = 'c'
if loc == 'c':
log.debug("Using default 'C' locale")
else:
log.debug("Using locale: %s" % loc)
QLocale.setDefault(QLocale(loc))
prop.locale = loc
try:
locale.setlocale(locale.LC_ALL, locale.normalize(loc))
except locale.Error:
pass
try:
w = setupform.SetupForm(bus, param, jd_port)
except Error:
log.error("Unable to connect to HPLIP I/O. Please (re)start HPLIP and try again.")
clean_exit(1)
app.setMainWidget(w)
w.show()
app.exec_loop()
cups.releaseCupsInstance()
else: # qt4
# if utils.ui_status[1] == "PyQt4":
# try:
# from PyQt4.QtGui import QApplication, QMessageBox
# from ui4.setupdialog import SetupDialog
# except ImportError as e:
# log.error(e)
# clean_exit(1)
# elif utils.ui_status[1] == "PyQt5":
# try:
# from PyQt5.QtWidgets import QApplication, QMessageBox
# from ui5.setupdialog import SetupDialog
# except ImportError as e:
# log.error(e)
# clean_exit(1)
# else:
# log.error("Unable to load Qt support. Is it installed?")
# clean_exit(1)
QApplication, ui_package = utils.import_dialog(ui_toolkit)
ui = import_module(ui_package + ".setupdialog")
app = QApplication(sys.argv)
log.debug("Sys.argv=%s printer_name=%s param=%s jd_port=%s device_uri=%s remove=%s" % (sys.argv, printer_name, param, jd_port, device_uri, remove))
dlg = ui.SetupDialog(None, param, jd_port, device_uri, remove)
dlg.show()
try:
log.debug("Starting GUI Event Loop...")
app.exec_()
except KeyboardInterrupt:
clean_exit(0)
else: # INTERACTIVE_MODE
try:
try:
from base import password
except ImportError:
log.warn("Failed to import Password Object")
else:
cups.setPasswordCallback(password.showPasswordPrompt)
#Removing Queue
if remove:
tui.header("REMOVING PRINT/FAX QUEUE")
sts, printer_name, device_uri = mod.getPrinterName(selected_device_name,None,['hp','hpfax'])
selected_device_name = printer_name
log.info (log.bold("Removing '%s : %s' Queue"%(printer_name, device_uri)))
status, status_str = cups.cups_operation(cups.delPrinter, INTERACTIVE_MODE, '', None, selected_device_name)
if cups.IPP_OK == status:
log.info("Successfully deleted %s Print/Fax queue"%selected_device_name)
utils.sendEvent(EVENT_CUPS_QUEUES_REMOVED,device_uri, printer_name)
clean_exit(0)
else:
log.error("Failed to delete %s Print/Fax queue. Error : %s"%(selected_device_name,status_str))
clean_exit(1)
if not auto:
log.info("(Note: Defaults for each question are maked with a '*'. Press <enter> to accept the default.)")
log.info("")
# ******************************* MAKEURI
if param:
device_uri, sane_uri, fax_uri = device.makeURI(param, jd_port)
# ******************************* CONNECTION TYPE CHOOSER
if not device_uri and bus is None:
bus = tui.connection_table()
if bus is None:
clean_exit(0)
log.info("\nUsing connection type: %s" % bus[0])
log.info("")
# ******************************* DEVICE CHOOSER
if not device_uri:
log.debug("\nDEVICE CHOOSER setup_fax=%s, setup_print=%s" % (setup_fax, setup_print))
device_uri = mod.getDeviceUri(devices = device.probeDevices(bus))
if not device_uri:
clean_exit(0)
# ******************************* QUERY MODEL AND COLLECT PPDS
log.info(log.bold("\nSetting up device: %s\n" % device_uri))
log.info("")
print_uri = device_uri.replace("hpfax:", "hp:")
fax_uri = device_uri.replace("hp:", "hpfax:")
back_end, is_hp, bus, model, \
serial, dev_file, host, zc, port = \
device.parseDeviceURI(device_uri)
log.debug("Model=%s" % model)
mq = device.queryModelByURI(device_uri)
if not mq or mq.get('support-type', SUPPORT_TYPE_NONE) == SUPPORT_TYPE_NONE:
log.error("Unsupported printer model.")
clean_exit(1)
if mq.get('fax-type', FAX_TYPE_NONE) in (FAX_TYPE_NONE, FAX_TYPE_NOT_SUPPORTED) and setup_fax:
#log.warning("Cannot setup fax - device does not have fax feature.")
setup_fax = False
# ******************************* PLUGIN
norm_model = models.normalizeModelName(model).lower()
plugin = mq.get('plugin', PLUGIN_NONE)
if ignore_plugin_check is False and plugin > PLUGIN_NONE:
from installer import pluginhandler
pluginObj = pluginhandler.PluginHandle()
plugin_sts = pluginObj.getStatus()
if plugin_sts != pluginhandler.PLUGIN_INSTALLED:
if plugin_sts == pluginhandler.PLUGIN_VERSION_MISMATCH:
tui.header("UPDATING PLUGIN")
else:
tui.header("PLUG-IN INSTALLATION")
hp_plugin = utils.which('hp-plugin')
if hp_plugin:
cmd = "hp-plugin -i"
if os_utils.execute(cmd) != 0:
log.error("Failed to install Plugin.")
log.error("The device you are trying to setup requires a binary plug-in. Some functionalities may not work as expected without plug-ins. Please run 'hp-plugin' as normal user to install plug-ins.Visit http://hplipopensource.com for more infomation.")
clean_exit(1)
ppds = cups.getSystemPPDs()
default_model = utils.xstrip(model.replace('series', '').replace('Series', ''), '_')
installed_print_devices = device.getSupportedCUPSDevices(['hp'])
for d in list(installed_print_devices.keys()):
for p in installed_print_devices[d]:
log.debug("found print queue '%s'" % p)
installed_fax_devices = device.getSupportedCUPSDevices(['hpfax'])
for d in list(installed_fax_devices.keys()):
for f in installed_fax_devices[d]:
log.debug("found fax queue '%s'" % f)
# ******************************* PRINT QUEUE SETUP
if setup_print:
tui.header("PRINT QUEUE SETUP")
if not auto and print_uri in installed_print_devices:
log.warning("One or more print queues already exist for this device: %s." %
', '.join(installed_print_devices[print_uri]))
ok, setup_print = tui.enter_yes_no("\nWould you like to install another print queue for this device", 'n')
if not ok: clean_exit(0)
if setup_print:
if auto:
printer_name = default_model
printer_default_model = default_model
installed_printer_names = device.getSupportedCUPSPrinterNames(['hp'])
# Check for duplicate names
if (device_uri in installed_print_devices and printer_default_model in installed_print_devices[device_uri]) \
or (printer_default_model in installed_printer_names):
i = 2
while True:
t = printer_default_model + "_%d" % i
if (t not in installed_printer_names) and(device_uri not in installed_print_devices or t not in installed_print_devices[device_uri]):
printer_default_model += "_%d" % i
break
i += 1
if not auto:
if printer_name is None:
while True:
printer_name = input(log.bold("\nPlease enter a name for this print queue (m=use model name:'%s'*, q=quit) ?" % printer_default_model))
if printer_name.lower().strip() == 'q':
log.info("OK, done.")
clean_exit(0)
if not printer_name or printer_name.lower().strip() == 'm':
printer_name = printer_default_model
name_ok = True
for d in list(installed_print_devices.keys()):
for p in installed_print_devices[d]:
if printer_name == p:
log.error("A print queue with that name already exists. Please enter a different name.")
name_ok = False
break
for d in list(installed_fax_devices.keys()):
for f in installed_fax_devices[d]:
if printer_name == f:
log.error("A fax queue with that name already exists. Please enter a different name.")
name_ok = False
break
for c in printer_name:
if c in cups.INVALID_PRINTER_NAME_CHARS:
log.error("Invalid character '%s' in printer name. Please enter a name that does not contain this character." % c)
name_ok = False
if name_ok:
break
else:
printer_name = printer_default_model
log.info("Using queue name: %s" % printer_name)
default_model = utils.xstrip(model.replace('series', '').replace('Series', ''), '_')
log.info("Locating PPD file... Please wait.")
print_ppd = cups.getPPDFile2(mq, default_model, ppds)
enter_ppd = False
if print_ppd is None:
enter_ppd = True
log.error("Unable to find an appropriate PPD file.")
else:
print_ppd, desc = print_ppd
log.info("\nFound PPD file: %s" % print_ppd)
log.info("Description: %s" % desc)
#
if not auto:
log.info("\nNote: The model number may vary slightly from the actual model number on the device.")
ok, ans = tui.enter_yes_no("\nDoes this PPD file appear to be the correct one")
if not ok: clean_exit(0)
if not ans: enter_ppd = True
if enter_ppd:
enter_ppd = False
ok, enter_ppd = tui.enter_yes_no("\nWould you like to specify the path to the correct PPD file to use", 'n')
if not ok: clean_exit(0)
if enter_ppd:
ok = False
while True:
user_input = input(log.bold("\nPlease enter the full filesystem path to the PPD file to use (q=quit) :"))
if user_input.lower().strip() == 'q':
log.info("OK, done.")
clean_exit(0)
file_path = user_input
if os.path.exists(file_path) and os.path.isfile(file_path):
if file_path.endswith('.gz'):
nickname = gzip.GzipFile(file_path, 'r').read(4096)
else:
nickname = open(file_path, 'r').read(4096)
try:
desc = nickname_pat.search(nickname).group(1)
except AttributeError:
desc = ''
if desc:
log.info("Description for the file: %s" % desc)
else:
log.error("No PPD 'NickName' found. This file may not be a valid PPD file.")
ok, ans = tui.enter_yes_no("\nUse this file")
if not ok: clean_exit(0)
if ans: print_ppd = file_path
else:
log.error("File not found or not an appropriate (PPD) file.")
if ok:
break
else:
log.error("PPD file required. Setup cannot continue. Exiting.")
clean_exit(1)
if auto:
location, info = '', '%s Device (Automatically setup by HPLIP)'%(default_model.replace('_',' '))
else:
while True:
location = input(log.bold("Enter a location description for this printer (q=quit) ?"))
if location.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
# TODO: Validate chars
break
while True:
info = input(log.bold("Enter additonal information or notes for this printer (q=quit) ?"))
if info.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
# TODO: Validate chars
break
log.info(log.bold("\nAdding print queue to CUPS:"))
log.info("Device URI: %s" % print_uri)
log.info("Queue name: %s" % printer_name)
log.info("PPD file: %s" % print_ppd)
log.info("Location: %s" % location)
log.info("Information: %s" % info)
if not os.path.exists(print_ppd): # assume foomatic: or some such
add_prnt_args = (printer_name, print_uri, location, '', print_ppd, info)
else:
add_prnt_args = (printer_name, print_uri, location, print_ppd, '', info)
status, status_str = cups.cups_operation(cups.addPrinter, INTERACTIVE_MODE, '', None, *add_prnt_args)
log.debug("addPrinter() returned (%d, %s)" % (status, status_str))
log.debug(device.getSupportedCUPSDevices(['hp']))
if status != cups.IPP_OK:
log.error("Printer queue setup failed. Error : %s "%status_str)
clean_exit(1)
else:
# sending Event to add this device in hp-systray
utils.sendEvent(EVENT_CUPS_QUEUES_ADDED,print_uri, printer_name)
# Updating firmware download for supported devices.
if ignore_plugin_check is False and mq.get('fw-download', False):
try:
d = device.Device(print_uri)
except Error:
log.error("Error opening device. Firmware download is Failed.")
else:
if d.downloadFirmware():
log.info("Firmware download successful.\n")
else:
log.error("Firmware download is Failed.")
d.close()
# ******************************* FAX QUEUE SETUP
if setup_fax and not prop.fax_build:
log.error("Cannot setup fax - HPLIP not built with fax enabled.")
setup_fax = False
if setup_fax:
try:
from fax import fax
except ImportError:
# This can fail on Python < 2.3 due to the datetime module
setup_fax = False
log.warning("Fax setup disabled - Python 2.3+ required.")
log.info("")
if setup_fax:
tui.header("FAX QUEUE SETUP")
if not auto and fax_uri in installed_fax_devices:
log.warning("One or more fax queues already exist for this device: %s." % ', '.join(installed_fax_devices[fax_uri]))
ok, setup_fax = tui.enter_yes_no("\nWould you like to install another fax queue for this device", 'n')
if not ok: clean_exit(0)
if setup_fax:
if auto: # or fax_name is None:
fax_name = default_model + '_fax'
fax_default_model = default_model + '_fax'
installed_fax_names = device.getSupportedCUPSPrinterNames(['hpfax'])
# Check for duplicate names
if (fax_uri in installed_fax_devices and fax_default_model in installed_fax_devices[fax_uri]) \
or (fax_default_model in installed_fax_names):
i = 2
while True:
t = fax_default_model + "_%d" % i
if (t not in installed_fax_names) and (fax_uri not in installed_fax_devices or t not in installed_fax_devices[fax_uri]):
fax_default_model += "_%d" % i
break
i += 1
if not auto:
if fax_name is None:
while True:
fax_name = input(log.bold("\nPlease enter a name for this fax queue (m=use model name:'%s'*, q=quit) ?" % fax_default_model))
if fax_name.lower().strip() == 'q':
log.info("OK, done.")
clean_exit(0)
if not fax_name or fax_name.lower().strip() == 'm':
fax_name = fax_default_model
name_ok = True
for d in list(installed_print_devices.keys()):
for p in installed_print_devices[d]:
if fax_name == p:
log.error("A print queue with that name already exists. Please enter a different name.")
name_ok = False
break
for d in list(installed_fax_devices.keys()):
for f in installed_fax_devices[d]:
if fax_name == f:
log.error("A fax queue with that name already exists. Please enter a different name.")
name_ok = False
break
for c in fax_name:
if c in (' ', '#', '/', '%'):
log.error("Invalid character '%s' in fax name. Please enter a name that does not contain this character." % c)
name_ok = False
if name_ok:
break
else:
fax_name = fax_default_model
log.info("Using queue name: %s" % fax_name)
fax_ppd,fax_ppd_type,nick = cups.getFaxPPDFile(mq, fax_name)
if not fax_ppd:
log.error("Unable to find HP fax PPD file! Please check you HPLIP installation and try again.")
clean_exit(1)
if auto:
location, info = '', '%s Fax Device (Automatically setup by HPLIP)'%(default_model.replace('_',' '))
else:
while True:
location = input(log.bold("Enter a location description for this printer (q=quit) ?"))
if location.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
# TODO: Validate chars
break
while True:
info = input(log.bold("Enter additonal information or notes for this printer (q=quit) ?"))
if info.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
# TODO: Validate chars
break
log.info(log.bold("\nAdding fax queue to CUPS:"))
log.info("Device URI: %s" % fax_uri)
log.info("Queue name: %s" % fax_name)
log.info("PPD file: %s" % fax_ppd)
log.info("Location: %s" % location)
log.info("Information: %s" % info)
cups.setPasswordPrompt("You do not have permission to add a fax device.")
if not os.path.exists(fax_ppd): # assume foomatic: or some such
status, status_str = cups.addPrinter(fax_name, fax_uri,
location, '', fax_ppd, info)
else:
status, status_str = cups.addPrinter(fax_name, fax_uri,
location, fax_ppd, '', info)
log.debug("addPrinter() returned (%d, %s)" % (status, status_str))
log.debug(device.getSupportedCUPSDevices(['hpfax']))
if status != cups.IPP_OK:
log.error("Fax queue setup failed. Error : %s"%status_str)
clean_exit(1)
else:
# sending Event to add this device in hp-systray
utils.sendEvent(EVENT_CUPS_QUEUES_ADDED,fax_uri, fax_name)
# ******************************* FAX HEADER SETUP
tui.header("FAX HEADER SETUP")
if auto:
setup_fax = False
else:
while True:
user_input = input(log.bold("\nWould you like to perform fax header setup (y=yes*, n=no, q=quit) ?")).strip().lower()
if user_input == 'q':
log.info("OK, done.")
clean_exit(0)
if not user_input:
user_input = 'y'
setup_fax = (user_input == 'y')
if user_input in ('y', 'n', 'q'):
break
log.error("Please enter 'y' or 'n'")
if setup_fax:
d = fax.getFaxDevice(fax_uri, disable_dbus=True)
try:
d.open()
except Error:
log.error("Unable to communicate with the device. Please check the device and try again.")
else:
try:
tries = 0
ok = True
while True:
tries += 1
try:
current_phone_num = str(d.getPhoneNum())
current_station_name = to_unicode(d.getStationName())
except Error:
log.error("Could not communicate with device. Device may be busy. Please wait for retry...")
time.sleep(5)
ok = False
if tries > 12:
break
else:
ok = True
break
if ok:
while True:
if current_phone_num:
phone_num = input(log.bold("\nEnter the fax phone number for this device (c=use current:'%s'*, q=quit) ?" % current_phone_num))
else:
phone_num = input(log.bold("\nEnter the fax phone number for this device (q=quit) ?"))
if phone_num.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
if current_phone_num and (not phone_num or phone_num.strip().lower() == 'c'):
phone_num = current_phone_num
if len(phone_num) > 50:
log.error("Phone number length is too long (>50 characters). Please enter a shorter number.")
continue
ok = True
for x in phone_num:
if x not in '0123456789-(+) ':
log.error("Invalid characters in phone number. Please only use 0-9, -, (, +, and )")
ok = False
break
if not ok:
continue
break
while True:
if current_station_name:
station_name = input(log.bold("\nEnter the name and/or company for this device (c=use current:'%s'*, q=quit) ?"%from_unicode_to_str(current_station_name)))
else:
station_name = input(log.bold("\nEnter the name and/or company for this device (q=quit) ?"))
if station_name.strip().lower() == 'q':
log.info("OK, done.")
clean_exit(0)
if current_station_name and (not station_name or station_name.strip().lower() == 'c'):
station_name = current_station_name
### Here station_name can be unicode or utf-8 sequence.
### making sure to convert data to unicode for all the cases.
try:
station_name.encode('utf-8')
except (UnicodeEncodeError,UnicodeDecodeError):
station_name = station_name.decode('utf-8')
if len(station_name) > 50:
log.error("Name/company length is too long (>50 characters). Please enter a shorter name/company.")
continue
break
try:
d.setStationName(station_name)
d.setPhoneNum(phone_num)
except Error:
log.error("Could not communicate with device. Device may be busy.")
else:
log.info("\nParameters sent to device.")
finally:
d.close()
# ******************************* TEST PAGE
if setup_print:
print_test_page = False
tui.header("PRINTER TEST PAGE")
if auto:
if testpage_in_auto_mode:
print_test_page = True
else:
ok, print_test_page = tui.enter_yes_no("\nWould you like to print a test page")
if not ok: clean_exit(0)
if print_test_page:
path = utils.which('hp-testpage')
if printer_name:
param = "-p%s" % printer_name
else:
param = "-d%s" % print_uri
if len(path) > 0:
cmd = 'hp-testpage -i %s' % param
else:
cmd = 'python ./testpage.py -i %s' % param
os_utils.execute(cmd)
except KeyboardInterrupt:
log.error("User exit")
cups.releaseCupsInstance()
log.info("")
log.info("Done.")
| 139 | 0 | 46 |
d1025a1b3556bbad21ed260c8b4b790bc9e1dbc7 | 3,573 | py | Python | src/sniffer/pyhttp/link_base.py | ptphp/PyLib | 07ac99cf2deb725475f5771b123b9ea1375f5e65 | [
"Apache-2.0"
] | 1 | 2020-02-17T08:18:29.000Z | 2020-02-17T08:18:29.000Z | src/sniffer/pyhttp/link_base.py | ptphp/PyLib | 07ac99cf2deb725475f5771b123b9ea1375f5e65 | [
"Apache-2.0"
] | null | null | null | src/sniffer/pyhttp/link_base.py | ptphp/PyLib | 07ac99cf2deb725475f5771b123b9ea1375f5e65 | [
"Apache-2.0"
] | null | null | null | #encoding=UTF-8
"""
@author ideawu@163.com
@link http://www.ideawu.net/
"""
import new, socket
from buffer import *
LINK_ROLE_SERVER = 1
LINK_ROLE_CLIENT = 2
LINK_ROLE_ACCEPT = 3
class LinkBase:
# TODO: accept_all(self):
""" 判断是否已经读就绪 """
""" 进行一次网络读操作 """
""" 进行一次网络写操作
@return
-1: 错误
0 : 建议调用者关闭连接
"""
""" 非阻塞发送(数据拷贝到发送缓冲) """
""" 非阻塞读取 """
""" 见 send_packet, 只传入要发送的报体 """
""" 见 recv_packet, 只返回报体部分 """
""" 非阻塞的 send_packet """
""" 非阻塞的 recv_packet """
""" 将报文写到发送缓冲里
@param urgent: 若为True, 则等待网络发送完毕才返回. 默认等待.
@return
-1: 错误
"""
| 21.267857 | 61 | 0.676742 | #encoding=UTF-8
"""
@author ideawu@163.com
@link http://www.ideawu.net/
"""
import new, socket
from buffer import *
LINK_ROLE_SERVER = 1
LINK_ROLE_CLIENT = 2
LINK_ROLE_ACCEPT = 3
class LinkBase:
    """Base class for a TCP link (Python 2 era code).

    Wraps a socket in one of three roles (server listener, outgoing client,
    accepted connection) plus buffered, packet-oriented send/receive.

    NOTE(review): depends on `Buffer` (from `buffer import *`) and, in the
    send/recv helpers, on `self.PacketClass` / `self.recv_packet`, which are
    not visible here -- presumably supplied by a subclass; confirm.
    """

    def __init__(self, sock=None):
        self.id = -1
        self.fd = None                # cached file descriptor number
        self.sock = None
        self.local_addr = '' # ip:port
        self.remote_addr = '' # ip:port
        self.parent = None            # listening link that accept()ed us, if any
        self.role = None              # one of the LINK_ROLE_* constants
        self.ptr = None               # opaque user-data slot
        self.alive = False
        self.recv_pkt = None          # packet currently being assembled (set elsewhere)
        self.recv_buf = Buffer();
        self.send_buf = Buffer();

    def is_client(self):
        return self.role == LINK_ROLE_CLIENT

    def is_server(self):
        return self.role == LINK_ROLE_SERVER

    def is_accept(self):
        return self.role == LINK_ROLE_ACCEPT

    def listen(self, host, port, backlog=128):
        # Create a listening socket; returns False on failure, None on success.
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind((host, port))
            sock.listen(backlog)
        except BaseException, e:
            return False
        self.role = LINK_ROLE_SERVER
        self.set_sock(sock)

    # TODO: accept_all(self):

    def accept(self):
        # Accept one pending connection and wrap it in a new link of the same
        # concrete class (new.instance builds it without calling __init__,
        # which is then invoked explicitly with the accepted socket).
        sock, addr = self.sock.accept()
        link = new.instance(self.__class__)
        link.__init__(sock)
        link.role = LINK_ROLE_ACCEPT
        link.parent = self
        link.remote_addr = "%s:%d" % sock.getpeername()
        return link

    def connect(self, host, port):
        # Outgoing (client) connection; TCP_NODELAY disables Nagle batching.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((host, port))
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self.role = LINK_ROLE_CLIENT
        self.set_sock(sock)
        self.remote_addr = "%s:%d" % sock.getpeername()

    def set_sock(self, sock):
        self.fd = sock.fileno()
        self.sock = sock
        self.alive = True
        self.local_addr = "%s:%d" % sock.getsockname()

    def is_alive(self):
        return self.alive

    def close(self):
        self.alive = False
        try:
            self.sock.close()
            self.sock = None
        except:
            pass

    def fileno(self):
        # Lets this object be handed directly to select()/poll().
        return self.fd

    """ Check whether a read is already ready (a complete packet is available). """
    def recv_ready(self):
        return self.recv_pkt.ready()

    """ Perform one network read operation. """
    def net_recv(self, bufsize=8192):
        # Returns -1 on error, 0 on EOF, otherwise the number of bytes read.
        try:
            data = self.sock.recv(bufsize)
            #data = self.sock.recv(3)
            #print 'link <-', repr(data)
        except BaseException,e:
            return -1
        if not data:
            return 0
        self.recv_buf.append(data)
        return len(data)

    """ Perform one network write operation.
    @return
        -1: error
        0 : the caller is advised to close the connection
    """
    def net_send(self):
        # NOTE(review): the local name `len` shadows the builtin inside this method.
        try:
            len = self.sock.send(self.send_buf.base)
            #len = self.sock.send(self.send_buf.base[0:3])
            #print 'link ->', repr(self.send_buf.base[0:len])
        except BaseException,e:
            return -1
        self.send_buf.consume(len)
        return len

    """ Non-blocking send (data is copied into the send buffer). """
    def async_send(self, data):
        return self.send(data, urgent=False)

    """ Non-blocking receive. """
    def async_recv(self):
        return self.recv(block=False)

    """ See send_packet; takes only the packet body to send. """
    def send(self, data, urgent=True):
        packet = self.PacketClass()
        packet.set_body(data)
        ret = self.send_packet(packet, urgent)
        return ret

    """ See recv_packet; returns only the body part. """
    def recv(self, block=True):
        ret = self.recv_packet(block)
        if ret == -1:
            return -1
        elif ret == None:
            return None
        else:
            return ret.body

    """ Non-blocking send_packet. """
    def async_send_packet(self, packet):
        return self.send_packet(packet, urgent=False)

    """ Non-blocking recv_packet. """
    def async_recv_packet(self):
        return self.recv_packet(block=False)

    """ Write the packet into the send buffer.
    @param urgent: if True, wait until the network send completes before
        returning. Defaults to waiting.
    @return
        -1: error
    """
    def send_packet(self, packet, urgent=True):
        data = packet.encode()
        self.send_buf.append(data)
        if urgent:
            while self.send_buf.len() > 0:
                if self.net_send() == -1:
                    return -1
        return len(data)
| 2,512 | 0 | 493 |
c0e07cd719fc06a21624cf9c6db2d171d21959fd | 607 | py | Python | python/lambda-container/app.py | gsy0911/aws-cdk-small-examples | e4b4be076d22f5d35f640e59a6ca346988baa1af | [
"Apache-2.0"
] | 2 | 2021-01-19T18:15:22.000Z | 2021-02-09T22:18:09.000Z | python/lambda-container/app.py | gsy0911/aws-cdk-small-examples | e4b4be076d22f5d35f640e59a6ca346988baa1af | [
"Apache-2.0"
] | 14 | 2020-11-20T01:54:27.000Z | 2021-01-12T08:15:39.000Z | python/lambda-container/app.py | gsy0911/aws-cdk-small-examples | e4b4be076d22f5d35f640e59a6ca346988baa1af | [
"Apache-2.0"
] | null | null | null | from aws_cdk import (
aws_lambda as lambda_,
core,
)
if __name__ == "__main__":
main()
| 20.931034 | 58 | 0.589786 | from aws_cdk import (
aws_lambda as lambda_,
core,
)
class LambdaContainer(core.Stack):
    """CDK stack defining one container-image Lambda function.

    The image asset is built from the local ``docker`` directory and pushed
    under the repository name ``lambda_container_example``.
    """

    def __init__(self, app: core.App, _id: str):
        super().__init__(scope=app, id=_id)
        # Result intentionally discarded: instantiating the construct is
        # enough to register it with this stack.
        _ = lambda_.DockerImageFunction(
            scope=self,
            id="container_function",
            code=lambda_.DockerImageCode.from_image_asset(
                directory="docker",
                repository_name="lambda_container_example"
            )
        )
def main():
    """Entry point: build the CDK app with one stack and synthesize it."""
    app = core.App()
    LambdaContainer(app, "LambdaContainer")
    app.synth()


if __name__ == "__main__":
    main()
| 419 | 13 | 72 |
4e1ec56dd39e36309c54aecaacd371e62ead720e | 992 | py | Python | src/mofdb_client/temperature_point.py | n8ta/mofdb-client | 8f5f5c5db98092752879b5a4c986ebd2ea34a985 | [
"MIT"
] | 1 | 2022-03-29T22:39:06.000Z | 2022-03-29T22:39:06.000Z | src/mofdb_client/temperature_point.py | n8ta/mofdb-client | 8f5f5c5db98092752879b5a4c986ebd2ea34a985 | [
"MIT"
] | 1 | 2022-03-25T01:02:18.000Z | 2022-03-25T01:06:47.000Z | src/mofdb_client/temperature_point.py | n8ta/mofdb-client | 8f5f5c5db98092752879b5a4c986ebd2ea34a985 | [
"MIT"
] | 1 | 2022-03-23T16:37:54.000Z | 2022-03-23T16:37:54.000Z | import dataclasses
from typing import List
@dataclasses.dataclass
class GasAtTemp:
"""Adsorption is in the selected loading units and composition is based on the compositionType of the isotherm """
InChIKey: str
name: str
composition: float
adsorption: float
@dataclasses.dataclass
class TemperaturePoint:
"""A single temperature point on an isotherm. It may contain data for multiple different gases if this is a
multicomponent isotherm. See the species_data field for adsorptions of each gas."""
pressure: float
species_data: List[GasAtTemp]
| 35.428571 | 118 | 0.704637 | import dataclasses
from typing import List
@dataclasses.dataclass
class GasAtTemp:
    """Per-gas data at a single temperature point.

    ``adsorption`` is expressed in the selected loading units, and
    ``composition`` is interpreted according to the isotherm's
    compositionType.
    """

    InChIKey: str
    name: str
    composition: float
    adsorption: float

    def __init__(self, json: dict):
        # Populate each declared field from the same-named key of the JSON
        # object (a missing key raises KeyError, as before).
        for attr in ("InChIKey", "name", "composition", "adsorption"):
            setattr(self, attr, json[attr])
@dataclasses.dataclass
class TemperaturePoint:
    """One temperature point of an isotherm.

    May carry data for several gases when the isotherm is multicomponent;
    the per-gas adsorptions live in ``species_data``. A ``total_adsorption``
    attribute is also populated from the source JSON.
    """

    pressure: float
    species_data: List[GasAtTemp]

    def __init__(self, json: dict):
        # Build the point straight from an API JSON object.
        self.pressure = json["pressure"]
        self.total_adsorption = json["total_adsorption"]
        self.species_data = list(map(GasAtTemp, json["species_data"]))
| 357 | 0 | 52 |
0a55c0267a6b0df8495a0f1eda37bf22b685b699 | 1,145 | py | Python | examples/regime_hmm_backtest.py | zhengshouzhi/qstrader-master | 5dc888f72a2ddde8497ff7ae44af68524ec8696c | [
"MIT"
] | 1 | 2018-08-27T07:44:39.000Z | 2018-08-27T07:44:39.000Z | examples/regime_hmm_backtest.py | zhengshouzhi/qstrader-master | 5dc888f72a2ddde8497ff7ae44af68524ec8696c | [
"MIT"
] | null | null | null | examples/regime_hmm_backtest.py | zhengshouzhi/qstrader-master | 5dc888f72a2ddde8497ff7ae44af68524ec8696c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from qstrader.risk_manager.example import ExampleRiskManager
import os
import datetime
from qstrader.price_handler.yahoo_daily_csv_bar import YahooDailyCsvBarPriceHandler
from qstrader.compat import queue
# regime_hmm_backtest.py
import datetime
import pickle
import click
import numpy as np
from qstrader import settings
from qstrader.compat import queue
from qstrader.price_parser import PriceParser
from qstrader.price_handler.yahoo_daily_csv_bar import \
YahooDailyCsvBarPriceHandler
from qstrader.strategy import Strategies, DisplayStrategy
from qstrader.position_sizer.naive import NaivePositionSizer
from qstrader.risk_manager.example import ExampleRiskManager
from qstrader.portfolio_handler import PortfolioHandler
from qstrader.compliance.example import ExampleCompliance
from qstrader.execution_handler.ib_simulated import \
IBSimulatedExecutionHandler
from qstrader.statistics.tearsheet import TearsheetStatistics
from qstrader.trading_session.backtest import Backtest
from .regime_hmm_strategy import MovingAverageCrossStrategy
from qstrader.risk_manager.regime_hmm_risk_manager import RegimeHMMRiskManager | 39.482759 | 83 | 0.883843 | # -*- coding: utf-8 -*-
from qstrader.risk_manager.example import ExampleRiskManager
import os
import datetime
from qstrader.price_handler.yahoo_daily_csv_bar import YahooDailyCsvBarPriceHandler
from qstrader.compat import queue
# regime_hmm_backtest.py
import datetime
import pickle
import click
import numpy as np
from qstrader import settings
from qstrader.compat import queue
from qstrader.price_parser import PriceParser
from qstrader.price_handler.yahoo_daily_csv_bar import \
YahooDailyCsvBarPriceHandler
from qstrader.strategy import Strategies, DisplayStrategy
from qstrader.position_sizer.naive import NaivePositionSizer
from qstrader.risk_manager.example import ExampleRiskManager
from qstrader.portfolio_handler import PortfolioHandler
from qstrader.compliance.example import ExampleCompliance
from qstrader.execution_handler.ib_simulated import \
IBSimulatedExecutionHandler
from qstrader.statistics.tearsheet import TearsheetStatistics
from qstrader.trading_session.backtest import Backtest
from .regime_hmm_strategy import MovingAverageCrossStrategy
from qstrader.risk_manager.regime_hmm_risk_manager import RegimeHMMRiskManager | 0 | 0 | 0 |
f7a8e478a2010f9346166267de4ed8b1acb67692 | 1,079 | py | Python | lex2/_intf_matcher.py | DeltaRazero/liblex2-py3 | 1d6fe9f11225a436b842bd41afb6e6675e549f98 | [
"Zlib"
] | 1 | 2022-01-11T17:26:37.000Z | 2022-01-11T17:26:37.000Z | lex2/_intf_matcher.py | DeltaRazero/liblex2-py3 | 1d6fe9f11225a436b842bd41afb6e6675e549f98 | [
"Zlib"
] | 1 | 2021-04-19T20:29:38.000Z | 2021-04-19T20:29:38.000Z | lex2/_intf_matcher.py | DeltaRazero/liblex2-py3 | 1d6fe9f11225a436b842bd41afb6e6675e549f98 | [
"Zlib"
] | null | null | null | """<internal>"""
'''
zlib License
(C) 2020-2021 DeltaRazero
All rights reserved.
'''
# ***************************************************************************************
class _:
'<imports>'
import abc
from . import textio
from .misc import ptr_t
# ***************************************************************************************
class IMatcher (metaclass=_.abc.ABCMeta):
"""Common interface to a rule matcher object instance.
"""
# --- INTERFACE GETTERS --- #
@_.abc.abstractmethod
def GetVendorId(self) -> str:
"""Gets the lexer implementation identifier string (a.k.a. 'vendor ID').
Returns
-------
str
"""
pass
@_.abc.abstractmethod
def Match(self, ts: _.textio.ITextstream) -> _.ptr_t[str]:
"""Looks for a pattern match and returns string data in case of a match.
Returns
-------
ptr_t[str]
Nullable string object. Contains string data in case of a match, otherwise
NULL/None.
"""
pass
| 21.58 | 89 | 0.47544 | """<internal>"""
'''
zlib License
(C) 2020-2021 DeltaRazero
All rights reserved.
'''
# ***************************************************************************************
class _:
    '<imports>'
    # Namespace holder: modules are imported as class attributes so they do
    # not leak into this module's public namespace (accessed as `_.abc`, ...).
    import abc

    from . import textio
    from .misc import ptr_t
# ***************************************************************************************
class IMatcher (metaclass=_.abc.ABCMeta):
    """Common interface to a rule matcher object instance.
    """

    # --- INTERFACE GETTERS --- #

    @_.abc.abstractmethod
    def GetVendorId(self) -> str:
        """Gets the lexer implementation identifier string (a.k.a. 'vendor ID').

        Returns
        -------
        str
        """
        pass

    @_.abc.abstractmethod
    def Match(self, ts: _.textio.ITextstream) -> _.ptr_t[str]:
        """Looks for a pattern match and returns string data in case of a match.

        Returns
        -------
        ptr_t[str]
            Nullable string object. Contains string data in case of a match,
            otherwise NULL/None.
        """
        pass
| 0 | 0 | 0 |
d08c0add7888f6a1525abbfcfe0a1c26ecb52f3a | 586 | py | Python | functions/apply-extension/main.py | moreal/AutoDMS-Background | fc87ce65e79e5aabdc4261a2d09f5a8b38035207 | [
"MIT"
] | 2 | 2019-04-17T13:19:23.000Z | 2019-04-18T00:00:47.000Z | functions/apply-extension/main.py | moreal/AutoDMS-v2 | fc87ce65e79e5aabdc4261a2d09f5a8b38035207 | [
"MIT"
] | 1 | 2019-06-17T14:44:43.000Z | 2019-06-17T14:44:43.000Z | functions/apply-extension/main.py | moreal/AutoDMS-Backend-v2 | fc87ce65e79e5aabdc4261a2d09f5a8b38035207 | [
"MIT"
] | null | null | null | import base64
import json
import logging
from dmsapi import DMSSession
| 26.636364 | 69 | 0.691126 | import base64
import json
import logging
from dmsapi import DMSSession
def apply_extension(data, context):
    """Event handler: apply a seat 'extension' request through the DMS API.

    ``data['data']`` carries a base64-encoded JSON payload with the user's
    credentials, seat position and time slot (Pub/Sub-style envelope --
    TODO confirm trigger). The outcome is only logged.
    """
    logging.info(data)
    logging.info(context)

    payload = json.loads(base64.b64decode(data['data']).decode('utf-8'))
    user_id = payload['user_id']
    user_password = payload['user_password']
    room, seat = payload['position']
    slot = payload['time']

    session = DMSSession(user_id, user_password)
    succeeded = session.extension.apply(slot, room, seat)
    if succeeded:
        logging.info(f"{user_id}: extension apply success")
    else:
        logging.error(f"{user_id}: extension apply failed")
| 491 | 0 | 23 |
00d0698c7ee709eb61303e6c5087902ada5c93e0 | 6,673 | py | Python | python/oneflow/test/modules/test_consistent_tensor_ops.py | Panlichen/oneflow | ad93c69c9932e5515aa31fb7f157073708810a3d | [
"Apache-2.0"
] | null | null | null | python/oneflow/test/modules/test_consistent_tensor_ops.py | Panlichen/oneflow | ad93c69c9932e5515aa31fb7f157073708810a3d | [
"Apache-2.0"
] | null | null | null | python/oneflow/test/modules/test_consistent_tensor_ops.py | Panlichen/oneflow | ad93c69c9932e5515aa31fb7f157073708810a3d | [
"Apache-2.0"
] | 1 | 2021-12-15T02:14:49.000Z | 2021-12-15T02:14:49.000Z | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
@autotest(n=1, check_graph=False)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@autotest(n=1, check_graph=False)
# PyTorch error if open auto_backward:
# element 0 of tensors does not require grad and does not have a grad_fn
@autotest(n=1, auto_backward=False, check_graph=False)
@autotest(n=1, auto_backward=False, check_graph=False)
@autotest(n=1, auto_backward=False, check_graph=False)
@autotest(n=1, auto_backward=False, check_graph=False)
@autotest(n=1, auto_backward=False, check_graph=False)
@autotest(n=1, auto_backward=False, check_graph=False)
if __name__ == "__main__":
unittest.main()
| 33.873096 | 86 | 0.677357 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _test_type_as(test_case, shape, src_dtype, tgt_dtype, placement, sbp):
    # type_as() must convert a global tensor's dtype to match the target's.
    np_input = np.random.rand(*shape)
    input = flow.tensor(np_input, dtype=src_dtype).to_global(placement, sbp)
    target = flow.tensor(np_input, dtype=tgt_dtype).to_global(placement, sbp)
    input = input.type_as(target)
    test_case.assertEqual(input.dtype, target.dtype)
def _test_is_floating_point(test_case, shape, dtype, placement, sbp):
    # is_floating_point() must be True exactly for the float dtypes listed below.
    np_input = np.random.rand(*shape)
    input = flow.tensor(np_input, dtype=dtype).to_global(placement, sbp)
    output = input.is_floating_point()
    if input.dtype in (flow.float, flow.float16, flow.float32, flow.double):
        test_case.assertEqual(output, True)
    else:
        test_case.assertEqual(output, False)
# `autotest` (star-imported from automated_test_util) appears to run the body
# and compare the returned dual tensor between frameworks -- confirm semantics.
@autotest(n=1, check_graph=False)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def _test_global_cuda(test_case, placement, sbp):
    # Move a global tensor to CUDA, then reduce it.
    x = random_tensor(2, 8, 16).to_global(placement, sbp)
    x = x.cuda()
    y = x.sum()
    return y
class TestConsistentCuda(flow.unittest.TestCase):
    @globaltest
    def test_global_cuda(test_case):
        # Sweep every placement / SBP combination (SBP up to 2-D).
        for placement in all_placement():
            for sbp in all_sbp(placement, max_dim=2):
                _test_global_cuda(test_case, placement, sbp)
@autotest(n=1, check_graph=False)
def _test_global_cpu(test_case, placement, sbp):
    # .cpu() on a global tensor, followed by a reduction.
    x = random_tensor(2, 8, 16).to_global(placement, sbp)
    x = x.cpu()
    y = x.sum()
    return y
# PyTorch error if open auto_backward:
# element 0 of tensors does not require grad and does not have a grad_fn
@autotest(n=1, auto_backward=False, check_graph=False)
def _test_global_long(test_case, placement, sbp):
    # Casting to int64 must drop requires_grad on the OneFlow side.
    x = random_tensor(2, 8, 16, requires_grad=True).to_global(placement, sbp)
    y = x.long()
    test_case.assertFalse(y.oneflow.requires_grad)
    return y
@autotest(n=1, auto_backward=False, check_graph=False)
def _test_global_int(test_case, placement, sbp):
    # Casting to int32 must drop requires_grad on the OneFlow side.
    x = random_tensor(2, 8, 16, requires_grad=True).to_global(placement, sbp)
    y = x.int()
    test_case.assertFalse(y.oneflow.requires_grad)
    return y
@autotest(n=1, auto_backward=False, check_graph=False)
def _test_global_float(test_case, placement, sbp):
    # int -> float32 cast on a global tensor.
    x = random_tensor(2, 8, 16, dtype=int).to_global(placement, sbp)
    y = x.float()
    return y
@autotest(n=1, auto_backward=False, check_graph=False)
def _test_global_double(test_case, placement, sbp):
    # int -> float64 cast on a global tensor.
    x = random_tensor(2, 8, 16, dtype=int).to_global(placement, sbp)
    y = x.double()
    return y
@autotest(n=1, auto_backward=False, check_graph=False)
def _test_global_item(test_case, placement, sbp):
    # .item() on a one-element global tensor; re-wrapped as a tensor so the
    # scalar can be compared by the autotest harness.
    x = random_tensor(ndim=1, dim0=1, dtype=int).to_global(placement, sbp)
    y = torch.tensor(x.item())
    return y
@autotest(n=1, auto_backward=False, check_graph=False)
def _test_global_tolist(test_case, placement, sbp):
    # .tolist() on a 4-D global tensor; re-wrapped as a tensor for comparison.
    x = random_tensor(ndim=4, dim0=8, dim1=16, dim2=24, dim3=32, dtype=int).to_global(
        placement, sbp
    )
    y = torch.tensor(x.tolist())
    return y
class TestConsistentTensorOps(flow.unittest.TestCase):
    """Drives the _test_global_* / _test_type_as / _test_is_floating_point
    helpers over every available placement and SBP combination."""

    @globaltest
    def test_global_cpu(test_case):
        for placement in all_placement():
            for sbp in all_sbp(placement, max_dim=2):
                _test_global_cpu(test_case, placement, sbp)

    @globaltest
    def test_global_long(test_case):
        for placement in all_placement():
            for sbp in all_sbp(placement, max_dim=2):
                _test_global_long(test_case, placement, sbp)

    @globaltest
    def test_global_int(test_case):
        for placement in all_placement():
            for sbp in all_sbp(placement, max_dim=2):
                _test_global_int(test_case, placement, sbp)

    @globaltest
    def test_global_float(test_case):
        for placement in all_placement():
            for sbp in all_sbp(placement, max_dim=2):
                _test_global_float(test_case, placement, sbp)

    @globaltest
    def test_global_double(test_case):
        for placement in all_placement():
            for sbp in all_sbp(placement, max_dim=2):
                _test_global_double(test_case, placement, sbp)

    @globaltest
    def test_global_item(test_case):
        # except_split=True: split SBP is skipped for the .item() case.
        for placement in all_placement():
            for sbp in all_sbp(placement, max_dim=1, except_split=True):
                _test_global_item(test_case, placement, sbp)

    @globaltest
    def test_global_tolist(test_case):
        for placement in all_placement():
            for sbp in all_sbp(placement, max_dim=4):
                _test_global_tolist(test_case, placement, sbp)

    @globaltest
    def test_type_as(test_case):
        # Cartesian product of shapes and src/tgt dtypes via GenArgList.
        arg_dict = OrderedDict()
        arg_dict["shape"] = [(8, 16), (8, 16, 24), (8, 16, 24, 32)]
        arg_dict["src_dtype"] = [flow.int64, flow.int32, flow.float32, flow.float64]
        arg_dict["tgt_dtype"] = [flow.int64, flow.int32, flow.float32, flow.float64]
        for arg in GenArgList(arg_dict):
            for placement in all_placement():
                for sbp in all_sbp(placement, max_dim=len(arg[0])):
                    _test_type_as(test_case, *arg, placement, sbp)

    @globaltest
    def test_is_floating_point(test_case):
        arg_dict = OrderedDict()
        arg_dict["shape"] = [(8, 16), (8, 16, 24), (8, 16, 24, 32)]
        arg_dict["dtype"] = [
            # flow.uint8, nccl don't support uint8
            flow.int8,
            flow.int32,
            flow.int64,
            flow.float32,
            flow.float64,
            flow.double,
            flow.float,
            flow.int,
        ]
        for arg in GenArgList(arg_dict):
            for placement in all_placement():
                for sbp in all_sbp(placement, max_dim=len(arg[0])):
                    _test_is_floating_point(test_case, *arg, placement, sbp)
if __name__ == "__main__":
unittest.main()
| 4,444 | 489 | 268 |
0c80e9770f494dc247fc7a3426ed8b1bbc1db38f | 9,854 | py | Python | kerasnn.py | elmadj/tf.flights.delay.nn | 212c6a85552262bc6cdd7e4d611d2a507b6c03a2 | [
"Apache-2.0"
] | null | null | null | kerasnn.py | elmadj/tf.flights.delay.nn | 212c6a85552262bc6cdd7e4d611d2a507b6c03a2 | [
"Apache-2.0"
] | null | null | null | kerasnn.py | elmadj/tf.flights.delay.nn | 212c6a85552262bc6cdd7e4d611d2a507b6c03a2 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import keras as k
from keras.layers import Dense, Dropout, Activation, Concatenate
from keras.optimizers import SGD
import os
import argparse
import shutil
import math
import sys
import datetime
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from tensorflow.python.ops import variables
import logging
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
from keras.callbacks import TensorBoard
from keras.models import Sequential
from keras.layers import Dense, Activation, Embedding, Merge, Flatten, Input, concatenate
from keras.regularizers import l1_l2
from keras.models import Model
from keras.optimizers import SGD
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_dir', type=str, default='',
help='Base directory for the model.')
parser.add_argument(
'--model_type', type=str, default='deep',
help="Valid model types: {'wide', 'deep', 'wide_deep'}.")
parser.add_argument(
'--train_epochs', type=int, default=800, help='Number of training epochs.')
parser.add_argument(
'--learning_rate', type=float, default=[0.01], nargs='+',
help='The learning_rate.')
parser.add_argument(
'--decay', type=float, default=0.000001,
help='The decay.')
parser.add_argument(
'--L1', type=float, default=0.0,
help='The l1 regularization coeff.')
parser.add_argument(
'--momentum', type=float, default=0.0,
help='The momentum.')
parser.add_argument(
'--L2', type=float, default=0.0,
help='The l2 regularization coeff.')
parser.add_argument(
'--batch_size', type=int, default=300, help='Number of examples per batch.')
parser.add_argument(
'--all_data', type=str, default='',
help='Path to the test data.')
parser.add_argument('--where', type=str, default='gpu', help='cpu of gpu')
parser.add_argument(
'--airport', type=int, default=0,
help='airport number.')
parser.add_argument(
'--root_dir', type=str, default='./', help='root directory')
# In[95]:
# In[145]:
# weightsVect = class_weight.compute_class_weight('balanced', [0,1,2,3,4,5,6], trainSet['ARRIVAL_DELAY_LABEL'])
# weightsVect
# # In[146]:
# weights = np.zeros(len(y_train))
# i=0
# for x in np.nditer(y_train):
# weights[i] = weightsVect[x]
# i+=1
if __name__ == '__main__':
FLAGS, unparsed = parser.parse_known_args()
print(FLAGS)
if FLAGS.where == 'gpu':
num_GPU = 1
num_CPU = 2
if FLAGS.where == 'cpu':
num_CPU = 2
num_GPU = 0
config = tf.ConfigProto(device_count = {'CPU' : num_CPU, 'GPU' : num_GPU})
session = tf.Session(config=config)
k.backend.set_session(session)
flights, trainSet, validationSet, testSet = setup_data(FLAGS.all_data, FLAGS.airport)
size = trainSet.shape[0]
val_size = validationSet.shape[0]
input_train_data = [trainSet['DESTINATION_AIRPORT'],
trainSet['TAIL_NUMBER'],
trainSet['FLIGHT_NUMBER'],
trainSet['AIRLINE'],
trainSet['DAY_OF_WEEK'],
trainSet['DAY'],
trainSet['MONTH'],
trainSet['SCHEDULED_ARRIVAL'].astype('float32').reshape((size, 1, 1)),
trainSet['SCHEDULED_DEPARTURE'].astype('float32').reshape((size, 1, 1)),
trainSet['DISTANCE'].astype('float32').reshape((size, 1, 1))]
input_val_train_data = [validationSet['DESTINATION_AIRPORT'],
validationSet['TAIL_NUMBER'],
validationSet['FLIGHT_NUMBER'],
validationSet['AIRLINE'],
validationSet['DAY_OF_WEEK'],
validationSet['DAY'],
validationSet['MONTH'],
validationSet['SCHEDULED_ARRIVAL'].astype('float32').reshape((val_size, 1, 1)),
validationSet['SCHEDULED_DEPARTURE'].astype('float32').reshape((val_size, 1, 1)),
validationSet['DISTANCE'].astype('float32').reshape((val_size, 1, 1))]
y_train = trainSet['ARRIVAL_DELAY_LABEL'].reshape((size, 1, 1))
y_validation = validationSet['ARRIVAL_DELAY_LABEL'].reshape((val_size, 1, 1))
for lr in FLAGS.learning_rate:
print('Fitting model with learning rate = ', lr)
sgd = SGD(lr=lr,
decay=FLAGS.decay,
momentum=FLAGS.momentum,
nesterov=True)
model = BuildFeedForwardNNClassifier([
flights['DISTANCE'].astype('float32'),
flights['SCHEDULED_DEPARTURE'].astype('float32'),
flights['SCHEDULED_ARRIVAL'].astype('float32')],
[
flights['MONTH'],
flights['DAY'],
flights['DAY_OF_WEEK'],
flights['AIRLINE'],
flights['FLIGHT_NUMBER'],
flights['TAIL_NUMBER'],
flights['DESTINATION_AIRPORT']],
flights['ARRIVAL_DELAY_LABEL'], 1.2, 1, 'sigmoid',sgd,
FLAGS.L1,
FLAGS.L2)
print(model.summary())
model_directory = FLAGS.model_dir+'_lr'+str(lr)
tbCallBack = TensorBoard(log_dir=model_directory,
histogram_freq=0,
write_graph=True,
write_images=False)
model.fit(x=input_train_data, y=y_train, callbacks=[tbCallBack], batch_size=FLAGS.batch_size,
epochs=FLAGS.train_epochs, validation_data=(input_val_train_data, y_validation), shuffle=True)
#/, sample_weight=weights
| 37.9 | 152 | 0.619951 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import keras as k
from keras.layers import Dense, Dropout, Activation, Concatenate
from keras.optimizers import SGD
import os
import argparse
import shutil
import math
import sys
import datetime
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from tensorflow.python.ops import variables
import logging
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
from keras.callbacks import TensorBoard
from keras.models import Sequential
from keras.layers import Dense, Activation, Embedding, Merge, Flatten, Input, concatenate
from keras.regularizers import l1_l2
from keras.models import Model
from keras.optimizers import SGD
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_dir', type=str, default='',
help='Base directory for the model.')
parser.add_argument(
'--model_type', type=str, default='deep',
help="Valid model types: {'wide', 'deep', 'wide_deep'}.")
parser.add_argument(
'--train_epochs', type=int, default=800, help='Number of training epochs.')
parser.add_argument(
'--learning_rate', type=float, default=[0.01], nargs='+',
help='The learning_rate.')
parser.add_argument(
'--decay', type=float, default=0.000001,
help='The decay.')
parser.add_argument(
'--L1', type=float, default=0.0,
help='The l1 regularization coeff.')
parser.add_argument(
'--momentum', type=float, default=0.0,
help='The momentum.')
parser.add_argument(
'--L2', type=float, default=0.0,
help='The l2 regularization coeff.')
parser.add_argument(
'--batch_size', type=int, default=300, help='Number of examples per batch.')
parser.add_argument(
'--all_data', type=str, default='',
help='Path to the test data.')
parser.add_argument('--where', type=str, default='gpu', help='cpu of gpu')
parser.add_argument(
'--airport', type=int, default=0,
help='airport number.')
parser.add_argument(
'--root_dir', type=str, default='./', help='root directory')
def setup_data(flightsfile, airport):
    """Load the flights CSV, keep rows departing from *airport*, and build
    class-balanced train / validation / test samples.

    Every delay class (labels 0-6) is over-sampled with replacement up to the
    size of the largest class, then split 80/20 into train/test, and the train
    part again 80/20 into train/validation.

    Returns (flights, trainSet, validationSet, testSet) as DataFrames.
    """
    flights = pd.read_csv(flightsfile)
    flights = flights[flights['ORIGIN_AIRPORT'] == airport]
    flights.reset_index(inplace=True)
    grouped = flights.groupby('ARRIVAL_DELAY_LABEL')
    # Create a balanced train, validation and test set.
    # Start from integer-dtype arrays: concatenating a plain [] with an int
    # array promotes the result to float64, and .iloc rejects float indexers.
    sampled_indices_train = np.array([], dtype=int)
    sampled_indices_test = np.array([], dtype=int)
    sampled_indices_validation = np.array([], dtype=int)
    sampling_size = grouped.size().max()
    for i in range(0, 7):  # NOTE(review): assumes exactly 7 delay classes (0-6) -- confirm
        print('Sampling class', i)
        # Sampling with replacement over-samples minority classes up to the
        # majority class size. NOTE(review): duplicates drawn here can land in
        # both the train and test splits -- confirm that leakage is acceptable.
        sampled_indices_group = np.array(np.random.choice(grouped.groups[i], sampling_size))
        sampled_indices_train_group, sampled_indices_test_group = train_test_split(
            sampled_indices_group, test_size=0.20, random_state=42)
        sampled_indices_train_group, sampled_indices_validation_group = train_test_split(
            sampled_indices_train_group, test_size=0.20, random_state=42)
        sampled_indices_train = np.concatenate([sampled_indices_train, sampled_indices_train_group])
        sampled_indices_test = np.concatenate([sampled_indices_test, sampled_indices_test_group])
        sampled_indices_validation = np.concatenate([sampled_indices_validation, sampled_indices_validation_group])
    trainSet = flights.iloc[sampled_indices_train, :]
    testSet = flights.iloc[sampled_indices_test, :]
    validationSet = flights.iloc[sampled_indices_validation, :]
    return flights, trainSet, validationSet, testSet
# In[95]:
def get_embedding_dimension(vocabulary_size):
    """Heuristic embedding width: the ceiling of the fourth root of the vocabulary size."""
    return math.ceil(vocabulary_size ** 0.25)
def BuildFeedForwardNNClassifier(NonCategoricalInputs, CatInputs, Outputs, denseLayersFactor, nbHiddenLayers, activation, optimizer, L1, L2):
    """Build and compile a Keras classifier with one Input per column.

    Numeric columns feed straight into the network; categorical columns each
    get an Embedding sized by get_embedding_dimension(). All inputs are
    concatenated, passed through nbHiddenLayers relu Dense layers of width
    ceil(denseLayersFactor * total_input_width), and finished with a softmax
    over the number of distinct Output labels.

    NOTE(review): the `activation` parameter is never used in this body
    ('relu'/'softmax' are hard-coded) -- confirm intent.
    Returns the compiled keras.Model.
    """
    Inputs = []
    sum_of_all_dimensions = 0
    i__s = (1, 1,)
    NonCatNs = []
    # Numeric columns: one (1, 1)-shaped Input each, contributing width 1.
    for column in NonCategoricalInputs:
        inputN = Input(shape=i__s, dtype=column.dtype, name='input_'+column.name)
        print(inputN)
        # Prepending reverses the order relative to the argument lists; the
        # caller must feed data in this reversed order.
        NonCatNs = [inputN]+NonCatNs
        Inputs = [inputN]+Inputs
        sum_of_all_dimensions += 1
    i_s = (1,)
    CatNs = []
    # Categorical columns: integer Input -> Embedding lookup.
    for column in CatInputs:
        number_of_categories = column.unique().size
        inputN = Input(shape=i_s, dtype='int32', name='input_'+column.name)
        embedding_dimension = get_embedding_dimension(number_of_categories)
        encoderN = Embedding(output_dim=embedding_dimension, input_dim=number_of_categories, input_length=1, name='embedding_'+column.name)(inputN)
        CatNs = [encoderN]+CatNs
        print(encoderN)
        Inputs = [inputN]+Inputs
        sum_of_all_dimensions += embedding_dimension
    all_inputs = NonCatNs+CatNs
    I = concatenate(all_inputs)
    # Hidden width scales with the concatenated input width.
    dense_layer_dimension = math.ceil(denseLayersFactor * sum_of_all_dimensions)
    x = I
    for i in range(0, nbHiddenLayers):
        x = Dense(dense_layer_dimension, activation='relu', init='glorot_normal', activity_regularizer=l1_l2(L1, L2), name='hidden_'+str(i))(x)
        print('Building layer',i)
    number_of_output_classes = Outputs.unique().size
    main_output = Dense(number_of_output_classes, activation='softmax', name='main_output')(x)
    model = Model(inputs=Inputs, outputs=[main_output])
    # Integer class labels, hence the sparse categorical cross-entropy loss.
    model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    return model
# In[145]:
# weightsVect = class_weight.compute_class_weight('balanced', [0,1,2,3,4,5,6], trainSet['ARRIVAL_DELAY_LABEL'])
# weightsVect
# # In[146]:
# weights = np.zeros(len(y_train))
# i=0
# for x in np.nditer(y_train):
# weights[i] = weightsVect[x]
# i+=1
if __name__ == '__main__':
    FLAGS, unparsed = parser.parse_known_args()
    print(FLAGS)
    # Constrain TensorFlow to the requested device type before Keras starts.
    if FLAGS.where == 'gpu':
        num_GPU = 1
        num_CPU = 2
    if FLAGS.where == 'cpu':
        num_CPU = 2
        num_GPU = 0
    config = tf.ConfigProto(device_count = {'CPU' : num_CPU, 'GPU' : num_GPU})
    session = tf.Session(config=config)
    k.backend.set_session(session)
    flights, trainSet, validationSet, testSet = setup_data(FLAGS.all_data, FLAGS.airport)
    size = trainSet.shape[0]
    val_size = validationSet.shape[0]
    # Inputs are ordered to match the Input layers of
    # BuildFeedForwardNNClassifier (which prepends, i.e. reverses, as it
    # builds): categorical columns first, then the numeric columns reshaped
    # to (batch, 1, 1).
    # NOTE(review): Series.reshape relies on a long-removed pandas API --
    # modern pandas needs .values.reshape(...); confirm the targeted versions.
    input_train_data = [trainSet['DESTINATION_AIRPORT'],
                    trainSet['TAIL_NUMBER'],
                    trainSet['FLIGHT_NUMBER'],
                    trainSet['AIRLINE'],
                    trainSet['DAY_OF_WEEK'],
                    trainSet['DAY'],
                    trainSet['MONTH'],
                    trainSet['SCHEDULED_ARRIVAL'].astype('float32').reshape((size, 1, 1)),
                    trainSet['SCHEDULED_DEPARTURE'].astype('float32').reshape((size, 1, 1)),
                    trainSet['DISTANCE'].astype('float32').reshape((size, 1, 1))]
    input_val_train_data = [validationSet['DESTINATION_AIRPORT'],
                           validationSet['TAIL_NUMBER'],
                           validationSet['FLIGHT_NUMBER'],
                           validationSet['AIRLINE'],
                           validationSet['DAY_OF_WEEK'],
                           validationSet['DAY'],
                           validationSet['MONTH'],
                           validationSet['SCHEDULED_ARRIVAL'].astype('float32').reshape((val_size, 1, 1)),
                           validationSet['SCHEDULED_DEPARTURE'].astype('float32').reshape((val_size, 1, 1)),
                           validationSet['DISTANCE'].astype('float32').reshape((val_size, 1, 1))]
    y_train = trainSet['ARRIVAL_DELAY_LABEL'].reshape((size, 1, 1))
    y_validation = validationSet['ARRIVAL_DELAY_LABEL'].reshape((val_size, 1, 1))
    # One full training run (with its own TensorBoard log dir) per requested
    # learning rate.
    for lr in FLAGS.learning_rate:
        print('Fitting model with learning rate = ', lr)
        sgd = SGD(lr=lr,
                  decay=FLAGS.decay,
                  momentum=FLAGS.momentum,
                  nesterov=True)
        model = BuildFeedForwardNNClassifier([
                flights['DISTANCE'].astype('float32'),
                flights['SCHEDULED_DEPARTURE'].astype('float32'),
                flights['SCHEDULED_ARRIVAL'].astype('float32')],
                [
                flights['MONTH'],
                flights['DAY'],
                flights['DAY_OF_WEEK'],
                flights['AIRLINE'],
                flights['FLIGHT_NUMBER'],
                flights['TAIL_NUMBER'],
                flights['DESTINATION_AIRPORT']],
                flights['ARRIVAL_DELAY_LABEL'], 1.2, 1, 'sigmoid',sgd,
                FLAGS.L1,
                FLAGS.L2)
        print(model.summary())
        model_directory = FLAGS.model_dir+'_lr'+str(lr)
        tbCallBack = TensorBoard(log_dir=model_directory,
                                 histogram_freq=0,
                                 write_graph=True,
                                 write_images=False)
        model.fit(x=input_train_data, y=y_train, callbacks=[tbCallBack], batch_size=FLAGS.batch_size,
                  epochs=FLAGS.train_epochs, validation_data=(input_val_train_data, y_validation), shuffle=True)
    #/, sample_weight=weights
| 3,265 | 0 | 73 |
f995e7a3b8f9bb10a3d7a7a0d85d4f6b627477bd | 1,140 | py | Python | CodinCloud/app.py | creativcoder/CodinCloud | 3fd45326b2c1d3f6afa9502f373f0ca14d4a5edd | [
"MIT"
] | 1 | 2020-02-28T09:46:54.000Z | 2020-02-28T09:46:54.000Z | CodinCloud/app.py | creativcoder/pyjudge | 3fd45326b2c1d3f6afa9502f373f0ca14d4a5edd | [
"MIT"
] | null | null | null | CodinCloud/app.py | creativcoder/pyjudge | 3fd45326b2c1d3f6afa9502f373f0ca14d4a5edd | [
"MIT"
] | null | null | null | import os
import config
import logging
import time
from task_handler import CodeTask
import subprocess
from flask import Flask,request,render_template,url_for,jsonify
logging.basicConfig(level=logging.DEBUG)
app=Flask(__name__)
app.secret = config.KEY
@app.route('/')
@app.route('/compile')
@app.route('/about')
@app.route('/signup')
@app.route('/login')
if __name__=='__main__':
app.run()
| 22.8 | 63 | 0.735965 | import os
import config
import logging
import time
from task_handler import CodeTask
import subprocess
from flask import Flask,request,render_template,url_for,jsonify
logging.basicConfig(level=logging.DEBUG)
app=Flask(__name__)
# NOTE(review): Flask reads the session key from `app.secret_key` /
# app.config['SECRET_KEY']; assigning `app.secret` has no effect on
# sessions -- confirm whether `secret_key` was intended.
app.secret = config.KEY
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/compile')
def compile():
    """Run the submitted source through a CodeTask and return its output.

    Query parameters: ``filename``, ``source_code`` and optional ``stdin``.
    Responds with JSON ``{"result": ...}`` where newlines are replaced by
    ``<br>`` so the client can inject the text into HTML.
    """
    filename = request.args.get('filename', '', type=str)
    source_code = request.args.get('source_code', '', type=str)
    stdin = request.args.get('stdin', '', type=str)
    # Lazy %-style arguments: formatting is skipped when DEBUG is disabled.
    logging.debug('on app.py = %s', len(stdin))
    logging.debug('on app.py = %s', stdin)
    new_task = CodeTask(1)
    # Only pass stdin through when the client actually supplied some input.
    if stdin:
        output_result = new_task.compile(filename, source_code, stdin)
    else:
        output_result = new_task.compile(filename, source_code)
    logging.debug(output_result)
    # NOTE(review): presumably gives the task time to finish/flush -- confirm
    # whether this delay is still required.
    time.sleep(0.5)
    return jsonify(result=output_result.replace('\n', '<br>'))
@app.route('/about')
def about():
    """Render the about page."""
    return render_template('about.html')
@app.route('/signup')
def signup():
    """Render the signup form page."""
    return render_template('signup.html')
@app.route('/login')
def login():
    """Render the login form page."""
    return render_template('login.html')
if __name__=='__main__':
    # Start the Flask development server.
    app.run()
| 629 | 0 | 110 |
46d1a45812ff03a6562d2f9d405174d1b448a8d4 | 2,365 | py | Python | main.py | mrlevitas/Conference-Central | 9f141629e4a08c5e448f73395020990739ae8fc1 | [
"Apache-2.0"
] | 2 | 2015-07-23T00:50:51.000Z | 2015-11-17T04:46:15.000Z | main.py | mrlevitas/Conference-Central | 9f141629e4a08c5e448f73395020990739ae8fc1 | [
"Apache-2.0"
] | null | null | null | main.py | mrlevitas/Conference-Central | 9f141629e4a08c5e448f73395020990739ae8fc1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from conference import ConferenceApi
from models import SpeakerDict
import logging
SPEAKER_IDENTIFIER = 1234
app = webapp2.WSGIApplication([
('/crons/set_announcement', SetAnnouncementHandler),
('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
('/tasks/add_featured_speaker', AddFeaturedSpeaker),
], debug=True)
| 35.833333 | 93 | 0.65666 | #!/usr/bin/env python
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from conference import ConferenceApi
from models import SpeakerDict
import logging
SPEAKER_IDENTIFIER = 1234
class SetAnnouncementHandler(webapp2.RequestHandler):
    """Cron handler that refreshes the announcement cache."""
    def get(self):
        """Set Announcement in Memcache."""
        # use _cacheAnnouncement() to set announcement in Memcache
        ConferenceApi._cacheAnnouncement()
class SendConfirmationEmailHandler(webapp2.RequestHandler):
    """Task-queue handler that emails the creator of a new Conference."""
    def post(self):
        """Send email confirming Conference creation."""
        mail.send_mail(
            'noreply@%s.appspotmail.com' % (
                app_identity.get_application_id()),     # from
            self.request.get('email'),                  # to
            'You created a new Conference!',            # subj
            'Hi, you have created a following '         # body
            'conference:\r\n\r\n%s' % self.request.get(
                'conferenceInfo')
        )
class AddFeaturedSpeaker(webapp2.RequestHandler):
    """Task-queue handler that counts sessions per speaker and features
    any speaker who appears more than once."""

    def post(self):
        """Check Speaker being added in Session & add as featured in memcache if qualifies"""
        speakerName = self.request.get('speakerName')
        dictSpeaker = SpeakerDict.query(SpeakerDict.identifier == SPEAKER_IDENTIFIER).get()
        # No counter entity exists yet: create and initialise one.
        if dictSpeaker is None:
            dictSpeaker = SpeakerDict()
            dictSpeaker.identifier = SPEAKER_IDENTIFIER
            dictSpeaker.put()
        # Trace output belongs at DEBUG, not ERROR; lazy %-args skip
        # formatting when the level is disabled.
        logging.debug('BEFORE dictSpeaker is= %s', dictSpeaker)
        # First session for this speaker: initialise the count to 1.
        if dictSpeaker.speaker_num.get(speakerName) is None:
            dictSpeaker.speaker_num[speakerName] = 1
        # Repeat speaker: bump the count and feature them in memcache.
        else:
            dictSpeaker.speaker_num[speakerName] += 1
            api = ConferenceApi()
            api.cacheFeaturedSpeaker(speakerName)
        logging.debug('After dictSpeaker is= %s', dictSpeaker)
        dictSpeaker.put()
# URL routes for the cron job and task-queue handlers above.
app = webapp2.WSGIApplication([
    ('/crons/set_announcement', SetAnnouncementHandler),
    ('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
    ('/tasks/add_featured_speaker', AddFeaturedSpeaker),
], debug=True)
| 0 | 1,830 | 69 |
4fc38576370f82a0326f5979142443a209bd32d1 | 5,399 | py | Python | service_catalog/models/instance.py | Sispheor/squest | f852fe7986521a9d8cd9fb5eb0b56aa15f22548c | [
"Apache-2.0"
] | 112 | 2021-04-21T08:52:55.000Z | 2022-03-01T15:09:19.000Z | service_catalog/models/instance.py | Sispheor/squest | f852fe7986521a9d8cd9fb5eb0b56aa15f22548c | [
"Apache-2.0"
] | 216 | 2021-04-21T09:06:47.000Z | 2022-03-30T14:21:28.000Z | service_catalog/models/instance.py | Sispheor/squest | f852fe7986521a9d8cd9fb5eb0b56aa15f22548c | [
"Apache-2.0"
] | 21 | 2021-04-20T13:53:54.000Z | 2022-03-30T21:43:04.000Z | import logging
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from django_fsm import FSMField, transition, post_transition
from profiles.models import BillingGroup
from profiles.models.role_manager import RoleManager
from . import Service, InstanceState
from .state_hooks import HookManager
logger = logging.getLogger(__name__)
post_transition.connect(HookManager.trigger_hook_handler, sender=Instance)
@receiver(pre_save, sender=Instance)
@receiver(post_save, sender=Instance)
| 36.979452 | 104 | 0.708094 | import logging
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from django_fsm import FSMField, transition, post_transition
from profiles.models import BillingGroup
from profiles.models.role_manager import RoleManager
from . import Service, InstanceState
from .state_hooks import HookManager
logger = logging.getLogger(__name__)
class Instance(RoleManager):
    """Service-catalog instance: a deployed Service with a finite-state
    lifecycle (django-fsm) and per-user/per-team role propagation to its
    related requests."""
    name = models.CharField(verbose_name="Instance name", max_length=100)
    # Arbitrary JSON payload describing the instance; {} by default.
    spec = models.JSONField(default=dict, blank=True)
    service = models.ForeignKey(Service, blank=True, null=True, on_delete=models.SET_NULL)
    spoc = models.ForeignKey(User, null=True, help_text='Single Point Of Contact', verbose_name="SPOC",
                             on_delete=models.SET_NULL)
    # Lifecycle state driven by the @transition methods below.
    state = FSMField(default=InstanceState.PENDING)
    billing_group = models.ForeignKey(
        BillingGroup,
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        related_name='instances',
        related_query_name='instance'
    )
    def __str__(self):
        """Human-readable label, e.g. "my-instance (#42)"."""
        return f"{self.name} (#{self.id})"
    def clean(self):
        """Reject a null spec (empty JSON must be {}, not None)."""
        if self.spec is None:
            raise ValidationError({'spec': _("Please enter a valid JSON. Empty value is {} for JSON.")})
    def opened_support_count(self):
        """Number of support tickets still open on this instance."""
        # Local import avoids a circular import with the support module.
        from .support import SupportState
        return self.supports.filter(state=SupportState.OPENED).count()
    # --- FSM transitions (bodies are empty: django-fsm updates `state`) ---
    @transition(field=state, source=[InstanceState.PENDING, InstanceState.PROVISION_FAILED],
                target=InstanceState.PROVISIONING)
    def provisioning(self):
        """Start (or retry) provisioning."""
        pass
    @transition(field=state, source=InstanceState.PROVISIONING, target=InstanceState.PROVISION_FAILED)
    def provisioning_has_failed(self):
        """Mark provisioning as failed."""
        pass
    @transition(field=state, source=[InstanceState.PROVISION_FAILED, InstanceState.DELETE_FAILED,
                                     InstanceState.UPDATE_FAILED,
                                     InstanceState.PROVISIONING, InstanceState.UPDATING],
                target=InstanceState.AVAILABLE)
    def available(self):
        """Declare the instance usable again."""
        pass
    @transition(field=state, source=[InstanceState.AVAILABLE, InstanceState.UPDATE_FAILED],
                target=InstanceState.UPDATING)
    def updating(self):
        """Start an update."""
        pass
    @transition(field=state, source=InstanceState.UPDATING, target=InstanceState.UPDATE_FAILED)
    def update_has_failed(self):
        """Mark the running update as failed."""
        pass
    @transition(field=state, source=InstanceState.UPDATE_FAILED, target=InstanceState.UPDATING)
    def retry_update(self):
        """Retry a failed update."""
        pass
    @transition(field=state, source=[InstanceState.AVAILABLE, InstanceState.DELETE_FAILED],
                target=InstanceState.DELETING)
    def deleting(self):
        """Start (or retry) deletion."""
        pass
    @transition(field=state, source=InstanceState.DELETING, target=InstanceState.DELETE_FAILED)
    def delete_has_failed(self):
        """Mark deletion as failed."""
        pass
    @transition(field=state, source=InstanceState.DELETING, target=InstanceState.DELETED)
    def deleted(self):
        """Confirm deletion completed."""
        pass
    @transition(field=state, source=InstanceState.DELETED, target=InstanceState.ARCHIVED)
    def archive(self):
        """Archive a deleted instance."""
        pass
    def reset_to_last_stable_state(self):
        """Roll a *_FAILED state back to the stable state that preceded it."""
        if self.state == InstanceState.PROVISION_FAILED:
            self.state = InstanceState.PENDING
        if self.state in [InstanceState.UPDATE_FAILED, InstanceState.DELETE_FAILED]:
            self.state = InstanceState.AVAILABLE
    # --- Role management: every change is propagated to related requests ---
    def add_user_in_role(self, user, role_name):
        """Grant the role on the instance and on all of its requests."""
        super(Instance, self).add_user_in_role(user, role_name)
        for request in self.request_set.all():
            request.add_user_in_role(user, role_name)
    def get_roles_of_users(self):
        """Return the inherited role map with "SPOC" appended for the spoc user."""
        roles = super(Instance, self).get_roles_of_users()
        roles[self.spoc.id].append("SPOC")
        return roles
    def remove_user_in_role(self, user, role_name=None):
        """Revoke the role on the instance and on all of its requests."""
        super(Instance, self).remove_user_in_role(user, role_name)
        for request in self.request_set.all():
            request.remove_user_in_role(user, role_name)
    def add_team_in_role(self, team, role_name):
        """Grant the role to a team on the instance and its requests."""
        super(Instance, self).add_team_in_role(team, role_name)
        for request in self.request_set.all():
            request.add_team_in_role(team, role_name)
    def remove_team_in_role(self, team, role_name=None):
        """Revoke the team role on the instance and its requests."""
        super(Instance, self).remove_team_in_role(team, role_name)
        for request in self.request_set.all():
            request.remove_team_in_role(team, role_name)
    def assign_permission_to_spoc(self):
        """Give the current SPOC the Admin role."""
        self.add_user_in_role(self.spoc, "Admin")
    def remove_permission_to_spoc(self):
        """Strip the Admin role from the current SPOC."""
        self.remove_user_in_role(self.spoc, "Admin")
# Fire state-hook processing after every FSM transition on Instance.
post_transition.connect(HookManager.trigger_hook_handler, sender=Instance)
@receiver(pre_save, sender=Instance)
def change_spoc(sender, instance, **kwargs):
    """On SPOC change, move the Admin role from the old SPOC to the new one."""
    # A missing id means the instance is being created, not updated.
    if not instance.id:
        return
    previous = sender.objects.get(id=instance.id)
    if previous.spoc == instance.spoc:
        return
    previous.remove_permission_to_spoc()
    instance.assign_permission_to_spoc()
@receiver(post_save, sender=Instance)
def give_permissions_after_creation(sender, instance, created, **kwargs):
    """Grant the SPOC Admin rights as soon as the instance is first saved."""
    if not created:
        return
    instance.assign_permission_to_spoc()
| 2,104 | 2,513 | 67 |
2ae5e60208764cccf17d08c4595e35389a7ddf40 | 6,154 | py | Python | pytf/loaders.py | Lothiraldan/pytf | 78165288d11b5eb16878ab035e83ece2c8afe0b2 | [
"MIT"
] | null | null | null | pytf/loaders.py | Lothiraldan/pytf | 78165288d11b5eb16878ab035e83ece2c8afe0b2 | [
"MIT"
] | 1 | 2015-01-25T16:37:09.000Z | 2015-01-25T16:37:09.000Z | pytf/loaders.py | Lothiraldan/pytf | 78165288d11b5eb16878ab035e83ece2c8afe0b2 | [
"MIT"
] | null | null | null | import inspect
import unittest
from itertools import product
from functools import partial
from pytf.core import Test
# Unittest compatibility loader
| 32.052083 | 83 | 0.607085 | import inspect
import unittest
from itertools import product
from functools import partial
from pytf.core import Test
class TestLoader(object):
    """Default loader: turns plain functions and test classes found in a
    module into Test objects, expanding any attached parametrizing loaders
    via the cartesian product of their TestGenerators."""
    # Priority relative to other loaders (higher wins); see UnittestLoader.
    level = 0
    def load_object(self, obj, module):
        """Dispatch *obj* to the function or class loader; None otherwise."""
        if inspect.isfunction(obj):
            return self._load_function(obj, module)
        if inspect.isclass(obj):
            return self._load_class(obj, module)
    def _load_function(self, function, module):
        """Yield Tests for a bare test function (one per loader combination)."""
        test = Test('%s.%s' % (module.__name__, function.__name__), function)
        if hasattr(function, "loaders"):
            generators = [loader.load_function(test) for loader in
                          function.loaders]
            # One test per element of the cartesian product of all loaders.
            for combination in product(*generators):
                generator = TestGenerator.merge(combination)
                yield generator.generate_test(test)
        else:
            yield test
    def _load_class(self, klass, module):
        """Yield Tests for a test class, expanding class-level loaders first."""
        if hasattr(klass, 'loaders'):
            generators = [loader.load_class(klass) for loader in
                          klass.loaders]
            for combination in product(*generators):
                generator = TestGenerator.merge(combination)
                # Each combination produces a parametrized copy of the class.
                generated_klass = generator.generate_class(klass)
                for test in self._gen_test_for_class(generated_klass, module):
                    yield test
        else:
            for test in self._gen_test_for_class(klass, module):
                yield test
    def _gen_test_for_class(self, klass, module):
        """Yield Tests for every test* method of *klass*."""
        for test_method_name in filter(lambda x: x.startswith('test'),
                                       dir(klass)):
            if not inspect.ismethod(getattr(klass, test_method_name)):
                continue
            for test in self._load_method(klass, test_method_name, module):
                yield test
    def _load_method(self, klass, method_name, module):
        """Yield Tests for one method, bound to a fresh instance of *klass*."""
        instance = klass()
        test_method = getattr(instance, method_name)
        set_up_method = getattr(instance, 'setUp', None)
        tear_down_method = getattr(instance, 'tearDown', None)
        # Parametrized classes carry an extra `id` fragment in the test id.
        test_ids = [module.__name__, klass.__name__]
        if hasattr(klass, 'id'):
            test_ids.append(klass.id)
        test_ids.append(method_name)
        test_id = '.'.join(test_ids)
        test = Test(test_id, test_method, set_ups=set_up_method,
                    tear_downs=tear_down_method)
        if hasattr(instance, 'messages'):
            for msg in instance.messages:
                test.add_message(*msg)
        if hasattr(test_method, 'loaders'):
            generators = [loader.load_method(test) for loader in
                          test_method.loaders]
            for combination in product(*generators):
                generator = TestGenerator.merge(combination)
                yield generator.generate_test(test)
        else:
            yield test
# Unittest compatibility loader
class UnittestLoader(TestLoader):
    """Compatibility loader that wraps unittest.TestCase subclasses as
    pytf Tests. Higher `level` so it takes precedence over TestLoader
    for TestCase classes."""
    level = 20
    def load_object(self, klass, module):
        """Return a list of Tests for a TestCase subclass; None otherwise."""
        if not inspect.isclass(klass):
            return
        if not issubclass(klass, unittest.TestCase):
            return
        tests = []
        for test_method_name in filter(lambda x: x.startswith('test'), dir(klass)):
            # unittest convention: one instance per test method.
            instance = klass(test_method_name)
            test_method = getattr(instance, test_method_name)
            if not inspect.ismethod(test_method):
                continue
            set_up_method = getattr(instance, 'setUp')
            tear_down_method = getattr(instance, 'tearDown')
            test_id = "%s.%s.%s" % (module.__name__, klass.__name__,
                                    test_method_name)
            tests.append(Test(test_id, test_method, set_ups=set_up_method,
                              tear_downs=tear_down_method))
        return tests
def partial_init(f, *preset_args, **preset_kwargs):
    """Return a wrapper suitable for use as ``__init__`` that calls *f* with
    the preset arguments prepended; call-time keyword arguments override the
    preset ones."""
    def wrapped(self, *call_args, **call_kwargs):
        merged_kwargs = dict(preset_kwargs, **call_kwargs)
        return f(self, *(preset_args + call_args), **merged_kwargs)
    return wrapped
class TestGenerator(object):
    """Accumulates the id fragment, call arguments, messages and fixtures
    contributed by parametrizing loaders, and applies them to produce
    concrete tests or parametrized test classes."""

    def __init__(self, test_id, args=None, messages=None, set_ups=None,
                 tear_downs=None):
        """Store one fragment; ``None`` collections default to empty ones."""
        self.id = test_id
        self.args = ((), {}) if args is None else args
        self.messages = [] if messages is None else messages
        self.set_ups = [] if set_ups is None else set_ups
        self.tear_downs = [] if tear_downs is None else tear_downs

    @staticmethod
    def merge(generators):
        """Fold several generators into one: ids joined with '.', positional
        args concatenated, keyword args merged, fixtures and messages chained."""
        ids = []
        positional = []
        keywords = {}
        messages = []
        set_ups = []
        tear_downs = []
        for gen in generators:
            ids.append(gen.id)
            positional.extend(gen.args[0])
            keywords.update(gen.args[1])
            messages.extend(gen.messages)
            set_ups.extend(gen.set_ups)
            tear_downs.extend(gen.tear_downs)
        return TestGenerator('.'.join(ids), (positional, keywords),
                             messages, set_ups, tear_downs)

    def generate_test(self, test):
        """Return a new Test combining *test* with this generator's data."""
        callback = partial(test.callback, *self.args[0], **self.args[1])
        combined = Test(test.id + '.' + self.id, callback,
                        test.set_ups + self.set_ups,
                        test.tear_downs + self.tear_downs)
        for title, content in test.messages + self.messages:
            combined.add_message(title, content)
        return combined

    def generate_class(self, klass):
        """Return a same-name copy of *klass* carrying this generator's
        id/messages/fixtures, with __init__ pre-bound to the stored args."""
        clone = type(klass.__name__, klass.__bases__, dict(klass.__dict__))
        clone.id = self.id
        clone.messages = self.messages
        clone.set_ups = self.set_ups
        clone.tear_downs = self.tear_downs
        clone.__init__ = partial_init(clone.__init__,
                                      *self.args[0], **self.args[1])
        return clone
| 5,564 | 342 | 91 |
456a5ab7786cbb8846e2cc6043f542e7edcb6720 | 285 | py | Python | day2/day2_part2.py | sharkbound/AdventOfCode2018 | 856b38a3363963f62a4959c2e843128fab9f0d95 | [
"MIT"
] | null | null | null | day2/day2_part2.py | sharkbound/AdventOfCode2018 | 856b38a3363963f62a4959c2e843128fab9f0d95 | [
"MIT"
] | null | null | null | day2/day2_part2.py | sharkbound/AdventOfCode2018 | 856b38a3363963f62a4959c2e843128fab9f0d95 | [
"MIT"
] | null | null | null |
print(*solve([*map(str.rstrip, open('data.txt'))]), sep='')
def solve(ids):
    """Yield, one character at a time, the letters common to the first pair
    of box IDs that differ in exactly one position (AoC 2018 day 2, part 2).

    Stops after the first matching pair; yields nothing if no pair differs
    by exactly one character.
    """
    # Compare each unordered pair once: the original scanned the full cross
    # product, comparing every pair twice and each ID against itself.
    for i, first in enumerate(ids):
        for second in ids[i + 1:]:
            if sum(1 for c1, c2 in zip(first, second) if c1 != c2) == 1:
                yield from (c1 for c1, c2 in zip(first, second) if c1 == c2)
                return
print(*solve([*map(str.rstrip, open('data.txt'))]), sep='')
| 201 | 0 | 22 |
abe104f63b8455f7a86c43ecd46b561a70cff5c3 | 2,221 | py | Python | tests/test_tools.py | binking/News_website | 7f1ed5d64e46ab5001660c7efb83419bb603da6f | [
"BSD-3-Clause"
] | null | null | null | tests/test_tools.py | binking/News_website | 7f1ed5d64e46ab5001660c7efb83419bb603da6f | [
"BSD-3-Clause"
] | null | null | null | tests/test_tools.py | binking/News_website | 7f1ed5d64e46ab5001660c7efb83419bb603da6f | [
"BSD-3-Clause"
] | null | null | null | from news_website.operations.tools import NewsUrlCache
if __name__=='__main__':
test_cache = TestNewsUrlCache()
test_cache.test_initialization()
test_cache.test_push()
test_cache.test_add_existed_ele()
test_cache.test_full()
test_cache.test_practice()
| 30.013514 | 76 | 0.586222 | from news_website.operations.tools import NewsUrlCache
class TestNewsUrlCache():
    """Assertion-based checks for NewsUrlCache: a fixed-size, unique-element
    FIFO that evicts its oldest entry when full and ignores duplicates."""
    def test_initialization(self):
        """A new cache is empty and reports its configured size."""
        print("test_initialization")
        test_size = 10
        cache = NewsUrlCache(test_size)
        print(cache)
        assert '%s' % cache == "The Queue's length is 10, used 0: []"
        assert cache.get_all() == []
        assert cache.size == 10 and cache.is_empty() is True
    def test_push(self):
        """Pushed elements are contained and kept in insertion order."""
        print("test_push")
        test_size = 10
        cache = NewsUrlCache(test_size)
        for i in range(5):
            cache.push(i)
            assert cache.is_contained(i) is True
        assert cache.size == 10 and cache.get_all() == [0, 1, 2, 3, 4]
        assert cache.is_full() is False
    def test_add_existed_ele(self):
        """Pushing a duplicate does not grow the cache."""
        print("test_add_existed_ele")
        test_size = 10
        cache = NewsUrlCache(test_size)
        for i in range(5):
            cache.push(i)
        cache.push(0) # 0 already exists
        print(cache)
        assert cache.__len__() == 5 and cache.count(0) == 1 # unique
    def test_full(self):
        """Pushing into a full cache evicts the oldest element."""
        print("test_full")
        test_size = 10
        cache = NewsUrlCache(test_size)
        for i in range(10):
            cache.push(i)
        print(cache)
        assert cache.is_full() is True
        cache.push(10)
        assert cache[0] == 1 and cache[-1] == 10
        assert cache.size == 10 and cache.is_full() is True
    def test_practice(self):
        """Duplicates never evict; only genuinely new elements rotate the queue."""
        print("test_practice")
        test_size = 10
        cache = NewsUrlCache(test_size)
        for i in range(10):
            cache.push(i)
        cache.push(10)
        assert cache[-1] == 10 and cache[0] == 1 and cache.is_full() is True
        cache.push(9)
        assert cache[-1] == 10 and cache[0] == 1 and cache.is_full() is True
        cache.push(3)
        assert cache[-1] == 10 and cache[0] == 1 and cache.is_full() is True
        cache.push(20)
        assert cache[-1] == 20 and cache[0] == 2 and cache.is_full() is True
if __name__=='__main__':
    # Ad-hoc runner: executes each check in sequence (no test framework).
    test_cache = TestNewsUrlCache()
    test_cache.test_initialization()
    test_cache.test_push()
    test_cache.test_add_existed_ele()
    test_cache.test_full()
    test_cache.test_practice()
| 1,780 | 4 | 158 |
9c78c6b13f1073b29f683cbc542eacdcd8e88433 | 14,937 | py | Python | tests/test_media.py | flaviocpontes/ffmpy | 0d88bcbeaac71ab9bcc18cdfa5594608d4207c93 | [
"MIT"
] | 1 | 2017-05-23T22:28:03.000Z | 2017-05-23T22:28:03.000Z | tests/test_media.py | flaviocpontes/ffmpy | 0d88bcbeaac71ab9bcc18cdfa5594608d4207c93 | [
"MIT"
] | 1 | 2017-01-23T16:40:42.000Z | 2017-01-23T16:40:42.000Z | tests/test_media.py | flaviocpontes/ffmpymedia | 0d88bcbeaac71ab9bcc18cdfa5594608d4207c93 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ffmpymedia import __author__, __version__, __copyright__, __package__
from os.path import join as joinpath
import unittest
from ffmpymedia.media import *
from tests import TEST_FILE_PATH
class TestMediaStream(unittest.TestCase):
"""
Testes de criação dos objetos de fluxo de mídia
"""
class TestMediaStreamTemplate(unittest.TestCase):
"""
Testes de criação dos objetos template de fluxo de mídia
"""
class TestMediaStreamTemplateAnalysis(unittest.TestCase):
"""
Testes das funcionalidades de análise dos templates de fluxos de mídia
"""
def test_empty_template_equal1(self):
"""
Deve retornar verdadeiro sempre pois o template não faz nenhuma exigência
"""
self.assertTrue(MediaStreamTemplate(**{'type': 'video'}) == MediaStream(**{'type': 'video',
'sample_format': 'yuv420p', 'width': '66718', 'height': '643816hsa',
'blablabla': 'sakjhfashkjf'}))
def test_template_equality(self):
"""
Testa um Template com todas as informações
"""
self.assertTrue(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video',
'height': '1080'}) ==
MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video', 'profile': 'Main',
'codec': 'mpeg2video', 'height': '1080'}))
def test_full_template_equal3(self):
"""
Testa Um media stream sem uma chave que esta no Template
"""
self.assertFalse(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080'}) ==\
MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'codec': 'mpeg2video', 'height': '1080'}))
def test_stream_difference_with_different_height(self):
"""
Testa um MediaFile diferente
"""
self.assertTrue(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080'}).
difference(MediaStream(**{'sample_format': 'yuv420p', 'width': '1280', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '720'})))
def test_stream_difference_with_equal_streams(self):
"""
Testa a diferença
"""
self.assertFalse(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Test with Metadata'},
'disposition': {"default": 1, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}}).
difference(MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Test with Metadata'},
'disposition': {"default": 1, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}})))
def test_stream_difference_with_different_metadata(self):
"""
Testa um MediaFile diferente
"""
self.assertEqual(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Test with Metadata'},
'disposition': {"default": 1, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}}).
difference(MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Different Metadata!'},
'disposition': {"default": 1, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}}),
include_metadata=True),
{'metadata': {'title': ('Different Metadata!', 'Test with Metadata')}})
def test_stream_difference_with_different_dispositions(self):
"""
Testa a diferença
"""
self.assertEqual(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Test with Metadata'},
'disposition': {"default": 1, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}}).
difference(MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Test with Metadata'},
'disposition': {"default": 0, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}})),
{'disposition': {'default': (0, 1)}})
class TestMediaFileCreation(unittest.TestCase):
"""
Testes das funcionalidades da classe MediaFile
""" | 57.01145 | 119 | 0.514695 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ffmpymedia import __author__, __version__, __copyright__, __package__
from os.path import join as joinpath
import unittest
from ffmpymedia.media import *
from tests import TEST_FILE_PATH
class TestMediaStream(unittest.TestCase):
"""
Testes de criação dos objetos de fluxo de mídia
"""
def test_invalid_media_stream(self):
self.assertIsNone(MediaStream())
def test_invalid_media_stream2(self):
self.assertIsNone(MediaStream(**{'type': 'Invalid'}))
def test_video_stream_creation(self):
self.assertIsInstance(MediaStream(**{'type': 'video'}), MediaStream)
def test_video_stream_type_repr(self):
self.assertEqual(repr(MediaStream(**{'type': 'video'})), "MediaStream(**{'type': 'video'})")
def test_video_stream_repr2(self):
self.assertDictEqual({'sample_format': 'yuv420p', 'width': '1920', 'type': 'video', 'profile': 'Main',
'codec': 'mpeg2video', 'height': '1080', 'metadata': {'encoder': 'FFMPEG'}},
MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'encoder': 'FFMPEG'}}).__dict__)
def test_video_stream_str(self):
self.assertEqual(str(MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080'})),
"video Stream: codec: mpeg2video, height: 1080, profile: Main, sample_format: yuv420p, "
"type: video, width: 1920")
def test_audio_stream_creation(self):
self.assertIsInstance(MediaStream(**{'type': 'audio'}), MediaStream)
def test_audio_stream_type_repr(self):
self.assertEqual(repr(MediaStream(**{'type': 'audio'})), "MediaStream(**{'type': 'audio'})")
def test_image_stream_creation(self):
self.assertIsInstance(MediaStream(**{'type': 'image'}), MediaStream)
def test_image_stream_type_repr(self):
self.assertEqual(repr(MediaStream(**{'type': 'image'})), "MediaStream(**{'type': 'image'})")
def test_subtitle_stream_creation(self):
self.assertIsInstance(MediaStream(**{'type': 'subtitle'}), MediaStream)
def test_subtitle_type_repr(self):
self.assertEqual(repr(MediaStream(**{'type': 'subtitle'})), "MediaStream(**{'type': 'subtitle'})")
def test_data_stream_creation(self):
self.assertIsInstance(MediaStream(**{'type': 'data'}), MediaStream)
def test_data_type_repr(self):
self.assertEqual(repr(MediaStream(**{'type': 'data'})), "MediaStream(**{'type': 'data'})")
def test_attachment_stream_creation(self):
self.assertIsInstance(MediaStream(**{'type': 'attachment'}), MediaStream)
def test_attachment_type_repr(self):
self.assertEqual(repr(MediaStream(**{'type': 'attachment'})), "MediaStream(**{'type': 'attachment'})")
class TestMediaStreamTemplate(unittest.TestCase):
"""
Testes de criação dos objetos template de fluxo de mídia
"""
def test_invalid_media_stream_template(self):
self.assertIsNone(MediaStreamTemplate())
def test_invalid_media_stream_template2(self):
self.assertIsNone(MediaStream(**{'type': 'Invalid'}))
def test_video_stream_template_creation(self):
self.assertIsInstance(MediaStreamTemplate(**{'type': 'video'}), MediaStreamTemplate)
def test_video_stream_template_type_repr(self):
self.assertEqual(repr(MediaStreamTemplate(**{'type': 'video'})), "MediaStreamTemplate(**{'type': 'video'})")
def test_video_stream_template_repr2(self):
self.assertDictEqual({'sample_format': 'yuv420p', 'width': '1920', 'type': 'video', 'profile': 'Main',
'codec': 'mpeg2video', 'height': '1080'},
MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video',
'height': '1080'}).__dict__)
def test_audio_stream_template_creation(self):
self.assertIsInstance(MediaStreamTemplate(**{'type': 'audio'}), MediaStreamTemplate)
def test_audio_stream_template_type_repr(self):
self.assertEqual(repr(MediaStreamTemplate(**{'type': 'audio'})), "MediaStreamTemplate(**{'type': 'audio'})")
def test_video_stream_template_str(self):
self.assertEqual(str(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080'})),
"video stream template: codec: mpeg2video, height: 1080, profile: Main, sample_format: "
"yuv420p, type: video, width: 1920")
def test_image_stream_creation(self):
self.assertIsInstance(MediaStreamTemplate(**{'type': 'image'}), MediaStreamTemplate)
def test_image_stream_type_repr(self):
self.assertEqual(repr(MediaStreamTemplate(**{'type': 'image'})), "MediaStreamTemplate(**{'type': 'image'})")
def test_subtitle_stream_creation(self):
self.assertIsInstance(MediaStreamTemplate(**{'type': 'subtitle'}), MediaStreamTemplate)
def test_subtitle_type_repr(self):
self.assertEqual(repr(MediaStreamTemplate(**{'type': 'subtitle'})),
"MediaStreamTemplate(**{'type': 'subtitle'})")
def test_data_stream_creation(self):
self.assertIsInstance(MediaStreamTemplate(**{'type': 'data'}), MediaStreamTemplate)
def test_data_type_repr(self):
self.assertEqual(repr(MediaStreamTemplate(**{'type': 'data'})), "MediaStreamTemplate(**{'type': 'data'})")
def test_attachment_stream_creation(self):
self.assertIsInstance(MediaStreamTemplate(**{'type': 'attachment'}), MediaStreamTemplate)
def test_attachment_type_repr(self):
self.assertEqual(repr(MediaStreamTemplate(**{'type': 'attachment'})),
"MediaStreamTemplate(**{'type': 'attachment'})")
class TestMediaStreamTemplateAnalysis(unittest.TestCase):
"""
Testes das funcionalidades de análise dos templates de fluxos de mídia
"""
def test_empty_template_equal1(self):
"""
Deve retornar verdadeiro sempre pois o template não faz nenhuma exigência
"""
self.assertTrue(MediaStreamTemplate(**{'type': 'video'}) == MediaStream(**{'type': 'video',
'sample_format': 'yuv420p', 'width': '66718', 'height': '643816hsa',
'blablabla': 'sakjhfashkjf'}))
def test_minimal_template_equality(self):
self.assertTrue(MediaStreamTemplate(**{'type': 'video'}) == MediaStream(**{'type': 'video',
'sample_format': 'yuv420p', 'width': '66718', 'height': '643816hsa',
'blablabla': 'sakjhfashkjf'}))
def test_template_equality(self):
"""
Testa um Template com todas as informações
"""
self.assertTrue(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video',
'height': '1080'}) ==
MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video', 'profile': 'Main',
'codec': 'mpeg2video', 'height': '1080'}))
def test_full_template_equal2(self):
self.assertTrue(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080'}) ==\
MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video', 'profile': 'Main',
'codec': 'mpeg2video', 'height': '1080'}))
def test_full_template_equal3(self):
"""
Testa Um media stream sem uma chave que esta no Template
"""
self.assertFalse(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080'}) ==\
MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'codec': 'mpeg2video', 'height': '1080'}))
def test_stream_difference_with_different_height(self):
"""
Testa um MediaFile diferente
"""
self.assertTrue(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080'}).
difference(MediaStream(**{'sample_format': 'yuv420p', 'width': '1280', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '720'})))
def test_stream_difference_with_equal_streams(self):
"""
Testa a diferença
"""
self.assertFalse(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Test with Metadata'},
'disposition': {"default": 1, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}}).
difference(MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Test with Metadata'},
'disposition': {"default": 1, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}})))
def test_stream_difference_with_different_metadata(self):
"""
Testa um MediaFile diferente
"""
self.assertEqual(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Test with Metadata'},
'disposition': {"default": 1, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}}).
difference(MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Different Metadata!'},
'disposition': {"default": 1, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}}),
include_metadata=True),
{'metadata': {'title': ('Different Metadata!', 'Test with Metadata')}})
def test_stream_difference_with_different_dispositions(self):
"""
Testa a diferença
"""
self.assertEqual(MediaStreamTemplate(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Test with Metadata'},
'disposition': {"default": 1, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}}).
difference(MediaStream(**{'sample_format': 'yuv420p', 'width': '1920', 'type': 'video',
'profile': 'Main', 'codec': 'mpeg2video', 'height': '1080',
'metadata': {'title': 'Test with Metadata'},
'disposition': {"default": 0, "dub": 0, "original": 0, "comment": 0,
"lyrics": 0, "karaoke": 0, "forced": 1,
"hearing_impaired": 0, "visual_impaired": 0,
"clean_effects": 0, "attached_pic": 0}})),
{'disposition': {'default': (0, 1)}})
class TestMediaFileCreation(unittest.TestCase):
"""
Testes das funcionalidades da classe MediaFile
"""
def setUp(self):
self.TEST_FILE = os.path.join(TEST_FILE_PATH, 'SIN001 Sinuca.mp4')
def test_nonexistente_file(self):
self.assertIsNone(MediaFile(**{'filename': 'NAOEXISTE'}))
def test_insufficient_parameters(self):
file_params = probe.MediaProbe.get_media_file_input_params(self.TEST_FILE)
file_params.__delitem__('duration')
self.assertIsNone(MediaFile(**file_params)) | 6,008 | 0 | 998 |
44c52430e10a12b357c944589a34aab4b48be394 | 3,045 | py | Python | metactical/metactical/report/pos_discount_report/pos_discount_report.py | aisenyi/metactical | 30a55f38e48f3f8512e92de370b39cf97f452b1d | [
"MIT"
] | null | null | null | metactical/metactical/report/pos_discount_report/pos_discount_report.py | aisenyi/metactical | 30a55f38e48f3f8512e92de370b39cf97f452b1d | [
"MIT"
] | null | null | null | metactical/metactical/report/pos_discount_report/pos_discount_report.py | aisenyi/metactical | 30a55f38e48f3f8512e92de370b39cf97f452b1d | [
"MIT"
] | null | null | null | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.desk.reportview import build_match_conditions
from frappe.utils import flt, cint, getdate, now, date_diff
| 28.457944 | 115 | 0.655172 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.desk.reportview import build_match_conditions
from frappe.utils import flt, cint, getdate, now, date_diff
def execute(filters=None):
if not filters:
filters = {}
columns = get_column()
data=[]
item_sales = get_data(filters)
for d in item_sales:
if d.item_code != "9999-tempt" and d.variant_of != "9999-tempt": #remove shipping items from list
if d.get('price_list_rate') > 0:
price_list_rate = frappe.db.get_value("Item Price",
{"price_list": d.get('selling_price_list'), "selling": 1, "item_code": d.get('item_code')}, "price_list_rate")
#rate_discount = (d.get('price_list_rate') - d.get('rate'))/d.get('price_list_rate')
if price_list_rate is not None:
rate_discount = (price_list_rate - d.get('rate'))/d.get('price_list_rate')
if rate_discount >= 0.15:
row = {}
row['si_date'] = d.posting_date
row['warehouse'] = d.warehouse
row['si_name'] = d.name
row['ifw_retailskusuffix'] = d.ifw_retailskusuffix
row['item_code'] = d.item_code
row['item_name'] = d.item_name
row['qty'] = d.qty
row['rate'] = d.rate
row['price_list_rate'] = price_list_rate
row['discount_percentage'] = rate_discount * 100
row['uom'] = d.uom
row['ifw_location'] = d.ifw_location
data.append(row)
return columns, data
def get_column():
return [
{
"fieldname":"warehouse",
"label": "Pos location",
"fieldtype": "Data",
'width': 200
},
{
"fieldname": "si_name",
"label": "Invoice Number",
"fieldtype": 'Link',
'options': 'Sales Invoice',
'width': 120
},
{
"fieldname":"ifw_retailskusuffix",
"label": "Retail SkuSuffix",
"fieldtype": "Data",
'width': 200
},
{
"fieldname":"item_name",
"label": "Item Name",
"fieldtype": "Data",
'width': 200
},
{
"fieldname":"price_list_rate",
"label": "PriceList Price",
"fieldtype": "Currency",
"width": 120,
},
{
"fieldname":"rate",
"label": "Discount Price sold for",
"fieldtype": "Currency",
"width": 120,
},
{
"fieldname":"discount_percentage",
"label": "Percentage Amount(%)",
"fieldtype": "Percent",
'width': 120
},
]
def get_data(filters):
where_filter = {"from_date": filters.from_date, "to_date": filters.to_date}
where = ""
data = frappe.db.sql("""select c.item_code, c.item_name, c.qty, c.price_list_rate, c.rate, c.discount_percentage,
c.uom, c.ifw_retailskusuffix, c.ifw_location, c.warehouse,
p.name, p.posting_date, p.selling_price_list, i.variant_of
from `tabSales Invoice Item` c inner join `tabSales Invoice` p on p.name = c.parent
inner join `tabItem` i on c.item_code = i.name
where p.docstatus = 1 and p.posting_date BETWEEN %(from_date)s AND %(to_date)s
order by c.warehouse, p.posting_date
"""+ where, where_filter, as_dict=1)
return data
| 2,660 | 0 | 69 |
4dfc2eeaf0046cbddf4ddc2b70d0587937a5a5af | 1,900 | py | Python | tests/test_plugin.py | Jesse-Weinstein-Zonar/nose-watch | 4aefd6f3a18f079c08aaaf68a2d5099fef374125 | [
"BSD-2-Clause"
] | 43 | 2015-01-19T01:44:42.000Z | 2022-03-26T06:28:22.000Z | tests/test_plugin.py | Jesse-Weinstein-Zonar/nose-watch | 4aefd6f3a18f079c08aaaf68a2d5099fef374125 | [
"BSD-2-Clause"
] | 13 | 2015-02-13T10:09:51.000Z | 2018-10-22T02:23:51.000Z | tests/test_plugin.py | Jesse-Weinstein-Zonar/nose-watch | 4aefd6f3a18f079c08aaaf68a2d5099fef374125 | [
"BSD-2-Clause"
] | 9 | 2015-01-10T18:39:54.000Z | 2020-07-06T19:57:18.000Z | """
Main plugin tests.
.. note:: Please to not run nosetests with this plugin for testing the plugin
itself. Also, mock.patch is not much help here as it would try to mock the
module that would be already imported by nose itself if plugin was installed
globally (it would take module from sys.modules).
"""
import unittest
from mock import Mock, patch
from nosewatch.plugin import WatchPlugin
| 36.538462 | 79 | 0.637368 | """
Main plugin tests.
.. note:: Please to not run nosetests with this plugin for testing the plugin
itself. Also, mock.patch is not much help here as it would try to mock the
module that would be already imported by nose itself if plugin was installed
globally (it would take module from sys.modules).
"""
import unittest
from mock import Mock, patch
from nosewatch.plugin import WatchPlugin
class TestWatchPlugin(unittest.TestCase):
def setUp(self):
self.plugin = WatchPlugin()
self.plugin.stdout = Mock()
self.plugin.argv = ['program', 'arg1', '--with-watch', 'arg3', 'arg4']
def test_finalize(self):
self.plugin.call = Mock()
self.plugin.finalize(Mock())
watchcmd = 'clear && program arg1 arg3 arg4'
self.plugin.call.assert_called_once_with([
'watchmedo', 'shell-command', '-c', watchcmd,
'-R', '-p', '*.py', '-W', '.'])
def test_finalize_interrupted(self):
self.plugin.call = Mock(side_effect=KeyboardInterrupt)
self.plugin.finalize(Mock())
self.plugin.stdout.write.assert_called_once_with('\nStopped\n')
def test_works_even_when_tests_mock_sys_argv(self):
with patch('sys.argv') as argv:
argv.return_value = ['mocked']
self.plugin.call = Mock()
self.plugin.finalize(Mock())
watchcmd = 'clear && program arg1 arg3 arg4'
self.plugin.call.assert_called_once_with([
'watchmedo', 'shell-command', '-c', watchcmd,
'-R', '-p', '*.py', '-W', '.'])
def test_works_even_when_tests_mock_sys_stdout(self):
with patch('sys.stdout') as stdout:
stdout.return_value = ['mocked']
self.plugin.call = Mock(side_effect=KeyboardInterrupt)
self.plugin.finalize(Mock())
self.plugin.stdout.write.assert_called_once_with('\nStopped\n')
| 1,318 | 20 | 158 |
fcbd5aa0d4005b06db1232244b372533aa0b3175 | 12,585 | py | Python | src/treeOps/graph.py | seyedb/tree-ops | 787640df0bfeb0f33ca81dbca91dead5ffeb6f85 | [
"MIT"
] | null | null | null | src/treeOps/graph.py | seyedb/tree-ops | 787640df0bfeb0f33ca81dbca91dead5ffeb6f85 | [
"MIT"
] | null | null | null | src/treeOps/graph.py | seyedb/tree-ops | 787640df0bfeb0f33ca81dbca91dead5ffeb6f85 | [
"MIT"
] | null | null | null | from enum import Enum
from collections import deque, defaultdict
from functools import partial
class node_status(Enum):
"""List of possible visit status of graph nodes (graph traversal)."""
UNVISITED = 0
VISITED = 1
VISITING = 2
| 34.765193 | 105 | 0.552483 | from enum import Enum
from collections import deque, defaultdict
from functools import partial
class node_status(Enum):
"""List of possible visit status of graph nodes (graph traversal)."""
UNVISITED = 0
VISITED = 1
VISITING = 2
class graph(object):
def __init__(self):
"""Initializes a graph with a nested defaultdict of graphNodes.
example: {'A': {'B': [2, 3], 'C': [5, 1]}}
"""
self.vertices = defaultdict(partial(defaultdict, list))
class graphNode(object):
def __init__(self, data=None, status=node_status.UNVISITED):
"""Initializes a graph node.
Attributes:
data (any type): the node value.
children (dict): a dictionary of the children (adjacent nodes).
status (node_status): the visit status of the node (graph traversal).
distance (float): the distance from a source node to this node.
previous (graphNode): predecessor of this node in an optimal path from
a predefined source in a shortest path algorithm.
"""
self.data = data
self.children = defaultdict(list)
self.status = status
self.distance = float('inf')
self.previous = None
# comparison operators
def __lt__(self, other):
return self.data < other.data
def __le__(self, other):
return self.data < other.data or self.data == other.data
# the following are optional
# def __ne__(self, other):
# return self.data != other.data
#
# def __gt__(self, other):
# return self.data > other.data
#
# def __ge__(self, other):
# return self.data > other.data or self.data == other.data
def __str__(self):
"""Prints the data assigned to this node, and list of its children with the weight of the
edges connecting them.
"""
res = "'{}': {}".format(self._getData(),
[[node._getData(), self._getWeight(node)] for node in self._getChildren()])
return res
def _getStatus(self):
"""Returns the current visit status of a node."""
return self.status
def _setStatus(self, status):
"""Sets the visit status of a node.
Args:
status (node_status): the new visit status.
Returns:
(graphNode) the node with its status updated with the given status.
"""
self.status = status
def _getData(self):
"""Returns the data assigned to this node."""
return self.data
def _setData(self, data):
"""Sets the data of this node to a given value.
Args:
data (node val data type): the new data to be assigned to this node.
Returns:
(graphNode) the node with updated data.
"""
self.data = data
def _addAdjNode(self, node, weight=0):
"""Adds an adjacent node to this node.
Args:
node (graphNode): the new adjacent node.
weight (int): the weight of the edge connecting this node to the new node.
Returns:
(graphNode) this node with its dictionary of children updated with a new node added.
"""
self.children[node].append(weight) if weight not in self.children[node] \
else self.children[node]
def _getChildren(self):
"""Returns the keys of the dict of children of this node.
Returns:
(dict_keys) keys of the dictionary of children of this node.
"""
return self.children.keys()
def _getDistance(self):
"""Returns the value of the distance instance attribute of this node."""
return self.distance
def _setDistance(self, distance):
"""Sets the instance attribute distance to a given value.
Args:
distance (float): the new distance.
Returns:
(graphNode) the node with its distance updated with the given value.
"""
self.distance = distance
def _getPrevious(self):
"""Returns the value of the previous instance attribute of this node."""
return self.previous
def _setPrevious(self, node):
"""Sets the instance attribute previous to a given node.
Args:
node (graphNode): the new previous.
Returns:
(graphNode) this node with its previous updated with the given node.
"""
self.previous = node
def _getWeight(self, adjNode):
"""Returns the weight of the edge between this node and the given node if they are adjacent,
otherwise returns None.
"""
if adjNode in self.children:
return self.children[adjNode]
else:
return None
def _isAdjacent(self, node):
"""Returns True of this node is adjacent to the give node, else False.
Returns:
(boolean) whether or not this node and the given node are adjacent.
"""
return node in self.children
def __str__(self):
"""Returns a string representing the vertices and edges of this graph."""
res = ''
for vertex in self._getVerticesDict():
res += vertex.__str__() + '\n'
return res
def reset(self):
"""Resets the attributes of all the nodes in a graph to their default values."""
for vx in self._getVerticesDict():
vx._setStatus(node_status.UNVISITED)
vx._setDistance(float('inf'))
vx._setPrevious(None)
def __contains__(self, VxData):
"""Checks whether or not the graph contains a given data.
Args:
VxData (node val data type): the data to be searched for.
Returns:
(boolean) True if VxData is found, otherwise False.
"""
data = [k._getData() for k in self._getVerticesDict().keys()]
return VxData in data
def _getVerticesDict(self):
"""(helper function) Returns a nested (default) dictionary of the vertices of the graph.
Returns:
(defaultdict) the default dictionary of vertices of the graph.
"""
return self.vertices
def _getEdges(self):
"""Returns a set of edges of the graph.
NOTE:
- If there is an edge between v1, v2 with weight w, both (v1, v2, w) and (v2, v1, w) are
included. This is necessary for Bellman-Ford shortest path algorithm and also make it general
for directed graphs.
Returns:
(set of tuples): set of edges represented as (v1, v2, w).
"""
vertices = self._getVerticesDict()
edges = set()
for vx, vxchildren in vertices.items():
for child, weight in vxchildren.items():
# uncomment the following line to have only unique edges
# tmp = sorted([vx, child])
tmp = [vx, child]
for i in range(len(weight)):
edges.add((tmp[0], tmp[1], weight[i]))
return edges
def _getVertex(self, VxData):
"""(helper function) Returns a graph node that holds the given data.
Args:
VxData (node val data type): the data to be searched for.
Returns:
(graphNode) the graph node from this graph that has data equal to the given data.
"""
if VxData not in self:
print("Vertex {} doesn't exist.".format(VxData))
return None
for vertex in self._getVerticesDict():
if vertex._getData() == VxData:
return vertex
def add_vertex(self, VxData):
"""Adds a vertex to the graph.
Args:
VxData (node val data type): the data to be added to the graph.
Returns:
(graph) this graph with a new node added to it as a new vertex.
"""
vx = self.graphNode(VxData)
if VxData in self._getVerticesDict():
print("Vertex {} already exists.".format(VxData))
else:
self.vertices[vx] = defaultdict(list)
def add_edge(self, fromVxData, toVxData, weight=0.0, directed=False):
"""Adds an edge to the graph given the data stored in the nodes at its ends.
Args:
fromVxData (node val data type): data of the "from" node.
toVxData (node val data type): data of the "to" node.
weight (float): weight of the edge.
directed (boolean): whether or not the edge is directed from fromVxData to toVxData.
Returns:
(graph) this graph updated with a new edge added to it.
"""
if fromVxData not in self:
print("Adding edge failed! Vertex {} doesn't exist.".format(fromVxData))
return
if toVxData not in self:
print("Adding edge failed! Vertex {} doesn't exist.".format(toVxData))
return
a = self._getVertex(fromVxData)
b = self._getVertex(toVxData)
edges = self._getEdges()
msg = "The edge ({}, {}, {}) already exists!"
msgcmpl = msg + " ({}, {}, {}) will be created."
if directed:
if (a, b, weight) in edges:
print(msg.format(fromVxData, toVxData, weight))
return
a._addAdjNode(b, weight)
self.vertices[a][b].append(weight)
elif not directed:
if (a, b, weight) in edges and (b, a, weight) not in edges:
print(msgcmpl.format(fromVxData, toVxData, weight, toVxData, fromVxData, weight))
b._addAdjNode(a, weight)
self.vertices[b][a].append(weight)
return
elif (b, a, weight) in edges and (a, b, weight) not in edges:
print(msgcmpl.format(toVxData, fromVxData, weight, fromVxData, toVxData, weight))
a._addAdjNode(b, weight)
self.vertices[a][b].append(weight)
return
elif (a, b, weight) in edges and (b, a, weight) in edges:
return
a._addAdjNode(b, weight)
b._addAdjNode(a, weight)
self.vertices[a][b].append(weight)
self.vertices[b][a].append(weight)
def _isMultigraph(self):
"""(helper function) Checks whether or not the graph is a multigraph."""
vertices = self._getVerticesDict()
for vx, vxchildren in vertices.items():
for weight in vxchildren.values():
if len(weight) > 1:
return True
return False
def DFS(self, start, path=None):
"""Depth-First Search (DFS).
Args:
start (node val data type): the key value of the node where the search starts.
path (list of graphNode): the DFS path (empty path to be filled with nodes).
Returns:
(list of graphNode) the full DFS path.
"""
if path is None:
path = []
s = self._getVertex(start)
if s is None:
return
if len(path) == 0: path.append(s)
s._setStatus(node_status.VISITED)
for child in s._getChildren():
if child._getStatus() == node_status.UNVISITED:
path.append(child)
path = self.DFS(child._getData(), path)
return path
def BFS(self, start):
"""Breadth-First Search (BFS).
Args:
start (node val data type): the key value of the node where the search starts.
Returns:
(list of graphNode) the full BFS path.
"""
s = self._getVertex(start)
if s is None:
return
s._setStatus(node_status.VISITED)
path = []
Q = deque()
Q.append(s)
while Q:
k = Q.popleft()
for child in k._getChildren():
if child._getStatus() == node_status.UNVISITED:
child._setStatus(node_status.VISITED)
Q.append(child)
k._setStatus(node_status.VISITED)
path.append(k)
return path
| 117 | 12,199 | 23 |
b0bb9729a517899f54ff6b85a33fee32daea063e | 10,198 | py | Python | keg_bouncer/model/mixins.py | level12/keg-bouncer | 87482bfe22b943397765cb661f8088a19afe592b | [
"BSD-3-Clause"
] | null | null | null | keg_bouncer/model/mixins.py | level12/keg-bouncer | 87482bfe22b943397765cb661f8088a19afe592b | [
"BSD-3-Clause"
] | 16 | 2015-12-11T05:05:53.000Z | 2017-04-04T19:01:43.000Z | keg_bouncer/model/mixins.py | level12/keg-bouncer | 87482bfe22b943397765cb661f8088a19afe592b | [
"BSD-3-Clause"
] | 1 | 2016-12-12T22:48:45.000Z | 2016-12-12T22:48:45.000Z | from __future__ import absolute_import
from six import text_type
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.inspection import inspect
import sqlalchemy.orm as saorm
from . import entities as ents
from . import interfaces
class PermissionMixin(interfaces.HasPermissions, KegBouncerMixin):
"""A mixin that adds permission facilities to a SQLAlchemy declarative user entity.
A class which mixes this in must provide one of the following:
* An `id` column member which represents the primary key. The actual column may have any
name and any type.
* Or, a `primary_key_column` class variable that gives the name of the primary key column
as a string.
"""
# Instances will shadow this when populating their own cache.
_cached_permissions = None
@declared_attr
@declared_attr
def user_user_group_map(cls):
"""A linking (mapping) table between users and user groups."""
return ents.make_user_to_user_group_link(cls._primary_key_column(), cls.__tablename__)
@hybrid_property
@hybrid_property
def permissions_query(self):
"""A query that maps users to permissions through all possible avenues."""
return ents.joined_permission_query().join(
self.user_user_group_map,
sa.or_(
self.user_user_group_map.c.user_group_id
== ents.user_group_permission_map.c.user_group_id, # noqa
self.user_user_group_map.c.user_group_id
== ents.user_group_bundle_map.c.user_group_id # noqa
)
)
@hybrid_property
def permissions_with_user_id_query(self):
"""
Like `permissions_query` but adds a column called `user_id` that can be used to
filter/join on a particular user ID or user ID column.
"""
return self.permissions_query.add_columns(
self.user_mapping_column.label('user_id')
)
def get_all_permissions_without_cache(self):
"""Get all permissions that are joined to this User, whether directly, through permission
bundles, or through user groups.
Warning: Calling this method on a deleted entity may raise
:class:`sqlalchemy.orm.exc.ObjectDeletedError`.
"""
return frozenset(self.permissions_query.filter(
self.user_mapping_column == self._primary_key
))
def get_all_permissions(self):
"""Same as `get_all_permissions_without_cache` but uses a cached result after the first
call.
Warning: Calling this method on a deleted entity may raise
:class:`sqlalchemy.orm.exc.ObjectDeletedError`.
"""
self._cached_permissions = (self._cached_permissions
or self.get_all_permissions_without_cache())
return self._cached_permissions
def has_permissions(self, *tokens):
"""Returns True IFF every given permission token is present in the user's permission set.
Warning: Calling this method on a deleted entity may raise
:class:`sqlalchemy.orm.exc.ObjectDeletedError`.
"""
return frozenset(tokens) <= {x.token for x in self.get_all_permissions()}
def has_any_permissions(self, *tokens):
"""Returns True IFF any of the given permission tokens are present in the user's permission
set.
Warning: Calling this method on a deleted entity may raise
:class:`sqlalchemy.orm.exc.ObjectDeletedError`.
"""
return not frozenset(tokens).isdisjoint(x.token for x in self.get_all_permissions())
def make_password_mixin(history_entity_mixin=object, crypt_context=None):
"""Returns a mixin that adds password history and utility functions for working with passwords.
:param history_entity_mixin: is an optional mixin to add to the password history entity.
Supply a mixin if you want to include customized meta-information
for each password in the history log.
:param crypt_context: is an optional default :class:`CryptContext` object for hashing passwords.
If not supplied you must override the `get_crypt_context` method to
provide one.
"""
return PasswordMixin
def make_login_history_mixin(history_entity_mixin=object):
"""Returns a mixin that adds login history relationships.
:param history_entity_mixin: an optional mixin to add to the login history entity. Supply a
mixin if you want to include customized meta-information for each
entry in the history log.
"""
return LoginHistoryMixin
| 39.835938 | 100 | 0.638655 | from __future__ import absolute_import
from six import text_type
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.inspection import inspect
import sqlalchemy.orm as saorm
from . import entities as ents
from . import interfaces
class KegBouncerMixin(object):
    """Shared plumbing for KegBouncer mixins.

    Locates and reads the single primary-key column of the host
    SQLAlchemy declarative entity; the other mixins build on this.
    """

    # Raised whenever the host entity does not expose exactly one
    # primary-key column.
    _invalid_primary_key_error = AttributeError(
        'This KegBouncer mixin requires your entity class to have exactly 1 primary key field.')

    @classmethod
    def _primary_key_column(cls):
        """Return the single column attribute flagged ``primary_key`` on this class."""
        pk_attrs = [value for value in cls.__dict__.values()
                    if getattr(value, 'primary_key', None) is True]
        if len(pk_attrs) != 1:
            raise cls._invalid_primary_key_error
        return pk_attrs[0]

    def _primary_key_value(self):
        """Return this instance's primary-key value.

        Warning: Calling this method on a deleted entity may raise
        :class:`sqlalchemy.orm.exc.ObjectDeletedError`.
        """
        values = inspect(self.__class__).primary_key_from_instance(self)
        if len(values) != 1:  # pragma: no cover
            raise self._invalid_primary_key_error
        return values[0]

    @hybrid_property
    def _primary_key(self):
        # Instance-level access: the concrete key value.
        return self._primary_key_value()

    @_primary_key.expression
    def _primary_key(cls):
        # Class-level (SQL expression) access: the key column itself.
        return cls._primary_key_column()
class PermissionMixin(interfaces.HasPermissions, KegBouncerMixin):
    """A mixin that adds permission facilities to a SQLAlchemy declarative user entity.

    A class which mixes this in must provide one of the following:

    * An `id` column member which represents the primary key. The actual column may have any
      name and any type.
    * Or, a `primary_key_column` class variable that gives the name of the primary key column
      as a string.
    """

    # Instances will shadow this when populating their own cache.
    _cached_permissions = None

    @declared_attr
    def user_groups(cls):
        """Many-to-many relationship to user groups via ``user_user_group_map``."""
        return saorm.relationship(ents.UserGroup,
                                  secondary=cls.user_user_group_map,
                                  cascade='all',
                                  passive_deletes=True,
                                  backref='users')

    @declared_attr
    def user_user_group_map(cls):
        """A linking (mapping) table between users and user groups."""
        return ents.make_user_to_user_group_link(cls._primary_key_column(), cls.__tablename__)

    @hybrid_property
    def user_mapping_column(self):
        # The user-id column of the linking table; used to filter the
        # permission queries below to a single user.
        return self.user_user_group_map.c.user_id

    @hybrid_property
    def permissions_query(self):
        """A query that maps users to permissions through all possible avenues."""
        # Join through the linking table, matching either direct
        # group-to-permission links or group-to-bundle links.
        return ents.joined_permission_query().join(
            self.user_user_group_map,
            sa.or_(
                self.user_user_group_map.c.user_group_id
                == ents.user_group_permission_map.c.user_group_id,  # noqa
                self.user_user_group_map.c.user_group_id
                == ents.user_group_bundle_map.c.user_group_id  # noqa
            )
        )

    @hybrid_property
    def permissions_with_user_id_query(self):
        """
        Like `permissions_query` but adds a column called `user_id` that can be used to
        filter/join on a particular user ID or user ID column.
        """
        return self.permissions_query.add_columns(
            self.user_mapping_column.label('user_id')
        )

    def get_all_permissions_without_cache(self):
        """Get all permissions that are joined to this User, whether directly, through permission
        bundles, or through user groups.

        Warning: Calling this method on a deleted entity may raise
        :class:`sqlalchemy.orm.exc.ObjectDeletedError`.
        """
        return frozenset(self.permissions_query.filter(
            self.user_mapping_column == self._primary_key
        ))

    def get_all_permissions(self):
        """Same as `get_all_permissions_without_cache` but uses a cached result after the first
        call.

        Warning: Calling this method on a deleted entity may raise
        :class:`sqlalchemy.orm.exc.ObjectDeletedError`.
        """
        self._cached_permissions = (self._cached_permissions
                                    or self.get_all_permissions_without_cache())
        return self._cached_permissions

    def has_permissions(self, *tokens):
        """Returns True IFF every given permission token is present in the user's permission set.

        Warning: Calling this method on a deleted entity may raise
        :class:`sqlalchemy.orm.exc.ObjectDeletedError`.
        """
        return frozenset(tokens) <= {x.token for x in self.get_all_permissions()}

    def has_any_permissions(self, *tokens):
        """Returns True IFF any of the given permission tokens are present in the user's permission
        set.

        Warning: Calling this method on a deleted entity may raise
        :class:`sqlalchemy.orm.exc.ObjectDeletedError`.
        """
        return not frozenset(tokens).isdisjoint(x.token for x in self.get_all_permissions())

    def reset_permission_cache(self):
        """Drop the memoized permission set so the next read re-queries."""
        self._cached_permissions = None
def make_password_mixin(history_entity_mixin=object, crypt_context=None):
    """Returns a mixin that adds password history and utility functions for working with passwords.

    :param history_entity_mixin: is an optional mixin to add to the password history entity.
                                 Supply a mixin if you want to include customized meta-information
                                 for each password in the history log.
    :param crypt_context: is an optional default :class:`CryptContext` object for hashing passwords.
                          If not supplied you must override the `get_crypt_context` method to
                          provide one.
    """
    class PasswordMixin(interfaces.HasPassword, KegBouncerMixin):
        # Class-level default; see get_crypt_context below.
        default_crypt_context = crypt_context

        def get_crypt_context(self):
            """Returns a passlib :class:`CryptContext` object for hashing passwords.

            If you supplied a default :class:`CryptContext` when building the mixin, this will
            return it. Otherwise you need to override this method to return one.
            """
            if not self.default_crypt_context:  # pragma: no cover
                raise NotImplementedError(
                    'You must specify class-member `default_crypt_context` or override this method'
                    ' to provide a CryptContext for password hashing.')
            return self.default_crypt_context

        @declared_attr
        def password_history(cls):
            """Relationship to the password history entity, newest entry first."""
            entity = cls.password_history_entity
            return saorm.relationship(
                entity,
                order_by=entity.created_at.desc(),
                cascade='all'
            )

        @declared_attr
        def password_history_entity(cls):
            """The password history entity generated for this entity's table."""
            return ents.make_password_history_entity(
                cls._primary_key_column(),
                cls.__tablename__,
                history_entity_mixin
            )

        @property
        def password(self):
            """The most recent password hash, or None when no password was ever set."""
            return (self.password_history[0].password
                    if len(self.password_history) else None)

        def verify_password(self, password):
            """Return True when ``password`` matches the most recent stored hash."""
            crypt_context = self.get_crypt_context()
            return (crypt_context.verify(text_type(password), self.password_history[0].password)
                    if self.password_history else False)

        def is_password_used_previously(self, password):
            """Return True when ``password`` matches any hash in the history log."""
            crypt_context = self.get_crypt_context()
            return any(crypt_context.verify(text_type(password), x.password)
                       for x in self.password_history)

        def set_password(self, password, **kwargs):
            """Sets a new password by adding it to the password history log.

            :param password: is the new password, in plaintext. It will be hashed by the
                             CryptContext from `get_crypt_context`.
            :param kwargs: any other fields to pass to the password history entity (if you set a
                           custom mixin for it).
            """
            crypt_context = self.get_crypt_context()
            password_entry = self.password_history_entity(
                password=crypt_context.hash(text_type(password)),
                **kwargs
            )
            # Assume the new password is more recent than the others and insert it at the head.
            self.password_history.insert(0, password_entry)
            # If we have timestamps for all history, we can sort them.
            if not any(x.created_at is None for x in self.password_history):
                self.password_history.sort(key=lambda x: x.created_at, reverse=True)

    return PasswordMixin
def make_login_history_mixin(history_entity_mixin=object):
    """Returns a mixin that adds login history relationships.

    :param history_entity_mixin: an optional mixin to add to the login history entity. Supply a
                                 mixin if you want to include customized meta-information for each
                                 entry in the history log.
    """
    class LoginHistoryMixin(KegBouncerMixin):
        """Adds a ``login_history`` relationship (newest first) to the host entity."""

        @property
        def last_login(self):
            """The most recent login history entry, or None when there is none."""
            return self.login_history[0] if len(self.login_history) else None

        @declared_attr
        def login_history(cls):
            """Relationship to login history entity, newest entry first."""
            entity = cls.login_history_entity
            return saorm.relationship(
                entity,
                order_by=entity.created_at.desc(),
                cascade='all, delete, delete-orphan'
            )

        @declared_attr
        def login_history_entity(cls):
            """A login history entity generated for this entity's table."""
            return ents.make_login_history_entity(
                cls._primary_key_column(),
                cls.__tablename__,
                history_entity_mixin
            )

    return LoginHistoryMixin
| 1,669 | 3,532 | 154 |
b453287cb3674869ddac9c2934a89c666a4bc7f3 | 1,972 | py | Python | ayame/exception.py | hattya/ayame | e8bb2b0ace79cd358b1384270cb9c5e809e12b5d | [
"MIT"
] | 1 | 2022-03-05T03:21:13.000Z | 2022-03-05T03:21:13.000Z | ayame/exception.py | hattya/ayame | e8bb2b0ace79cd358b1384270cb9c5e809e12b5d | [
"MIT"
] | 1 | 2021-08-25T13:41:34.000Z | 2021-08-25T13:41:34.000Z | ayame/exception.py | hattya/ayame | e8bb2b0ace79cd358b1384270cb9c5e809e12b5d | [
"MIT"
] | 1 | 2018-03-04T21:47:27.000Z | 2018-03-04T21:47:27.000Z | #
# ayame.exception
#
# Copyright (c) 2011-2021 Akinori Hattori <hattya@gmail.com>
#
# SPDX-License-Identifier: MIT
#
__all__ = ['AyameError', 'ComponentError', 'ConversionError', 'MarkupError',
'RenderingError', 'ResourceError', 'RouteError', 'ValidationError']
| 22.666667 | 91 | 0.608012 | #
# ayame.exception
#
# Copyright (c) 2011-2021 Akinori Hattori <hattya@gmail.com>
#
# SPDX-License-Identifier: MIT
#
__all__ = ['AyameError', 'ComponentError', 'ConversionError', 'MarkupError',
'RenderingError', 'ResourceError', 'RouteError', 'ValidationError']
class AyameError(Exception):
    """Root of the ayame exception hierarchy (see ``__all__``)."""
    pass
class ComponentError(AyameError):
    """Error subtype for component-related failures."""
    pass
class ConversionError(AyameError):
    """Error subtype carrying conversion context.

    Optional keyword context (``converter``, ``value``, ``type``) is
    stored on the instance; each attribute defaults to None.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args)
        for attr in ('converter', 'value', 'type'):
            setattr(self, attr, kwargs.get(attr))
class MarkupError(AyameError):
    """Error subtype for markup processing failures."""
    pass
class _Redirect(AyameError):
    """Private control-flow exception carrying redirect information.

    All constructor arguments are stored in ``args`` via the Exception
    constructor: the target object, optional values/anchor, and one of
    the type constants below.
    """

    # Redirect kinds, used as the ``type`` argument of ``__init__``.
    INTERNAL = -1
    PERMANENT = 1
    TEMPORARY = 2

    def __init__(self, object, values=None, anchor=None, type=None):
        super().__init__(object, values, anchor, type)
class RenderingError(AyameError):
    """Error subtype for rendering failures."""
    pass
class ResourceError(AyameError):
    """Error subtype for resource handling failures."""
    pass
class RouteError(AyameError):
    """Error subtype for routing failures."""
    pass
class _RequestSlash(RouteError):
    # Private routing-control signal; presumably indicates the requested
    # URL needs a trailing slash — TODO confirm at the raise sites.
    pass
class ValidationError(AyameError):
    """Error subtype raised for validation failures.

    Carries an optional component, a list of message-lookup keys and a
    dict of substitution variables; ``__str__`` uses them to resolve a
    translated message via ``component.tr``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args)
        self.component = kwargs.get('component')
        self.keys = []
        self.vars = {}
        validator = kwargs.get('validator')
        if validator:
            # Lookup key is the validator class name, optionally
            # suffixed with a variation: "Validator.variation".
            variation = kwargs.get('variation')
            suffix = '.' + variation if variation else ''
            self.keys.append(validator.__class__.__name__ + suffix)

    def __repr__(self):
        if self.args:
            args = repr(self.args)[1:-1].rstrip(',') + ', '
        else:
            args = ''
        return f'{self.__class__.__name__}({args}keys={self.keys}, vars={list(self.vars)})'

    def __str__(self):
        if self.component:
            for key in self.keys:
                msg = self.component.tr(key)
                if msg is not None:
                    return msg.format(**self.vars)
        return str(self.args[0]) if len(self.args) > 0 else ''
0a81ea0ef6d2d874fd43ce68bdd826f3b73b58b6 | 3,006 | py | Python | run_evaluation.py | Wuqiman/AiriaCvlib_runtime | 8f97d6580f1b7a1ded082e87976d0e77b2269f27 | [
"MIT"
] | 1 | 2021-05-19T09:57:36.000Z | 2021-05-19T09:57:36.000Z | run_evaluation.py | Wuqiman/AiriaCvlib_runtime | 8f97d6580f1b7a1ded082e87976d0e77b2269f27 | [
"MIT"
] | null | null | null | run_evaluation.py | Wuqiman/AiriaCvlib_runtime | 8f97d6580f1b7a1ded082e87976d0e77b2269f27 | [
"MIT"
] | 1 | 2019-08-15T06:17:53.000Z | 2019-08-15T06:17:53.000Z | """
The evaluation entry point for WIDER Challenge 2019: Face Detection Accuracy+Runtime Track.
It will be the entrypoint for the evaluation docker once built.
Basically It downloads a list of images and run the face detector on each image.
Then the runtime and detection output will be reported to the evaluation system.
The participants are expected to implement a face detector class. The sample detector illustrates the interface.
Do not modify other part of the evaluation toolkit otherwise the evaluation will fail.
Author: Yuanjun Xiong
Contact: bitxiong@gmail.com
WIDER Challenge 2019
"""
import time
import sys
import logging
import numpy as np
from eval_kit.client import upload_eval_output, get_image_iter, get_job_id
logging.basicConfig(level=logging.INFO)
########################################################################################################
# please change these lines to include your own face detector extending the eval_kit.detector.FaceDetector base class.
sys.path.append("mmdetection")
from mm_detector import MMDetector as WIDERTestFaceDetectorClass
########################################################################################################
def evaluate_runtime(detector_class, image_iter, job_id):
    """
    Please DO NOT modify this part of code or the eval_kit
    Modification of the evaluation toolkit could result in cancellation of your award.

    In this function we create the detector instance. And evaluate the wall time for performing face detection.

    Args:
        detector_class: class (not an instance) of the face detector to benchmark.
        image_iter: iterable of (image_id, image) pairs to run detection on.
        job_id: evaluation job identifier used when uploading results.
    """
    # initialize the detector
    logging.info("Initializing face detector.")
    try:
        detector = detector_class()
    except:
        # send errors to the eval frontend
        raise
    logging.info("Detector initialized.")

    # run the images one-by-one and get runtime
    overall_time = 0   # accumulated wall time across all images (seconds)
    output_boxes = {}  # image_id -> np.ndarray of detected boxes
    output_time = {}   # image_id -> per-image wall time (seconds)
    eval_cnt = 0
    logging.info("Starting runtime evaluation")
    for image_id, image in image_iter:
        # Only detector.process_image is timed; upload happens afterwards.
        time_before = time.time()
        try:
            boxes = detector.process_image(image)
            assert isinstance(boxes, np.ndarray)
            output_boxes[image_id] = boxes
        except:
            # send errors to the eval frontend
            logging.error("Image id failed: {}".format(image_id))
            raise
        elapsed = time.time() - time_before
        output_time[image_id] = elapsed
        logging.info("image {} run time: {}".format(image_id, elapsed))

        overall_time += elapsed
        eval_cnt += 1

        if eval_cnt % 100 == 0:
            logging.info("Finished {} images".format(eval_cnt))

    logging.info("all image finished, uploading evaluation outputs for evaluation.")

    # send evaluation output to the server
    upload_eval_output(output_boxes, output_time, job_id)
if __name__ == '__main__':
    # Entry point: obtain the job id and the image stream from the eval
    # backend, then time the configured detector over every image.
    job_id = get_job_id()
    wider_test_image_iter = get_image_iter()
    evaluate_runtime(WIDERTestFaceDetectorClass, wider_test_image_iter, job_id)
| 33.032967 | 118 | 0.666334 | """
The evaluation entry point for WIDER Challenge 2019: Face Detection Accuracy+Runtime Track.
It will be the entrypoint for the evaluation docker once built.
Basically It downloads a list of images and run the face detector on each image.
Then the runtime and detection output will be reported to the evaluation system.
The participants are expected to implement a face detector class. The sample detector illustrates the interface.
Do not modify other part of the evaluation toolkit otherwise the evaluation will fail.
Author: Yuanjun Xiong
Contact: bitxiong@gmail.com
WIDER Challenge 2019
"""
import time
import sys
import logging
import numpy as np
from eval_kit.client import upload_eval_output, get_image_iter, get_job_id
logging.basicConfig(level=logging.INFO)
########################################################################################################
# please change these lines to include your own face detector extending the eval_kit.detector.FaceDetector base class.
sys.path.append("mmdetection")
from mm_detector import MMDetector as WIDERTestFaceDetectorClass
########################################################################################################
def evaluate_runtime(detector_class, image_iter, job_id):
    """
    Please DO NOT modify this part of code or the eval_kit
    Modification of the evaluation toolkit could result in cancellation of your award.

    In this function we create the detector instance. And evaluate the wall time for performing face detection.

    Args:
        detector_class: class (not an instance) of the face detector to benchmark.
        image_iter: iterable of (image_id, image) pairs to run detection on.
        job_id: evaluation job identifier used when uploading results.
    """
    # initialize the detector
    logging.info("Initializing face detector.")
    try:
        detector = detector_class()
    except:
        # send errors to the eval frontend
        raise
    logging.info("Detector initialized.")

    # run the images one-by-one and get runtime
    overall_time = 0   # accumulated wall time across all images (seconds)
    output_boxes = {}  # image_id -> np.ndarray of detected boxes
    output_time = {}   # image_id -> per-image wall time (seconds)
    eval_cnt = 0
    logging.info("Starting runtime evaluation")
    for image_id, image in image_iter:
        # Only detector.process_image is timed; upload happens afterwards.
        time_before = time.time()
        try:
            boxes = detector.process_image(image)
            assert isinstance(boxes, np.ndarray)
            output_boxes[image_id] = boxes
        except:
            # send errors to the eval frontend
            logging.error("Image id failed: {}".format(image_id))
            raise
        elapsed = time.time() - time_before
        output_time[image_id] = elapsed
        logging.info("image {} run time: {}".format(image_id, elapsed))

        overall_time += elapsed
        eval_cnt += 1

        if eval_cnt % 100 == 0:
            logging.info("Finished {} images".format(eval_cnt))

    logging.info("all image finished, uploading evaluation outputs for evaluation.")

    # send evaluation output to the server
    upload_eval_output(output_boxes, output_time, job_id)
if __name__ == '__main__':
    # Entry point: obtain the job id and the image stream from the eval
    # backend, then time the configured detector over every image.
    job_id = get_job_id()
    wider_test_image_iter = get_image_iter()
    evaluate_runtime(WIDERTestFaceDetectorClass, wider_test_image_iter, job_id)
| 0 | 0 | 0 |
f92d3178700ef76d9de749d4bb2ef55d19985f07 | 2,539 | py | Python | tofnet/data/preprocess.py | victorjoos/tof2net | 068f5f08a241dbfb950251bea52fd9379466bf2f | [
"MIT"
] | null | null | null | tofnet/data/preprocess.py | victorjoos/tof2net | 068f5f08a241dbfb950251bea52fd9379466bf2f | [
"MIT"
] | 8 | 2021-02-02T23:07:37.000Z | 2022-03-12T00:51:26.000Z | tofnet/data/preprocess.py | victorjoos/tof2net | 068f5f08a241dbfb950251bea52fd9379466bf2f | [
"MIT"
] | 2 | 2020-10-01T08:23:24.000Z | 2020-11-09T22:01:47.000Z | import cv2
import torch.nn.functional as F
import torch
import matplotlib.pyplot as plt
import cv2
def normalize(imgs, img_type, **kwargs):
    """Normalize a batch according to its modality.

    "image" tensors are scaled from [0, 255] to [0, 1]; "mask" and
    "class" tensors are cast to int64 labels; every other modality
    (including "pcd") is returned unchanged.
    """
    if img_type == "image":
        return imgs / 255.0
    if img_type in {"mask", "class"}:
        return imgs.long()
    # Point clouds and unknown modalities pass through untouched.
    return imgs
return imgs | 29.870588 | 76 | 0.573848 | import cv2
import torch.nn.functional as F
import torch
import matplotlib.pyplot as plt
import cv2
def predict_preprocess():
    """Build a preprocessing callable for inference-time inputs.

    The returned function turns a raw point-cloud dict ("pcd" — keys
    "shape" and "points") or an array-like image into a batched tensor
    of batch size 1.
    """
    def _predict(img, img_type, **kwargs):
        if img_type != "pcd":
            # Images go through the shared HWC -> CHW conversion.
            return to_tensor(img).unsqueeze(0)
        print(img["shape"])
        points = torch.tensor(img["points"])
        points[torch.isnan(points)] = 0  # NaN points collapse to the origin
        grid = points.reshape(120, 160, 3)
        # Flip both spatial axes (equivalent to a 180° rotation).
        grid = torch.flip(grid, dims=(0, 1))
        return grid.reshape(-1, 3).unsqueeze(0)
    return _predict
def to_tensor(pic):
    """Convert an HWC (or HW) numpy image into a CHW torch tensor.

    Grayscale inputs first get a trailing channel axis; the array is
    copied so the resulting tensor owns contiguous memory.
    """
    arr = pic if pic.ndim != 2 else pic[:, :, None]
    return torch.from_numpy(arr.transpose((2, 0, 1)).copy())
def get_resize(new_size):
    """Return a resize transform that rescales batches to ``new_size``.

    "class" and "pcd" inputs pass through unchanged; unknown modalities
    raise ValueError. "image" batches use bilinear interpolation with
    corner alignment, the other known modalities use nearest-neighbour.
    Masks are temporarily given a channel axis so F.interpolate accepts
    them.
    """
    known = {"mask", "image", "depth", "keypoints", "var"}

    def resize(imgs, img_type, **kwargs):
        if img_type in {"class", "pcd"}:
            return imgs
        if img_type not in known:
            raise ValueError(f"type {img_type} not known")
        is_mask = img_type == "mask"
        batch = imgs.unsqueeze(1) if is_mask else imgs
        if img_type == "image":
            batch = F.interpolate(batch, size=new_size, mode="bilinear",
                                  align_corners=True)
        else:
            batch = F.interpolate(batch, size=new_size, mode="nearest")
        return batch.squeeze(1) if is_mask else batch

    return resize
def get_device(device):
    """Return a transform that moves batches onto ``device`` as float32."""
    def move_to_device(imgs, img_type, **kwargs):
        return imgs.to(device=device, dtype=torch.float32)
    return move_to_device
def get_gpu():
    """Return a transform that moves batches onto the first CUDA device
    as float32.

    The device handle is created eagerly, so invoking the returned
    function requires a CUDA-capable runtime.
    """
    cuda0 = torch.device("cuda:0")

    def move_to_gpu(imgs, img_type, **kwargs):
        return imgs.to(device=cuda0, dtype=torch.float32)

    return move_to_gpu
def normalize(imgs, img_type, **kwargs):
    """Normalize a batch according to its modality.

    "image" tensors are scaled from [0, 255] to [0, 1]; "mask" and
    "class" tensors are cast to int64 labels; every other modality
    (including "pcd") is returned unchanged.
    """
    if img_type == "image":
        return imgs / 255.0
    if img_type in {"mask", "class"}:
        return imgs.long()
    # Point clouds and unknown modalities pass through untouched.
    return imgs
def crop_center(img, start_prop=0):
    """Crop a square of side min(H, W) out of the last two axes of ``img``.

    ``start_prop`` selects the horizontal anchor: 0 keeps the crop
    centred; a nonzero value computes ``width // start_prop`` and pins
    the crop to the left edge when that is negative, to the right edge
    when positive. Supports 3-D and 4-D tensors; any other rank falls
    through and returns None.
    """
    height, width = img.shape[-2:]
    side = min(width, height)
    offset = width // start_prop if start_prop != 0 else 0
    if offset < 0:
        left = 0
    elif offset > 0:
        left = width - side
    else:
        left = width // 2 - side // 2
    top = height // 2 - side // 2
    if img.dim() == 4:
        return img[:, :, top:top + side, left:left + side]
    if img.dim() == 3:
        return img[:, top:top + side, left:left + side]
43c8c8ea95976909f215916195b8c67fbec890a8 | 3,448 | py | Python | idgo_admin/exceptions.py | neogeo-technologies/idgo | 23e028b0d7fb2daf54d7e2954e0cc4d7b9be4210 | [
"Apache-2.0"
] | null | null | null | idgo_admin/exceptions.py | neogeo-technologies/idgo | 23e028b0d7fb2daf54d7e2954e0cc4d7b9be4210 | [
"Apache-2.0"
] | 2 | 2018-09-14T07:12:00.000Z | 2019-11-13T09:32:24.000Z | idgo_admin/exceptions.py | neogeo-technologies/idgo | 23e028b0d7fb2daf54d7e2954e0cc4d7b9be4210 | [
"Apache-2.0"
] | 2 | 2019-03-25T08:27:43.000Z | 2019-10-07T15:25:30.000Z | # Copyright (c) 2017-2021 Neogeo-Technologies.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.http import Http404
from functools import wraps
# Définition des exceptions
# =========================
# Utilitaires
# ===========
| 26.9375 | 94 | 0.609919 | # Copyright (c) 2017-2021 Neogeo-Technologies.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.http import Http404
from functools import wraps
# Définition des exceptions
# =========================
class GenericException(Exception):
# TODO: Logger __dict__
message = (
"Une erreur s'est produite, si le problème persiste "
"veuillez contacter l'administrateur du site.")
def __init__(self, *args, **kwargs):
self.args = args
for k, v in kwargs.items():
setattr(self, k, v)
def __str__(self):
return self.error or self.message
@property
def error(self):
return ' '.join(self.args)
class ConflictError(GenericException):
pass
class CriticalError(GenericException):
pass
class DatagisBaseError(GenericException):
pass
class DatasetConflictError(GenericException):
pass
class FakeError(GenericException):
message = "Ceci n'est pas une erreur."
class ExceedsMaximumLayerNumberFixedError(GenericException):
message = "Votre ficher contient plus de jeux de données que ne l'autorise l'application."
def __str__(self):
try:
sentences = [
"Le fichier contient {} jeu{} de données géographiques.".format(
self.count, self.count > 1 and 'x' or ''),
"Vous ne pouvez pas ajouter plus de {} jeu{} de données.".format(
self.maximum, self.maximum > 1 and 'x' or '')]
except Exception:
return super().__str__()
return ' '.join(sentences)
class ProfileHttp404(Http404):
pass
class SizeLimitExceededError(GenericException):
message = 'La taille de la pièce jointe dépasse la limite autorisée.'
def __init__(self, *args, **kwargs):
self.message = \
kwargs.get('max_size') \
and '{0} La taille est limité à {1}o'.format(
self.message, kwargs['max_size'])
super().__init__(*args, **kwargs)
# Utilitaires
# ===========
class ExceptionsHandler(object):
def __init__(self, ignore=None, actions=None):
self.ignore = ignore or []
self.actions = actions or {}
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
# request = None
args = list(args)
# for arg in args:
# if isinstance(arg, WSGIRequest):
# request = arg
try:
return f(*args, **kwargs)
except Exception as e:
for exception, callback in self.actions.items():
if isinstance(e, exception):
return callback()
if self.is_ignored(e):
return f(*args, **kwargs)
raise e
return wrapper
def is_ignored(self, exception):
return type(exception) in self.ignore
| 1,607 | 764 | 311 |
81b453cb363394127db235b8f56c45563d8ace14 | 2,744 | py | Python | flask/server.py | Tansatija/GithubArt | 388cdc5fde0b8d0779790bb2a27fb058a953d8ac | [
"MIT"
] | 6 | 2019-05-03T19:15:13.000Z | 2021-09-25T20:12:22.000Z | flask/server.py | DumbMachine/GithubArt | 6b739f8740b886578b9ff165968a9266a23c46cc | [
"MIT"
] | 1 | 2019-05-03T19:05:51.000Z | 2019-05-03T19:05:51.000Z | flask/server.py | Tansatija/GithubArt | 388cdc5fde0b8d0779790bb2a27fb058a953d8ac | [
"MIT"
] | 2 | 2021-04-04T14:35:37.000Z | 2021-07-19T04:31:03.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Another useless python script
@author: ratin
"""
#TODO: Implement Authentication System
#TODO: Implement feature to save in Mongo the books which are asked often
#TODO: Implement feature to ask for the number of books sent in the API
import datetime
import sys
from json import dumps
from flask import Flask, request
from flask.json import jsonify
from flask_cors import CORS, cross_origin
from flask_restful import Api, Resource
app = Flask(__name__)
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
# api = Api(app , errors={
# 'NotFound': {
# 'message': "Something is missing.",
# 'status': 404,
# }
# }
# )
# class Serve(Resource):
# def __init__(self,book_type="novel"):
# self.message= "Working"
# self.git_name = request.args.get("git_name")
# self.git_email = request.args.get("git_email")
# self.data = request.args.get('data')
# self.error = None
# def do(self):
# from Art import GithubArt
# self.message = "done"
# temp = GithubArt(self.git_email,self.git_name)
# temp.cnv_DataURL_image(self.data)
# temp.cleanup()
# # self.message = temp.everything()
# # temp.finish()
# def get(self):
# self.do()
# print("==================")
# print(self.data)
# print("==================")
# print(type(self.data))
# if self.error:
# return {
# "error": self.error,
# "data" : "",
# 'name' : "",
# 'email': "",
# "error": "",
# "time": str(datetime.datetime.now())[:]
# }
# else:
# return {
# "data": self.data,
# 'name': self.git_name,
# 'email': self.git_email,
# "error": self.error,
# "time": str(datetime.datetime.now())[:]
# }
# api.add_resource(Serve, "/api")
@app.route('/test/', methods=['GET','POST'])
@cross_origin()
if __name__ == "__main__":
app.run(port="5002",debug=True)
| 26.133333 | 73 | 0.532434 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Another useless python script
@author: ratin
"""
#TODO: Implement Authentication System
#TODO: Implement feature to save in Mongo the books which are asked often
#TODO: Implement feature to ask for the number of books sent in the API
import datetime
import sys
from json import dumps
from flask import Flask, request
from flask.json import jsonify
from flask_cors import CORS, cross_origin
from flask_restful import Api, Resource
app = Flask(__name__)
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
# api = Api(app , errors={
# 'NotFound': {
# 'message': "Something is missing.",
# 'status': 404,
# }
# }
# )
# class Serve(Resource):
# def __init__(self,book_type="novel"):
# self.message= "Working"
# self.git_name = request.args.get("git_name")
# self.git_email = request.args.get("git_email")
# self.data = request.args.get('data')
# self.error = None
# def do(self):
# from Art import GithubArt
# self.message = "done"
# temp = GithubArt(self.git_email,self.git_name)
# temp.cnv_DataURL_image(self.data)
# temp.cleanup()
# # self.message = temp.everything()
# # temp.finish()
# def get(self):
# self.do()
# print("==================")
# print(self.data)
# print("==================")
# print(type(self.data))
# if self.error:
# return {
# "error": self.error,
# "data" : "",
# 'name' : "",
# 'email': "",
# "error": "",
# "time": str(datetime.datetime.now())[:]
# }
# else:
# return {
# "data": self.data,
# 'name': self.git_name,
# 'email': self.git_email,
# "error": self.error,
# "time": str(datetime.datetime.now())[:]
# }
# api.add_resource(Serve, "/api")
@app.route('/test/', methods=['GET','POST'])
@cross_origin()
def test():
    """Handle the /test/ endpoint.

    GET requests are rejected with a placeholder message. POST requests
    carry a JSON body (name, email, DataURL) that is forwarded to
    ``Art.GithubArt`` for processing.
    """
    import json
    if request.method == "GET":
        print("GET")
        return "Dont Come here"
    payload = None
    if request.method == "POST":
        print("POST")
        payload = request.data
        from Art import GithubArt
        body = json.loads(payload.decode())
        print("clicked; ", body['name'])
        art = GithubArt(body["email"], body['name'])
        art.cnv_DataURL_image(body['DataURL'])
        print(art.everything())
        art.finish()
        # art.cleanup()  (left disabled, as in the original)
    return "."
if __name__ == "__main__":
    # Dev entry point; note the port is passed as a string here.
    app.run(port="5002",debug=True)
| 539 | 0 | 22 |