hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
248
| max_stars_repo_name
stringlengths 5
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
248
| max_issues_repo_name
stringlengths 5
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
248
| max_forks_repo_name
stringlengths 5
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 5
2.06M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.03M
| alphanum_fraction
float64 0
1
| count_classes
int64 0
1.6M
| score_classes
float64 0
1
| count_generators
int64 0
651k
| score_generators
float64 0
1
| count_decorators
int64 0
990k
| score_decorators
float64 0
1
| count_async_functions
int64 0
235k
| score_async_functions
float64 0
1
| count_documentation
int64 0
1.04M
| score_documentation
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
db08017fe044db65092dd00ed22dea1c4564f406
| 699
|
py
|
Python
|
test/module_dir/mymodule/__init__.py
|
honzajavorek/mkdocs_macros_plugin
|
c97c2e08e3c1cb9023b28a605784e0a7ac45b885
|
[
"MIT"
] | null | null | null |
test/module_dir/mymodule/__init__.py
|
honzajavorek/mkdocs_macros_plugin
|
c97c2e08e3c1cb9023b28a605784e0a7ac45b885
|
[
"MIT"
] | null | null | null |
test/module_dir/mymodule/__init__.py
|
honzajavorek/mkdocs_macros_plugin
|
c97c2e08e3c1cb9023b28a605784e0a7ac45b885
|
[
"MIT"
] | null | null | null |
import os
def define_env(env):
"""
This is the hook for the functions (new form)
"""
env.variables.cwd = os.getcwd()
# use dot notation for adding
env.variables.baz = env.variables.fix_url('foo')
# Optional: a special function for making relative urls point to root
fix_url = env.variables.fix_url
@env.macro
def button(label, url):
"Add a button"
url = fix_url(url)
HTML = """<a class='md-button' href="%s">%s</a>"""
return HTML % (url, label)
env.variables.special_docs_dir = env.variables.config['docs_dir']
@env.macro
def show_nav():
"Show the navigation"
return env.conf['nav']
| 20.558824
| 73
| 0.602289
| 0
| 0
| 0
| 0
| 273
| 0.390558
| 0
| 0
| 257
| 0.367668
|
db086691881d363f79126af6b8d208d584242b29
| 114,519
|
py
|
Python
|
cisco-ios-xe/ydk/models/cisco_ios_xe/MPLS_LDP_STD_MIB.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xe/ydk/models/cisco_ios_xe/MPLS_LDP_STD_MIB.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xe/ydk/models/cisco_ios_xe/MPLS_LDP_STD_MIB.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
""" MPLS_LDP_STD_MIB
Copyright (C) The Internet Society (2004). The
initial version of this MIB module was published
in RFC 3815. For full legal notices see the RFC
itself or see\:
http\://www.ietf.org/copyrights/ianamib.html
This MIB contains managed object definitions for the
'Multiprotocol Label Switching, Label Distribution
Protocol, LDP' document.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class MPLSLDPSTDMIB(Entity):
"""
.. attribute:: mplsldplsrobjects
**type**\: :py:class:`MplsLdpLsrObjects <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpLsrObjects>`
.. attribute:: mplsldpentityobjects
**type**\: :py:class:`MplsLdpEntityObjects <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityObjects>`
.. attribute:: mplsldpsessionobjects
**type**\: :py:class:`MplsLdpSessionObjects <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpSessionObjects>`
.. attribute:: mplsfecobjects
**type**\: :py:class:`MplsFecObjects <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsFecObjects>`
.. attribute:: mplsldpentitytable
This table contains information about the MPLS Label Distribution Protocol Entities which exist on this Label Switching Router (LSR) or Label Edge Router (LER)
**type**\: :py:class:`MplsLdpEntityTable <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityTable>`
.. attribute:: mplsldppeertable
Information about LDP peers known by Entities in the mplsLdpEntityTable. The information in this table is based on information from the Entity\-Peer interaction during session initialization but is not appropriate for the mplsLdpSessionTable, because objects in this table may or may not be used in session establishment
**type**\: :py:class:`MplsLdpPeerTable <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpPeerTable>`
.. attribute:: mplsldphelloadjacencytable
A table of Hello Adjacencies for Sessions
**type**\: :py:class:`MplsLdpHelloAdjacencyTable <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpHelloAdjacencyTable>`
.. attribute:: mplsinsegmentldplsptable
A table of LDP LSP's which map to the mplsInSegmentTable in the MPLS\-LSR\-STD\-MIB module
**type**\: :py:class:`MplsInSegmentLdpLspTable <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsInSegmentLdpLspTable>`
.. attribute:: mplsoutsegmentldplsptable
A table of LDP LSP's which map to the mplsOutSegmentTable in the MPLS\-LSR\-STD\-MIB
**type**\: :py:class:`MplsOutSegmentLdpLspTable <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsOutSegmentLdpLspTable>`
.. attribute:: mplsfectable
This table represents the FEC (Forwarding Equivalence Class) Information associated with an LSP
**type**\: :py:class:`MplsFecTable <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsFecTable>`
.. attribute:: mplsldplspfectable
A table which shows the relationship between LDP LSPs and FECs. Each row represents a single LDP LSP to FEC association
**type**\: :py:class:`MplsLdpLspFecTable <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpLspFecTable>`
.. attribute:: mplsldpsessionpeeraddrtable
This table 'extends' the mplsLdpSessionTable. This table is used to store Label Address Information from Label Address Messages received by this LSR from Peers. This table is read\-only and should be updated when Label Withdraw Address Messages are received, i.e., Rows should be deleted as appropriate. NOTE\: since more than one address may be contained in a Label Address Message, this table 'sparse augments', the mplsLdpSessionTable's information
**type**\: :py:class:`MplsLdpSessionPeerAddrTable <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpSessionPeerAddrTable>`
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB, self).__init__()
self._top_entity = None
self.yang_name = "MPLS-LDP-STD-MIB"
self.yang_parent_name = "MPLS-LDP-STD-MIB"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("mplsLdpLsrObjects", ("mplsldplsrobjects", MPLSLDPSTDMIB.MplsLdpLsrObjects)), ("mplsLdpEntityObjects", ("mplsldpentityobjects", MPLSLDPSTDMIB.MplsLdpEntityObjects)), ("mplsLdpSessionObjects", ("mplsldpsessionobjects", MPLSLDPSTDMIB.MplsLdpSessionObjects)), ("mplsFecObjects", ("mplsfecobjects", MPLSLDPSTDMIB.MplsFecObjects)), ("mplsLdpEntityTable", ("mplsldpentitytable", MPLSLDPSTDMIB.MplsLdpEntityTable)), ("mplsLdpPeerTable", ("mplsldppeertable", MPLSLDPSTDMIB.MplsLdpPeerTable)), ("mplsLdpHelloAdjacencyTable", ("mplsldphelloadjacencytable", MPLSLDPSTDMIB.MplsLdpHelloAdjacencyTable)), ("mplsInSegmentLdpLspTable", ("mplsinsegmentldplsptable", MPLSLDPSTDMIB.MplsInSegmentLdpLspTable)), ("mplsOutSegmentLdpLspTable", ("mplsoutsegmentldplsptable", MPLSLDPSTDMIB.MplsOutSegmentLdpLspTable)), ("mplsFecTable", ("mplsfectable", MPLSLDPSTDMIB.MplsFecTable)), ("mplsLdpLspFecTable", ("mplsldplspfectable", MPLSLDPSTDMIB.MplsLdpLspFecTable)), ("mplsLdpSessionPeerAddrTable", ("mplsldpsessionpeeraddrtable", MPLSLDPSTDMIB.MplsLdpSessionPeerAddrTable))])
self._leafs = OrderedDict()
self.mplsldplsrobjects = MPLSLDPSTDMIB.MplsLdpLsrObjects()
self.mplsldplsrobjects.parent = self
self._children_name_map["mplsldplsrobjects"] = "mplsLdpLsrObjects"
self.mplsldpentityobjects = MPLSLDPSTDMIB.MplsLdpEntityObjects()
self.mplsldpentityobjects.parent = self
self._children_name_map["mplsldpentityobjects"] = "mplsLdpEntityObjects"
self.mplsldpsessionobjects = MPLSLDPSTDMIB.MplsLdpSessionObjects()
self.mplsldpsessionobjects.parent = self
self._children_name_map["mplsldpsessionobjects"] = "mplsLdpSessionObjects"
self.mplsfecobjects = MPLSLDPSTDMIB.MplsFecObjects()
self.mplsfecobjects.parent = self
self._children_name_map["mplsfecobjects"] = "mplsFecObjects"
self.mplsldpentitytable = MPLSLDPSTDMIB.MplsLdpEntityTable()
self.mplsldpentitytable.parent = self
self._children_name_map["mplsldpentitytable"] = "mplsLdpEntityTable"
self.mplsldppeertable = MPLSLDPSTDMIB.MplsLdpPeerTable()
self.mplsldppeertable.parent = self
self._children_name_map["mplsldppeertable"] = "mplsLdpPeerTable"
self.mplsldphelloadjacencytable = MPLSLDPSTDMIB.MplsLdpHelloAdjacencyTable()
self.mplsldphelloadjacencytable.parent = self
self._children_name_map["mplsldphelloadjacencytable"] = "mplsLdpHelloAdjacencyTable"
self.mplsinsegmentldplsptable = MPLSLDPSTDMIB.MplsInSegmentLdpLspTable()
self.mplsinsegmentldplsptable.parent = self
self._children_name_map["mplsinsegmentldplsptable"] = "mplsInSegmentLdpLspTable"
self.mplsoutsegmentldplsptable = MPLSLDPSTDMIB.MplsOutSegmentLdpLspTable()
self.mplsoutsegmentldplsptable.parent = self
self._children_name_map["mplsoutsegmentldplsptable"] = "mplsOutSegmentLdpLspTable"
self.mplsfectable = MPLSLDPSTDMIB.MplsFecTable()
self.mplsfectable.parent = self
self._children_name_map["mplsfectable"] = "mplsFecTable"
self.mplsldplspfectable = MPLSLDPSTDMIB.MplsLdpLspFecTable()
self.mplsldplspfectable.parent = self
self._children_name_map["mplsldplspfectable"] = "mplsLdpLspFecTable"
self.mplsldpsessionpeeraddrtable = MPLSLDPSTDMIB.MplsLdpSessionPeerAddrTable()
self.mplsldpsessionpeeraddrtable.parent = self
self._children_name_map["mplsldpsessionpeeraddrtable"] = "mplsLdpSessionPeerAddrTable"
self._segment_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB, [], name, value)
class MplsLdpLsrObjects(Entity):
"""
.. attribute:: mplsldplsrid
The Label Switching Router's Identifier
**type**\: str
**length:** 4
.. attribute:: mplsldplsrloopdetectioncapable
A indication of whether this Label Switching Router supports loop detection. none(1) \-\- Loop Detection is not supported on this LSR. other(2) \-\- Loop Detection is supported but by a method other than those listed below. hopCount(3) \-\- Loop Detection is supported by Hop Count only. pathVector(4) \-\- Loop Detection is supported by Path Vector only. hopCountAndPathVector(5) \-\- Loop Detection is supported by both Hop Count And Path Vector. Since Loop Detection is determined during Session Initialization, an individual session may not be running with loop detection. This object simply gives an indication of whether or not the LSR has the ability to support Loop Detection and which types
**type**\: :py:class:`MplsLdpLsrLoopDetectionCapable <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpLsrObjects.MplsLdpLsrLoopDetectionCapable>`
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsLdpLsrObjects, self).__init__()
self.yang_name = "mplsLdpLsrObjects"
self.yang_parent_name = "MPLS-LDP-STD-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('mplsldplsrid', (YLeaf(YType.str, 'mplsLdpLsrId'), ['str'])),
('mplsldplsrloopdetectioncapable', (YLeaf(YType.enumeration, 'mplsLdpLsrLoopDetectionCapable'), [('ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB', 'MPLSLDPSTDMIB', 'MplsLdpLsrObjects.MplsLdpLsrLoopDetectionCapable')])),
])
self.mplsldplsrid = None
self.mplsldplsrloopdetectioncapable = None
self._segment_path = lambda: "mplsLdpLsrObjects"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsLdpLsrObjects, [u'mplsldplsrid', u'mplsldplsrloopdetectioncapable'], name, value)
class MplsLdpLsrLoopDetectionCapable(Enum):
"""
MplsLdpLsrLoopDetectionCapable (Enum Class)
A indication of whether this
Label Switching Router supports
loop detection.
none(1) \-\- Loop Detection is not supported
on this LSR.
other(2) \-\- Loop Detection is supported but
by a method other than those
listed below.
hopCount(3) \-\- Loop Detection is supported by
Hop Count only.
pathVector(4) \-\- Loop Detection is supported by
Path Vector only.
hopCountAndPathVector(5) \-\- Loop Detection is
supported by both Hop Count
And Path Vector.
Since Loop Detection is determined during
Session Initialization, an individual session
may not be running with loop detection. This
object simply gives an indication of whether or not the
LSR has the ability to support Loop Detection and
which types.
.. data:: none = 1
.. data:: other = 2
.. data:: hopCount = 3
.. data:: pathVector = 4
.. data:: hopCountAndPathVector = 5
"""
none = Enum.YLeaf(1, "none")
other = Enum.YLeaf(2, "other")
hopCount = Enum.YLeaf(3, "hopCount")
pathVector = Enum.YLeaf(4, "pathVector")
hopCountAndPathVector = Enum.YLeaf(5, "hopCountAndPathVector")
class MplsLdpEntityObjects(Entity):
"""
.. attribute:: mplsldpentitylastchange
The value of sysUpTime at the time of the most recent addition or deletion of an entry to/from the mplsLdpEntityTable/mplsLdpEntityStatsTable, or the most recent change in value of any objects in the mplsLdpEntityTable. If no such changes have occurred since the last re\-initialization of the local management subsystem, then this object contains a zero value
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldpentityindexnext
This object contains an appropriate value to be used for mplsLdpEntityIndex when creating entries in the mplsLdpEntityTable. The value 0 indicates that no unassigned entries are available
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsLdpEntityObjects, self).__init__()
self.yang_name = "mplsLdpEntityObjects"
self.yang_parent_name = "MPLS-LDP-STD-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('mplsldpentitylastchange', (YLeaf(YType.uint32, 'mplsLdpEntityLastChange'), ['int'])),
('mplsldpentityindexnext', (YLeaf(YType.uint32, 'mplsLdpEntityIndexNext'), ['int'])),
])
self.mplsldpentitylastchange = None
self.mplsldpentityindexnext = None
self._segment_path = lambda: "mplsLdpEntityObjects"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsLdpEntityObjects, [u'mplsldpentitylastchange', u'mplsldpentityindexnext'], name, value)
class MplsLdpSessionObjects(Entity):
"""
.. attribute:: mplsldppeerlastchange
The value of sysUpTime at the time of the most recent addition or deletion to/from the mplsLdpPeerTable/mplsLdpSessionTable
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldplspfeclastchange
The value of sysUpTime at the time of the most recent addition/deletion of an entry to/from the mplsLdpLspFecTable or the most recent change in values to any objects in the mplsLdpLspFecTable. If no such changes have occurred since the last re\-initialization of the local management subsystem, then this object contains a zero value
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsLdpSessionObjects, self).__init__()
self.yang_name = "mplsLdpSessionObjects"
self.yang_parent_name = "MPLS-LDP-STD-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('mplsldppeerlastchange', (YLeaf(YType.uint32, 'mplsLdpPeerLastChange'), ['int'])),
('mplsldplspfeclastchange', (YLeaf(YType.uint32, 'mplsLdpLspFecLastChange'), ['int'])),
])
self.mplsldppeerlastchange = None
self.mplsldplspfeclastchange = None
self._segment_path = lambda: "mplsLdpSessionObjects"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsLdpSessionObjects, [u'mplsldppeerlastchange', u'mplsldplspfeclastchange'], name, value)
class MplsFecObjects(Entity):
"""
.. attribute:: mplsfeclastchange
The value of sysUpTime at the time of the most recent addition/deletion of an entry to/from the mplsLdpFectTable or the most recent change in values to any objects in the mplsLdpFecTable. If no such changes have occurred since the last re\-initialization of the local management subsystem, then this object contains a zero value
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsfecindexnext
This object contains an appropriate value to be used for mplsFecIndex when creating entries in the mplsFecTable. The value 0 indicates that no unassigned entries are available
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsFecObjects, self).__init__()
self.yang_name = "mplsFecObjects"
self.yang_parent_name = "MPLS-LDP-STD-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('mplsfeclastchange', (YLeaf(YType.uint32, 'mplsFecLastChange'), ['int'])),
('mplsfecindexnext', (YLeaf(YType.uint32, 'mplsFecIndexNext'), ['int'])),
])
self.mplsfeclastchange = None
self.mplsfecindexnext = None
self._segment_path = lambda: "mplsFecObjects"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsFecObjects, [u'mplsfeclastchange', u'mplsfecindexnext'], name, value)
class MplsLdpEntityTable(Entity):
"""
This table contains information about the
MPLS Label Distribution Protocol Entities which
exist on this Label Switching Router (LSR)
or Label Edge Router (LER).
.. attribute:: mplsldpentityentry
An entry in this table represents an LDP entity. An entry can be created by a network administrator or by an SNMP agent as instructed by LDP
**type**\: list of :py:class:`MplsLdpEntityEntry <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry>`
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsLdpEntityTable, self).__init__()
self.yang_name = "mplsLdpEntityTable"
self.yang_parent_name = "MPLS-LDP-STD-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("mplsLdpEntityEntry", ("mplsldpentityentry", MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry))])
self._leafs = OrderedDict()
self.mplsldpentityentry = YList(self)
self._segment_path = lambda: "mplsLdpEntityTable"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsLdpEntityTable, [], name, value)
class MplsLdpEntityEntry(Entity):
"""
An entry in this table represents an LDP entity.
An entry can be created by a network administrator
or by an SNMP agent as instructed by LDP.
.. attribute:: mplsldpentityldpid (key)
The LDP identifier
**type**\: str
.. attribute:: mplsldpentityindex (key)
This index is used as a secondary index to uniquely identify this row. Before creating a row in this table, the 'mplsLdpEntityIndexNext' object should be retrieved. That value should be used for the value of this index when creating a row in this table. NOTE\: if a value of zero (0) is retrieved, that indicates that no rows can be created in this table at this time. A secondary index (this object) is meaningful to some but not all, LDP implementations. For example an LDP implementation which uses PPP would use this index to differentiate PPP sub\-links. Another way to use this index is to give this the value of ifIndex. However, this is dependant on the implementation
**type**\: int
**range:** 1..4294967295
.. attribute:: mplsldpentityprotocolversion
The version number of the LDP protocol which will be used in the session initialization message. Section 3.5.3 in the LDP Specification specifies that the version of the LDP protocol is negotiated during session establishment. The value of this object represents the value that is sent in the initialization message
**type**\: int
**range:** 1..65535
.. attribute:: mplsldpentityadminstatus
The administrative status of this LDP Entity. If this object is changed from 'enable' to 'disable' and this entity has already attempted to establish contact with a Peer, then all contact with that Peer is lost and all information from that Peer needs to be removed from the MIB. (This implies that the network management subsystem should clean up any related entry in the mplsLdpPeerTable. This further implies that a 'tear\-down' for that session is issued and the session and all information related to that session cease to exist). At this point the operator is able to change values which are related to this entity. When the admin status is set back to 'enable', then this Entity will attempt to establish a new session with the Peer
**type**\: :py:class:`MplsLdpEntityAdminStatus <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry.MplsLdpEntityAdminStatus>`
.. attribute:: mplsldpentityoperstatus
The operational status of this LDP Entity. The value of unknown(1) indicates that the operational status cannot be determined at this time. The value of unknown should be a transient condition before changing to enabled(2) or disabled(3)
**type**\: :py:class:`MplsLdpEntityOperStatus <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry.MplsLdpEntityOperStatus>`
.. attribute:: mplsldpentitytcpport
The TCP Port for LDP. The default value is the well\-known value of this port
**type**\: int
**range:** 0..65535
.. attribute:: mplsldpentityudpdscport
The UDP Discovery Port for LDP. The default value is the well\-known value for this port
**type**\: int
**range:** 0..65535
.. attribute:: mplsldpentitymaxpdulength
The maximum PDU Length that is sent in the Common Session Parameters of an Initialization Message. According to the LDP Specification [RFC3036] a value of 255 or less specifies the default maximum length of 4096 octets, this is why the value of this object starts at 256. The operator should explicitly choose the default value (i.e., 4096), or some other value. The receiving LSR MUST calculate the maximum PDU length for the session by using the smaller of its and its peer's proposals for Max PDU Length
**type**\: int
**range:** 256..65535
**units**\: octets
.. attribute:: mplsldpentitykeepaliveholdtimer
The 16\-bit integer value which is the proposed keep alive hold timer for this LDP Entity
**type**\: int
**range:** 1..65535
**units**\: seconds
.. attribute:: mplsldpentityhelloholdtimer
The 16\-bit integer value which is the proposed Hello hold timer for this LDP Entity. The Hello Hold time in seconds. An LSR maintains a record of Hellos received from potential peers. This object represents the Hold Time in the Common Hello Parameters TLV of the Hello Message. A value of 0 is a default value and should be interpretted in conjunction with the mplsLdpEntityTargetPeer object. If the value of this object is 0\: if the value of the mplsLdpEntityTargetPeer object is false(2), then this specifies that the Hold Time's actual default value is 15 seconds (i.e., the default Hold time for Link Hellos is 15 seconds). Otherwise if the value of the mplsLdpEntityTargetPeer object is true(1), then this specifies that the Hold Time's actual default value is 45 seconds (i.e., the default Hold time for Targeted Hellos is 45 seconds). A value of 65535 means infinite (i.e., wait forever). All other values represent the amount of time in seconds to wait for a Hello Message. Setting the hold time to a value smaller than 15 is not recommended, although not forbidden according to RFC3036
**type**\: int
**range:** 0..65535
**units**\: seconds
.. attribute:: mplsldpentityinitsessionthreshold
When attempting to establish a session with a given Peer, the given LDP Entity should send out the SNMP notification, 'mplsLdpInitSessionThresholdExceeded', when the number of Session Initialization messages sent exceeds this threshold. The notification is used to notify an operator when this Entity and its Peer are possibly engaged in an endless sequence of messages as each NAKs the other's Initialization messages with Error Notification messages. Setting this threshold which triggers the notification is one way to notify the operator. The notification should be generated each time this threshold is exceeded and for every subsequent Initialization message which is NAK'd with an Error Notification message after this threshold is exceeded. A value of 0 (zero) for this object indicates that the threshold is infinity, thus the SNMP notification will never be generated
**type**\: int
**range:** 0..100
.. attribute:: mplsldpentitylabeldistmethod
For any given LDP session, the method of label distribution must be specified
**type**\: :py:class:`MplsLabelDistributionMethod <ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB.MplsLabelDistributionMethod>`
.. attribute:: mplsldpentitylabelretentionmode
The LDP Entity can be configured to use either conservative or liberal label retention mode. If the value of this object is conservative(1) then advertized label mappings are retained only if they will be used to forward packets, i.e., if label came from a valid next hop. If the value of this object is liberal(2) then all advertized label mappings are retained whether they are from a valid next hop or not
**type**\: :py:class:`MplsRetentionMode <ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB.MplsRetentionMode>`
.. attribute:: mplsldpentitypathvectorlimit
If the value of this object is 0 (zero) then Loop Detection for Path Vectors is disabled. Otherwise, if this object has a value greater than zero, then Loop Dection for Path Vectors is enabled, and the Path Vector Limit is this value. Also, the value of the object, 'mplsLdpLsrLoopDetectionCapable', must be set to either 'pathVector(4)' or 'hopCountAndPathVector(5)', if this object has a value greater than 0 (zero), otherwise it is ignored
**type**\: int
**range:** 0..255
.. attribute:: mplsldpentityhopcountlimit
If the value of this object is 0 (zero), then Loop Detection using Hop Counters is disabled. If the value of this object is greater than 0 (zero) then Loop Detection using Hop Counters is enabled, and this object specifies this Entity's maximum allowable value for the Hop Count. Also, the value of the object mplsLdpLsrLoopDetectionCapable must be set to either 'hopCount(3)' or 'hopCountAndPathVector(5)' if this object has a value greater than 0 (zero), otherwise it is ignored
**type**\: int
**range:** 0..255
.. attribute:: mplsldpentitytransportaddrkind
This specifies whether the loopback or interface address is to be used as the transport address in the transport address TLV of the hello message. If the value is interface(1), then the IP address of the interface from which hello messages are sent is used as the transport address in the hello message. Otherwise, if the value is loopback(2), then the IP address of the loopback interface is used as the transport address in the hello message
**type**\: :py:class:`MplsLdpEntityTransportAddrKind <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry.MplsLdpEntityTransportAddrKind>`
.. attribute:: mplsldpentitytargetpeer
If this LDP entity uses targeted peer then set this to true
**type**\: bool
.. attribute:: mplsldpentitytargetpeeraddrtype
The type of the internetwork layer address used for the Extended Discovery. This object indicates how the value of mplsLdpEntityTargetPeerAddr is to be interpreted
**type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>`
.. attribute:: mplsldpentitytargetpeeraddr
The value of the internetwork layer address used for the Extended Discovery. The value of mplsLdpEntityTargetPeerAddrType specifies how this address is to be interpreted
**type**\: str
**length:** 0..255
.. attribute:: mplsldpentitylabeltype
Specifies the optional parameters for the LDP Initialization Message. If the value is generic(1) then no optional parameters will be sent in the LDP Initialization message associated with this Entity. If the value is atmParameters(2) then a row must be created in the mplsLdpEntityAtmTable, which corresponds to this entry. If the value is frameRelayParameters(3) then a row must be created in the mplsLdpEntityFrameRelayTable, which corresponds to this entry
**type**\: :py:class:`MplsLdpLabelType <ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB.MplsLdpLabelType>`
.. attribute:: mplsldpentitydiscontinuitytime
The value of sysUpTime on the most recent occasion at which any one or more of this entity's counters suffered a discontinuity. The relevant counters are the specific instances associated with this entity of any Counter32 object contained in the 'mplsLdpEntityStatsTable'. If no such discontinuities have occurred since the last re\-initialization of the local management subsystem, then this object contains a zero value
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldpentitystoragetype
The storage type for this conceptual row. Conceptual rows having the value 'permanent(4)' need not allow write\-access to any columnar objects in the row
**type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>`
.. attribute:: mplsldpentityrowstatus
The status of this conceptual row. All writable objects in this row may be modified at any time, however, as described in detail in the section entitled, 'Changing Values After Session Establishment', and again described in the DESCRIPTION clause of the mplsLdpEntityAdminStatus object, if a session has been initiated with a Peer, changing objects in this table will wreak havoc with the session and interrupt traffic. To repeat again\: the recommended procedure is to set the mplsLdpEntityAdminStatus to down, thereby explicitly causing a session to be torn down. Then, change objects in this entry, then set the mplsLdpEntityAdminStatus to enable, which enables a new session to be initiated
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
.. attribute:: mplsldpentitystatssessionattempts
A count of the Session Initialization messages which were sent or received by this LDP Entity and were NAK'd. In other words, this counter counts the number of session initializations that failed. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of mplsLdpEntityDiscontinuityTime
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldpentitystatssessionrejectednohelloerrors
A count of the Session Rejected/No Hello Error Notification Messages sent or received by this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of mplsLdpEntityDiscontinuityTime
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldpentitystatssessionrejectedaderrors
A count of the Session Rejected/Parameters Advertisement Mode Error Notification Messages sent or received by this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of mplsLdpEntityDiscontinuityTime
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldpentitystatssessionrejectedmaxpduerrors
A count of the Session Rejected/Parameters Max Pdu Length Error Notification Messages sent or received by this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of mplsLdpEntityDiscontinuityTime
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldpentitystatssessionrejectedlrerrors
A count of the Session Rejected/Parameters Label Range Notification Messages sent or received by this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of mplsLdpEntityDiscontinuityTime
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldpentitystatsbadldpidentifiererrors
This object counts the number of Bad LDP Identifier Fatal Errors detected by the session(s) (past and present) associated with this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of mplsLdpEntityDiscontinuityTime
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldpentitystatsbadpdulengtherrors
This object counts the number of Bad PDU Length Fatal Errors detected by the session(s) (past and present) associated with this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of mplsLdpEntityDiscontinuityTime
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldpentitystatsbadmessagelengtherrors
This object counts the number of Bad Message Length Fatal Errors detected by the session(s) (past and present) associated with this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of mplsLdpEntityDiscontinuityTime
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldpentitystatsbadtlvlengtherrors
This object counts the number of Bad TLV Length Fatal Errors detected by the session(s) (past and present) associated with this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of mplsLdpEntityDiscontinuityTime
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldpentitystatsmalformedtlvvalueerrors
This object counts the number of Malformed TLV Value Fatal Errors detected by the session(s) (past and present) associated with this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of mplsLdpEntityDiscontinuityTime
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldpentitystatskeepalivetimerexperrors
This object counts the number of Session Keep Alive Timer Expired Errors detected by the session(s) (past and present) associated with this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of mplsLdpEntityDiscontinuityTime
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldpentitystatsshutdownreceivednotifications
This object counts the number of Shutdown Notifications received related to session(s) (past and present) associated with this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of mplsLdpEntityDiscontinuityTime
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldpentitystatsshutdownsentnotifications
This object counts the number of Shutdown Notfications sent related to session(s) (past and present) associated with this LDP Entity. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of mplsLdpEntityDiscontinuityTime
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry, self).__init__()
self.yang_name = "mplsLdpEntityEntry"
self.yang_parent_name = "mplsLdpEntityTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['mplsldpentityldpid','mplsldpentityindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('mplsldpentityldpid', (YLeaf(YType.str, 'mplsLdpEntityLdpId'), ['str'])),
('mplsldpentityindex', (YLeaf(YType.uint32, 'mplsLdpEntityIndex'), ['int'])),
('mplsldpentityprotocolversion', (YLeaf(YType.uint32, 'mplsLdpEntityProtocolVersion'), ['int'])),
('mplsldpentityadminstatus', (YLeaf(YType.enumeration, 'mplsLdpEntityAdminStatus'), [('ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB', 'MPLSLDPSTDMIB', 'MplsLdpEntityTable.MplsLdpEntityEntry.MplsLdpEntityAdminStatus')])),
('mplsldpentityoperstatus', (YLeaf(YType.enumeration, 'mplsLdpEntityOperStatus'), [('ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB', 'MPLSLDPSTDMIB', 'MplsLdpEntityTable.MplsLdpEntityEntry.MplsLdpEntityOperStatus')])),
('mplsldpentitytcpport', (YLeaf(YType.uint16, 'mplsLdpEntityTcpPort'), ['int'])),
('mplsldpentityudpdscport', (YLeaf(YType.uint16, 'mplsLdpEntityUdpDscPort'), ['int'])),
('mplsldpentitymaxpdulength', (YLeaf(YType.uint32, 'mplsLdpEntityMaxPduLength'), ['int'])),
('mplsldpentitykeepaliveholdtimer', (YLeaf(YType.uint32, 'mplsLdpEntityKeepAliveHoldTimer'), ['int'])),
('mplsldpentityhelloholdtimer', (YLeaf(YType.uint32, 'mplsLdpEntityHelloHoldTimer'), ['int'])),
('mplsldpentityinitsessionthreshold', (YLeaf(YType.int32, 'mplsLdpEntityInitSessionThreshold'), ['int'])),
('mplsldpentitylabeldistmethod', (YLeaf(YType.enumeration, 'mplsLdpEntityLabelDistMethod'), [('ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB', 'MplsLabelDistributionMethod', '')])),
('mplsldpentitylabelretentionmode', (YLeaf(YType.enumeration, 'mplsLdpEntityLabelRetentionMode'), [('ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB', 'MplsRetentionMode', '')])),
('mplsldpentitypathvectorlimit', (YLeaf(YType.int32, 'mplsLdpEntityPathVectorLimit'), ['int'])),
('mplsldpentityhopcountlimit', (YLeaf(YType.int32, 'mplsLdpEntityHopCountLimit'), ['int'])),
('mplsldpentitytransportaddrkind', (YLeaf(YType.enumeration, 'mplsLdpEntityTransportAddrKind'), [('ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB', 'MPLSLDPSTDMIB', 'MplsLdpEntityTable.MplsLdpEntityEntry.MplsLdpEntityTransportAddrKind')])),
('mplsldpentitytargetpeer', (YLeaf(YType.boolean, 'mplsLdpEntityTargetPeer'), ['bool'])),
('mplsldpentitytargetpeeraddrtype', (YLeaf(YType.enumeration, 'mplsLdpEntityTargetPeerAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])),
('mplsldpentitytargetpeeraddr', (YLeaf(YType.str, 'mplsLdpEntityTargetPeerAddr'), ['str'])),
('mplsldpentitylabeltype', (YLeaf(YType.enumeration, 'mplsLdpEntityLabelType'), [('ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB', 'MplsLdpLabelType', '')])),
('mplsldpentitydiscontinuitytime', (YLeaf(YType.uint32, 'mplsLdpEntityDiscontinuityTime'), ['int'])),
('mplsldpentitystoragetype', (YLeaf(YType.enumeration, 'mplsLdpEntityStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])),
('mplsldpentityrowstatus', (YLeaf(YType.enumeration, 'mplsLdpEntityRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
('mplsldpentitystatssessionattempts', (YLeaf(YType.uint32, 'mplsLdpEntityStatsSessionAttempts'), ['int'])),
('mplsldpentitystatssessionrejectednohelloerrors', (YLeaf(YType.uint32, 'mplsLdpEntityStatsSessionRejectedNoHelloErrors'), ['int'])),
('mplsldpentitystatssessionrejectedaderrors', (YLeaf(YType.uint32, 'mplsLdpEntityStatsSessionRejectedAdErrors'), ['int'])),
('mplsldpentitystatssessionrejectedmaxpduerrors', (YLeaf(YType.uint32, 'mplsLdpEntityStatsSessionRejectedMaxPduErrors'), ['int'])),
('mplsldpentitystatssessionrejectedlrerrors', (YLeaf(YType.uint32, 'mplsLdpEntityStatsSessionRejectedLRErrors'), ['int'])),
('mplsldpentitystatsbadldpidentifiererrors', (YLeaf(YType.uint32, 'mplsLdpEntityStatsBadLdpIdentifierErrors'), ['int'])),
('mplsldpentitystatsbadpdulengtherrors', (YLeaf(YType.uint32, 'mplsLdpEntityStatsBadPduLengthErrors'), ['int'])),
('mplsldpentitystatsbadmessagelengtherrors', (YLeaf(YType.uint32, 'mplsLdpEntityStatsBadMessageLengthErrors'), ['int'])),
('mplsldpentitystatsbadtlvlengtherrors', (YLeaf(YType.uint32, 'mplsLdpEntityStatsBadTlvLengthErrors'), ['int'])),
('mplsldpentitystatsmalformedtlvvalueerrors', (YLeaf(YType.uint32, 'mplsLdpEntityStatsMalformedTlvValueErrors'), ['int'])),
('mplsldpentitystatskeepalivetimerexperrors', (YLeaf(YType.uint32, 'mplsLdpEntityStatsKeepAliveTimerExpErrors'), ['int'])),
('mplsldpentitystatsshutdownreceivednotifications', (YLeaf(YType.uint32, 'mplsLdpEntityStatsShutdownReceivedNotifications'), ['int'])),
('mplsldpentitystatsshutdownsentnotifications', (YLeaf(YType.uint32, 'mplsLdpEntityStatsShutdownSentNotifications'), ['int'])),
])
self.mplsldpentityldpid = None
self.mplsldpentityindex = None
self.mplsldpentityprotocolversion = None
self.mplsldpentityadminstatus = None
self.mplsldpentityoperstatus = None
self.mplsldpentitytcpport = None
self.mplsldpentityudpdscport = None
self.mplsldpentitymaxpdulength = None
self.mplsldpentitykeepaliveholdtimer = None
self.mplsldpentityhelloholdtimer = None
self.mplsldpentityinitsessionthreshold = None
self.mplsldpentitylabeldistmethod = None
self.mplsldpentitylabelretentionmode = None
self.mplsldpentitypathvectorlimit = None
self.mplsldpentityhopcountlimit = None
self.mplsldpentitytransportaddrkind = None
self.mplsldpentitytargetpeer = None
self.mplsldpentitytargetpeeraddrtype = None
self.mplsldpentitytargetpeeraddr = None
self.mplsldpentitylabeltype = None
self.mplsldpentitydiscontinuitytime = None
self.mplsldpentitystoragetype = None
self.mplsldpentityrowstatus = None
self.mplsldpentitystatssessionattempts = None
self.mplsldpentitystatssessionrejectednohelloerrors = None
self.mplsldpentitystatssessionrejectedaderrors = None
self.mplsldpentitystatssessionrejectedmaxpduerrors = None
self.mplsldpentitystatssessionrejectedlrerrors = None
self.mplsldpentitystatsbadldpidentifiererrors = None
self.mplsldpentitystatsbadpdulengtherrors = None
self.mplsldpentitystatsbadmessagelengtherrors = None
self.mplsldpentitystatsbadtlvlengtherrors = None
self.mplsldpentitystatsmalformedtlvvalueerrors = None
self.mplsldpentitystatskeepalivetimerexperrors = None
self.mplsldpentitystatsshutdownreceivednotifications = None
self.mplsldpentitystatsshutdownsentnotifications = None
self._segment_path = lambda: "mplsLdpEntityEntry" + "[mplsLdpEntityLdpId='" + str(self.mplsldpentityldpid) + "']" + "[mplsLdpEntityIndex='" + str(self.mplsldpentityindex) + "']"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/mplsLdpEntityTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry, [u'mplsldpentityldpid', u'mplsldpentityindex', u'mplsldpentityprotocolversion', u'mplsldpentityadminstatus', u'mplsldpentityoperstatus', u'mplsldpentitytcpport', u'mplsldpentityudpdscport', u'mplsldpentitymaxpdulength', u'mplsldpentitykeepaliveholdtimer', u'mplsldpentityhelloholdtimer', u'mplsldpentityinitsessionthreshold', u'mplsldpentitylabeldistmethod', u'mplsldpentitylabelretentionmode', u'mplsldpentitypathvectorlimit', u'mplsldpentityhopcountlimit', u'mplsldpentitytransportaddrkind', u'mplsldpentitytargetpeer', u'mplsldpentitytargetpeeraddrtype', u'mplsldpentitytargetpeeraddr', u'mplsldpentitylabeltype', u'mplsldpentitydiscontinuitytime', u'mplsldpentitystoragetype', u'mplsldpentityrowstatus', u'mplsldpentitystatssessionattempts', u'mplsldpentitystatssessionrejectednohelloerrors', u'mplsldpentitystatssessionrejectedaderrors', u'mplsldpentitystatssessionrejectedmaxpduerrors', u'mplsldpentitystatssessionrejectedlrerrors', u'mplsldpentitystatsbadldpidentifiererrors', u'mplsldpentitystatsbadpdulengtherrors', u'mplsldpentitystatsbadmessagelengtherrors', u'mplsldpentitystatsbadtlvlengtherrors', u'mplsldpentitystatsmalformedtlvvalueerrors', u'mplsldpentitystatskeepalivetimerexperrors', u'mplsldpentitystatsshutdownreceivednotifications', u'mplsldpentitystatsshutdownsentnotifications'], name, value)
class MplsLdpEntityAdminStatus(Enum):
"""
MplsLdpEntityAdminStatus (Enum Class)
The administrative status of this LDP Entity.
If this object is changed from 'enable' to 'disable'
and this entity has already attempted to establish
contact with a Peer, then all contact with that
Peer is lost and all information from that Peer
needs to be removed from the MIB. (This implies
that the network management subsystem should clean
up any related entry in the mplsLdpPeerTable. This
further implies that a 'tear\-down' for that session
is issued and the session and all information related
to that session cease to exist).
At this point the operator is able to change values
which are related to this entity.
When the admin status is set back to 'enable', then
this Entity will attempt to establish a new session
with the Peer.
.. data:: enable = 1
.. data:: disable = 2
"""
enable = Enum.YLeaf(1, "enable")
disable = Enum.YLeaf(2, "disable")
class MplsLdpEntityOperStatus(Enum):
"""
MplsLdpEntityOperStatus (Enum Class)
The operational status of this LDP Entity.
The value of unknown(1) indicates that the
operational status cannot be determined at
this time. The value of unknown should be
a transient condition before changing
to enabled(2) or disabled(3).
.. data:: unknown = 1
.. data:: enabled = 2
.. data:: disabled = 3
"""
unknown = Enum.YLeaf(1, "unknown")
enabled = Enum.YLeaf(2, "enabled")
disabled = Enum.YLeaf(3, "disabled")
class MplsLdpEntityTransportAddrKind(Enum):
"""
MplsLdpEntityTransportAddrKind (Enum Class)
This specifies whether the loopback or interface
address is to be used as the transport address
in the transport address TLV of the
hello message.
If the value is interface(1), then the IP
address of the interface from which hello
messages are sent is used as the transport
address in the hello message.
Otherwise, if the value is loopback(2), then the IP
address of the loopback interface is used as the
transport address in the hello message.
.. data:: interface = 1
.. data:: loopback = 2
"""
interface = Enum.YLeaf(1, "interface")
loopback = Enum.YLeaf(2, "loopback")
class MplsLdpPeerTable(Entity):
"""
Information about LDP peers known by Entities in
the mplsLdpEntityTable. The information in this table
is based on information from the Entity\-Peer interaction
during session initialization but is not appropriate
for the mplsLdpSessionTable, because objects in this
table may or may not be used in session establishment.
.. attribute:: mplsldppeerentry
Information about a single Peer which is related to a Session. This table is augmented by the mplsLdpSessionTable
**type**\: list of :py:class:`MplsLdpPeerEntry <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpPeerTable.MplsLdpPeerEntry>`
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsLdpPeerTable, self).__init__()
self.yang_name = "mplsLdpPeerTable"
self.yang_parent_name = "MPLS-LDP-STD-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("mplsLdpPeerEntry", ("mplsldppeerentry", MPLSLDPSTDMIB.MplsLdpPeerTable.MplsLdpPeerEntry))])
self._leafs = OrderedDict()
self.mplsldppeerentry = YList(self)
self._segment_path = lambda: "mplsLdpPeerTable"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsLdpPeerTable, [], name, value)
class MplsLdpPeerEntry(Entity):
"""
Information about a single Peer which is related
to a Session. This table is augmented by
the mplsLdpSessionTable.
.. attribute:: mplsldpentityldpid (key)
**type**\: str
**refers to**\: :py:class:`mplsldpentityldpid <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry>`
.. attribute:: mplsldpentityindex (key)
**type**\: int
**range:** 1..4294967295
**refers to**\: :py:class:`mplsldpentityindex <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry>`
.. attribute:: mplsldppeerldpid (key)
The LDP identifier of this LDP Peer
**type**\: str
.. attribute:: mplsldppeerlabeldistmethod
For any given LDP session, the method of label distribution must be specified
**type**\: :py:class:`MplsLabelDistributionMethod <ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB.MplsLabelDistributionMethod>`
.. attribute:: mplsldppeerpathvectorlimit
If the value of this object is 0 (zero) then Loop Dection for Path Vectors for this Peer is disabled. Otherwise, if this object has a value greater than zero, then Loop Dection for Path Vectors for this Peer is enabled and the Path Vector Limit is this value
**type**\: int
**range:** 0..255
.. attribute:: mplsldppeertransportaddrtype
The type of the Internet address for the mplsLdpPeerTransportAddr object. The LDP specification describes this as being either an IPv4 Transport Address or IPv6 Transport Address which is used in opening the LDP session's TCP connection, or if the optional TLV is not present, then this is the IPv4/IPv6 source address for the UPD packet carrying the Hellos. This object specifies how the value of the mplsLdpPeerTransportAddr object should be interpreted
**type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>`
.. attribute:: mplsldppeertransportaddr
The Internet address advertised by the peer in the Hello Message or the Hello source address. The type of this address is specified by the value of the mplsLdpPeerTransportAddrType object
**type**\: str
**length:** 0..255
.. attribute:: mplsldpsessionstatelastchange
The value of sysUpTime at the time this Session entered its current state as denoted by the mplsLdpSessionState object
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldpsessionstate
The current state of the session, all of the states 1 to 5 are based on the state machine for session negotiation behavior
**type**\: :py:class:`MplsLdpSessionState <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpPeerTable.MplsLdpPeerEntry.MplsLdpSessionState>`
.. attribute:: mplsldpsessionrole
During session establishment the LSR/LER takes either the active role or the passive role based on address comparisons. This object indicates whether this LSR/LER was behaving in an active role or passive role during this session's establishment. The value of unknown(1), indicates that the role is not able to be determined at the present time
**type**\: :py:class:`MplsLdpSessionRole <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpPeerTable.MplsLdpPeerEntry.MplsLdpSessionRole>`
.. attribute:: mplsldpsessionprotocolversion
The version of the LDP Protocol which this session is using. This is the version of the LDP protocol which has been negotiated during session initialization
**type**\: int
**range:** 1..65535
.. attribute:: mplsldpsessionkeepaliveholdtimerem
The keep alive hold time remaining for this session
**type**\: int
**range:** 0..2147483647
.. attribute:: mplsldpsessionkeepalivetime
The negotiated KeepAlive Time which represents the amount of seconds between keep alive messages. The mplsLdpEntityKeepAliveHoldTimer related to this Session is the value that was proposed as the KeepAlive Time for this session. This value is negotiated during session initialization between the entity's proposed value (i.e., the value configured in mplsLdpEntityKeepAliveHoldTimer) and the peer's proposed KeepAlive Hold Timer value. This value is the smaller of the two proposed values
**type**\: int
**range:** 1..65535
**units**\: seconds
.. attribute:: mplsldpsessionmaxpdulength
The value of maximum allowable length for LDP PDUs for this session. This value may have been negotiated during the Session Initialization. This object is related to the mplsLdpEntityMaxPduLength object. The mplsLdpEntityMaxPduLength object specifies the requested LDP PDU length, and this object reflects the negotiated LDP PDU length between the Entity and the Peer
**type**\: int
**range:** 1..65535
**units**\: octets
.. attribute:: mplsldpsessiondiscontinuitytime
The value of sysUpTime on the most recent occasion at which any one or more of this session's counters suffered a discontinuity. The relevant counters are the specific instances associated with this session of any Counter32 object contained in the mplsLdpSessionStatsTable. The initial value of this object is the value of sysUpTime when the entry was created in this table. Also, a command generator can distinguish when a session between a given Entity and Peer goes away and a new session is established. This value would change and thus indicate to the command generator that this is a different session
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldpsessionstatsunknownmestypeerrors
This object counts the number of Unknown Message Type Errors detected by this LSR/LER during this session. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of mplsLdpSessionDiscontinuityTime
**type**\: int
**range:** 0..4294967295
.. attribute:: mplsldpsessionstatsunknowntlverrors
This object counts the number of Unknown TLV Errors detected by this LSR/LER during this session. Discontinuities in the value of this counter can occur at re\-initialization of the management system, and at other times as indicated by the value of mplsLdpSessionDiscontinuityTime
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsLdpPeerTable.MplsLdpPeerEntry, self).__init__()
self.yang_name = "mplsLdpPeerEntry"
self.yang_parent_name = "mplsLdpPeerTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['mplsldpentityldpid','mplsldpentityindex','mplsldppeerldpid']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('mplsldpentityldpid', (YLeaf(YType.str, 'mplsLdpEntityLdpId'), ['str'])),
('mplsldpentityindex', (YLeaf(YType.str, 'mplsLdpEntityIndex'), ['int'])),
('mplsldppeerldpid', (YLeaf(YType.str, 'mplsLdpPeerLdpId'), ['str'])),
('mplsldppeerlabeldistmethod', (YLeaf(YType.enumeration, 'mplsLdpPeerLabelDistMethod'), [('ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB', 'MplsLabelDistributionMethod', '')])),
('mplsldppeerpathvectorlimit', (YLeaf(YType.int32, 'mplsLdpPeerPathVectorLimit'), ['int'])),
('mplsldppeertransportaddrtype', (YLeaf(YType.enumeration, 'mplsLdpPeerTransportAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])),
('mplsldppeertransportaddr', (YLeaf(YType.str, 'mplsLdpPeerTransportAddr'), ['str'])),
('mplsldpsessionstatelastchange', (YLeaf(YType.uint32, 'mplsLdpSessionStateLastChange'), ['int'])),
('mplsldpsessionstate', (YLeaf(YType.enumeration, 'mplsLdpSessionState'), [('ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB', 'MPLSLDPSTDMIB', 'MplsLdpPeerTable.MplsLdpPeerEntry.MplsLdpSessionState')])),
('mplsldpsessionrole', (YLeaf(YType.enumeration, 'mplsLdpSessionRole'), [('ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB', 'MPLSLDPSTDMIB', 'MplsLdpPeerTable.MplsLdpPeerEntry.MplsLdpSessionRole')])),
('mplsldpsessionprotocolversion', (YLeaf(YType.uint32, 'mplsLdpSessionProtocolVersion'), ['int'])),
('mplsldpsessionkeepaliveholdtimerem', (YLeaf(YType.int32, 'mplsLdpSessionKeepAliveHoldTimeRem'), ['int'])),
('mplsldpsessionkeepalivetime', (YLeaf(YType.uint32, 'mplsLdpSessionKeepAliveTime'), ['int'])),
('mplsldpsessionmaxpdulength', (YLeaf(YType.uint32, 'mplsLdpSessionMaxPduLength'), ['int'])),
('mplsldpsessiondiscontinuitytime', (YLeaf(YType.uint32, 'mplsLdpSessionDiscontinuityTime'), ['int'])),
('mplsldpsessionstatsunknownmestypeerrors', (YLeaf(YType.uint32, 'mplsLdpSessionStatsUnknownMesTypeErrors'), ['int'])),
('mplsldpsessionstatsunknowntlverrors', (YLeaf(YType.uint32, 'mplsLdpSessionStatsUnknownTlvErrors'), ['int'])),
])
self.mplsldpentityldpid = None
self.mplsldpentityindex = None
self.mplsldppeerldpid = None
self.mplsldppeerlabeldistmethod = None
self.mplsldppeerpathvectorlimit = None
self.mplsldppeertransportaddrtype = None
self.mplsldppeertransportaddr = None
self.mplsldpsessionstatelastchange = None
self.mplsldpsessionstate = None
self.mplsldpsessionrole = None
self.mplsldpsessionprotocolversion = None
self.mplsldpsessionkeepaliveholdtimerem = None
self.mplsldpsessionkeepalivetime = None
self.mplsldpsessionmaxpdulength = None
self.mplsldpsessiondiscontinuitytime = None
self.mplsldpsessionstatsunknownmestypeerrors = None
self.mplsldpsessionstatsunknowntlverrors = None
self._segment_path = lambda: "mplsLdpPeerEntry" + "[mplsLdpEntityLdpId='" + str(self.mplsldpentityldpid) + "']" + "[mplsLdpEntityIndex='" + str(self.mplsldpentityindex) + "']" + "[mplsLdpPeerLdpId='" + str(self.mplsldppeerldpid) + "']"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/mplsLdpPeerTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsLdpPeerTable.MplsLdpPeerEntry, [u'mplsldpentityldpid', u'mplsldpentityindex', u'mplsldppeerldpid', u'mplsldppeerlabeldistmethod', u'mplsldppeerpathvectorlimit', u'mplsldppeertransportaddrtype', u'mplsldppeertransportaddr', u'mplsldpsessionstatelastchange', u'mplsldpsessionstate', u'mplsldpsessionrole', u'mplsldpsessionprotocolversion', u'mplsldpsessionkeepaliveholdtimerem', u'mplsldpsessionkeepalivetime', u'mplsldpsessionmaxpdulength', u'mplsldpsessiondiscontinuitytime', u'mplsldpsessionstatsunknownmestypeerrors', u'mplsldpsessionstatsunknowntlverrors'], name, value)
class MplsLdpSessionRole(Enum):
"""
MplsLdpSessionRole (Enum Class)
During session establishment the LSR/LER takes either
the active role or the passive role based on address
comparisons. This object indicates whether this LSR/LER
was behaving in an active role or passive role during
this session's establishment.
The value of unknown(1), indicates that the role is not
able to be determined at the present time.
.. data:: unknown = 1
.. data:: active = 2
.. data:: passive = 3
"""
unknown = Enum.YLeaf(1, "unknown")
active = Enum.YLeaf(2, "active")
passive = Enum.YLeaf(3, "passive")
class MplsLdpSessionState(Enum):
"""
MplsLdpSessionState (Enum Class)
The current state of the session, all of the
states 1 to 5 are based on the state machine
for session negotiation behavior.
.. data:: nonexistent = 1
.. data:: initialized = 2
.. data:: openrec = 3
.. data:: opensent = 4
.. data:: operational = 5
"""
nonexistent = Enum.YLeaf(1, "nonexistent")
initialized = Enum.YLeaf(2, "initialized")
openrec = Enum.YLeaf(3, "openrec")
opensent = Enum.YLeaf(4, "opensent")
operational = Enum.YLeaf(5, "operational")
class MplsLdpHelloAdjacencyTable(Entity):
"""
A table of Hello Adjacencies for Sessions.
.. attribute:: mplsldphelloadjacencyentry
Each row represents a single LDP Hello Adjacency. An LDP Session can have one or more Hello Adjacencies
**type**\: list of :py:class:`MplsLdpHelloAdjacencyEntry <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpHelloAdjacencyTable.MplsLdpHelloAdjacencyEntry>`
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsLdpHelloAdjacencyTable, self).__init__()
self.yang_name = "mplsLdpHelloAdjacencyTable"
self.yang_parent_name = "MPLS-LDP-STD-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("mplsLdpHelloAdjacencyEntry", ("mplsldphelloadjacencyentry", MPLSLDPSTDMIB.MplsLdpHelloAdjacencyTable.MplsLdpHelloAdjacencyEntry))])
self._leafs = OrderedDict()
self.mplsldphelloadjacencyentry = YList(self)
self._segment_path = lambda: "mplsLdpHelloAdjacencyTable"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsLdpHelloAdjacencyTable, [], name, value)
class MplsLdpHelloAdjacencyEntry(Entity):
"""
Each row represents a single LDP Hello Adjacency.
An LDP Session can have one or more Hello
Adjacencies.
.. attribute:: mplsldpentityldpid (key)
**type**\: str
**refers to**\: :py:class:`mplsldpentityldpid <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry>`
.. attribute:: mplsldpentityindex (key)
**type**\: int
**range:** 1..4294967295
**refers to**\: :py:class:`mplsldpentityindex <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry>`
.. attribute:: mplsldppeerldpid (key)
**type**\: str
**refers to**\: :py:class:`mplsldppeerldpid <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpPeerTable.MplsLdpPeerEntry>`
.. attribute:: mplsldphelloadjacencyindex (key)
An identifier for this specific adjacency
**type**\: int
**range:** 1..4294967295
.. attribute:: mplsldphelloadjacencyholdtimerem
If the value of this object is 65535, this means that the hold time is infinite (i.e., wait forever). Otherwise, the time remaining for this Hello Adjacency to receive its next Hello Message. This interval will change when the 'next' Hello Message which corresponds to this Hello Adjacency is received unless it is infinite
**type**\: int
**range:** 0..2147483647
**units**\: seconds
.. attribute:: mplsldphelloadjacencyholdtime
The Hello hold time which is negotiated between the Entity and the Peer. The entity associated with this Hello Adjacency issues a proposed Hello Hold Time value in the mplsLdpEntityHelloHoldTimer object. The peer also proposes a value and this object represents the negotiated value. A value of 0 means the default, which is 15 seconds for Link Hellos and 45 seconds for Targeted Hellos. A value of 65535 indicates an infinite hold time
**type**\: int
**range:** 0..65535
.. attribute:: mplsldphelloadjacencytype
This adjacency is the result of a 'link' hello if the value of this object is link(1). Otherwise, it is a result of a 'targeted' hello, targeted(2)
**type**\: :py:class:`MplsLdpHelloAdjacencyType <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpHelloAdjacencyTable.MplsLdpHelloAdjacencyEntry.MplsLdpHelloAdjacencyType>`
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsLdpHelloAdjacencyTable.MplsLdpHelloAdjacencyEntry, self).__init__()
self.yang_name = "mplsLdpHelloAdjacencyEntry"
self.yang_parent_name = "mplsLdpHelloAdjacencyTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['mplsldpentityldpid','mplsldpentityindex','mplsldppeerldpid','mplsldphelloadjacencyindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('mplsldpentityldpid', (YLeaf(YType.str, 'mplsLdpEntityLdpId'), ['str'])),
('mplsldpentityindex', (YLeaf(YType.str, 'mplsLdpEntityIndex'), ['int'])),
('mplsldppeerldpid', (YLeaf(YType.str, 'mplsLdpPeerLdpId'), ['str'])),
('mplsldphelloadjacencyindex', (YLeaf(YType.uint32, 'mplsLdpHelloAdjacencyIndex'), ['int'])),
('mplsldphelloadjacencyholdtimerem', (YLeaf(YType.int32, 'mplsLdpHelloAdjacencyHoldTimeRem'), ['int'])),
('mplsldphelloadjacencyholdtime', (YLeaf(YType.uint32, 'mplsLdpHelloAdjacencyHoldTime'), ['int'])),
('mplsldphelloadjacencytype', (YLeaf(YType.enumeration, 'mplsLdpHelloAdjacencyType'), [('ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB', 'MPLSLDPSTDMIB', 'MplsLdpHelloAdjacencyTable.MplsLdpHelloAdjacencyEntry.MplsLdpHelloAdjacencyType')])),
])
self.mplsldpentityldpid = None
self.mplsldpentityindex = None
self.mplsldppeerldpid = None
self.mplsldphelloadjacencyindex = None
self.mplsldphelloadjacencyholdtimerem = None
self.mplsldphelloadjacencyholdtime = None
self.mplsldphelloadjacencytype = None
self._segment_path = lambda: "mplsLdpHelloAdjacencyEntry" + "[mplsLdpEntityLdpId='" + str(self.mplsldpentityldpid) + "']" + "[mplsLdpEntityIndex='" + str(self.mplsldpentityindex) + "']" + "[mplsLdpPeerLdpId='" + str(self.mplsldppeerldpid) + "']" + "[mplsLdpHelloAdjacencyIndex='" + str(self.mplsldphelloadjacencyindex) + "']"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/mplsLdpHelloAdjacencyTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsLdpHelloAdjacencyTable.MplsLdpHelloAdjacencyEntry, [u'mplsldpentityldpid', u'mplsldpentityindex', u'mplsldppeerldpid', u'mplsldphelloadjacencyindex', u'mplsldphelloadjacencyholdtimerem', u'mplsldphelloadjacencyholdtime', u'mplsldphelloadjacencytype'], name, value)
class MplsLdpHelloAdjacencyType(Enum):
"""
MplsLdpHelloAdjacencyType (Enum Class)
This adjacency is the result of a 'link'
hello if the value of this object is link(1).
Otherwise, it is a result of a 'targeted'
hello, targeted(2).
.. data:: link = 1
.. data:: targeted = 2
"""
link = Enum.YLeaf(1, "link")
targeted = Enum.YLeaf(2, "targeted")
class MplsInSegmentLdpLspTable(Entity):
"""
A table of LDP LSP's which
map to the mplsInSegmentTable in the
MPLS\-LSR\-STD\-MIB module.
.. attribute:: mplsinsegmentldplspentry
An entry in this table represents information on a single LDP LSP which is represented by a session's index triple (mplsLdpEntityLdpId, mplsLdpEntityIndex, mplsLdpPeerLdpId) AND the index for the mplsInSegmentTable (mplsInSegmentLdpLspLabelIndex) from the MPLS\-LSR\-STD\-MIB. The information contained in a row is read\-only
**type**\: list of :py:class:`MplsInSegmentLdpLspEntry <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsInSegmentLdpLspTable.MplsInSegmentLdpLspEntry>`
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsInSegmentLdpLspTable, self).__init__()
self.yang_name = "mplsInSegmentLdpLspTable"
self.yang_parent_name = "MPLS-LDP-STD-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("mplsInSegmentLdpLspEntry", ("mplsinsegmentldplspentry", MPLSLDPSTDMIB.MplsInSegmentLdpLspTable.MplsInSegmentLdpLspEntry))])
self._leafs = OrderedDict()
self.mplsinsegmentldplspentry = YList(self)
self._segment_path = lambda: "mplsInSegmentLdpLspTable"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsInSegmentLdpLspTable, [], name, value)
class MplsInSegmentLdpLspEntry(Entity):
"""
An entry in this table represents information
on a single LDP LSP which is represented by
a session's index triple (mplsLdpEntityLdpId,
mplsLdpEntityIndex, mplsLdpPeerLdpId) AND the
index for the mplsInSegmentTable
(mplsInSegmentLdpLspLabelIndex) from the
MPLS\-LSR\-STD\-MIB.
The information contained in a row is read\-only.
.. attribute:: mplsldpentityldpid (key)
**type**\: str
**refers to**\: :py:class:`mplsldpentityldpid <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry>`
.. attribute:: mplsldpentityindex (key)
**type**\: int
**range:** 1..4294967295
**refers to**\: :py:class:`mplsldpentityindex <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry>`
.. attribute:: mplsldppeerldpid (key)
**type**\: str
**refers to**\: :py:class:`mplsldppeerldpid <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpPeerTable.MplsLdpPeerEntry>`
.. attribute:: mplsinsegmentldplspindex (key)
This contains the same value as the mplsInSegmentIndex in the MPLS\-LSR\-STD\-MIB's mplsInSegmentTable
**type**\: str
**length:** 1..24
.. attribute:: mplsinsegmentldplsplabeltype
The Layer 2 Label Type
**type**\: :py:class:`MplsLdpLabelType <ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB.MplsLdpLabelType>`
.. attribute:: mplsinsegmentldplsptype
The type of LSP connection
**type**\: :py:class:`MplsLspType <ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB.MplsLspType>`
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsInSegmentLdpLspTable.MplsInSegmentLdpLspEntry, self).__init__()
self.yang_name = "mplsInSegmentLdpLspEntry"
self.yang_parent_name = "mplsInSegmentLdpLspTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['mplsldpentityldpid','mplsldpentityindex','mplsldppeerldpid','mplsinsegmentldplspindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('mplsldpentityldpid', (YLeaf(YType.str, 'mplsLdpEntityLdpId'), ['str'])),
('mplsldpentityindex', (YLeaf(YType.str, 'mplsLdpEntityIndex'), ['int'])),
('mplsldppeerldpid', (YLeaf(YType.str, 'mplsLdpPeerLdpId'), ['str'])),
('mplsinsegmentldplspindex', (YLeaf(YType.str, 'mplsInSegmentLdpLspIndex'), ['str'])),
('mplsinsegmentldplsplabeltype', (YLeaf(YType.enumeration, 'mplsInSegmentLdpLspLabelType'), [('ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB', 'MplsLdpLabelType', '')])),
('mplsinsegmentldplsptype', (YLeaf(YType.enumeration, 'mplsInSegmentLdpLspType'), [('ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB', 'MplsLspType', '')])),
])
self.mplsldpentityldpid = None
self.mplsldpentityindex = None
self.mplsldppeerldpid = None
self.mplsinsegmentldplspindex = None
self.mplsinsegmentldplsplabeltype = None
self.mplsinsegmentldplsptype = None
self._segment_path = lambda: "mplsInSegmentLdpLspEntry" + "[mplsLdpEntityLdpId='" + str(self.mplsldpentityldpid) + "']" + "[mplsLdpEntityIndex='" + str(self.mplsldpentityindex) + "']" + "[mplsLdpPeerLdpId='" + str(self.mplsldppeerldpid) + "']" + "[mplsInSegmentLdpLspIndex='" + str(self.mplsinsegmentldplspindex) + "']"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/mplsInSegmentLdpLspTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsInSegmentLdpLspTable.MplsInSegmentLdpLspEntry, [u'mplsldpentityldpid', u'mplsldpentityindex', u'mplsldppeerldpid', u'mplsinsegmentldplspindex', u'mplsinsegmentldplsplabeltype', u'mplsinsegmentldplsptype'], name, value)
class MplsOutSegmentLdpLspTable(Entity):
"""
A table of LDP LSP's which
map to the mplsOutSegmentTable in the
MPLS\-LSR\-STD\-MIB.
.. attribute:: mplsoutsegmentldplspentry
An entry in this table represents information on a single LDP LSP which is represented by a session's index triple (mplsLdpEntityLdpId, mplsLdpEntityIndex, mplsLdpPeerLdpId) AND the index (mplsOutSegmentLdpLspIndex) for the mplsOutSegmentTable. The information contained in a row is read\-only
**type**\: list of :py:class:`MplsOutSegmentLdpLspEntry <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsOutSegmentLdpLspTable.MplsOutSegmentLdpLspEntry>`
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsOutSegmentLdpLspTable, self).__init__()
self.yang_name = "mplsOutSegmentLdpLspTable"
self.yang_parent_name = "MPLS-LDP-STD-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("mplsOutSegmentLdpLspEntry", ("mplsoutsegmentldplspentry", MPLSLDPSTDMIB.MplsOutSegmentLdpLspTable.MplsOutSegmentLdpLspEntry))])
self._leafs = OrderedDict()
self.mplsoutsegmentldplspentry = YList(self)
self._segment_path = lambda: "mplsOutSegmentLdpLspTable"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsOutSegmentLdpLspTable, [], name, value)
class MplsOutSegmentLdpLspEntry(Entity):
"""
An entry in this table represents information
on a single LDP LSP which is represented by
a session's index triple (mplsLdpEntityLdpId,
mplsLdpEntityIndex, mplsLdpPeerLdpId) AND the
index (mplsOutSegmentLdpLspIndex)
for the mplsOutSegmentTable.
The information contained in a row is read\-only.
.. attribute:: mplsldpentityldpid (key)
**type**\: str
**refers to**\: :py:class:`mplsldpentityldpid <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry>`
.. attribute:: mplsldpentityindex (key)
**type**\: int
**range:** 1..4294967295
**refers to**\: :py:class:`mplsldpentityindex <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry>`
.. attribute:: mplsldppeerldpid (key)
**type**\: str
**refers to**\: :py:class:`mplsldppeerldpid <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpPeerTable.MplsLdpPeerEntry>`
.. attribute:: mplsoutsegmentldplspindex (key)
This contains the same value as the mplsOutSegmentIndex in the MPLS\-LSR\-STD\-MIB's mplsOutSegmentTable
**type**\: str
**length:** 1..24
.. attribute:: mplsoutsegmentldplsplabeltype
The Layer 2 Label Type
**type**\: :py:class:`MplsLdpLabelType <ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB.MplsLdpLabelType>`
.. attribute:: mplsoutsegmentldplsptype
The type of LSP connection
**type**\: :py:class:`MplsLspType <ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB.MplsLspType>`
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsOutSegmentLdpLspTable.MplsOutSegmentLdpLspEntry, self).__init__()
self.yang_name = "mplsOutSegmentLdpLspEntry"
self.yang_parent_name = "mplsOutSegmentLdpLspTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['mplsldpentityldpid','mplsldpentityindex','mplsldppeerldpid','mplsoutsegmentldplspindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('mplsldpentityldpid', (YLeaf(YType.str, 'mplsLdpEntityLdpId'), ['str'])),
('mplsldpentityindex', (YLeaf(YType.str, 'mplsLdpEntityIndex'), ['int'])),
('mplsldppeerldpid', (YLeaf(YType.str, 'mplsLdpPeerLdpId'), ['str'])),
('mplsoutsegmentldplspindex', (YLeaf(YType.str, 'mplsOutSegmentLdpLspIndex'), ['str'])),
('mplsoutsegmentldplsplabeltype', (YLeaf(YType.enumeration, 'mplsOutSegmentLdpLspLabelType'), [('ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB', 'MplsLdpLabelType', '')])),
('mplsoutsegmentldplsptype', (YLeaf(YType.enumeration, 'mplsOutSegmentLdpLspType'), [('ydk.models.cisco_ios_xe.MPLS_TC_STD_MIB', 'MplsLspType', '')])),
])
self.mplsldpentityldpid = None
self.mplsldpentityindex = None
self.mplsldppeerldpid = None
self.mplsoutsegmentldplspindex = None
self.mplsoutsegmentldplsplabeltype = None
self.mplsoutsegmentldplsptype = None
self._segment_path = lambda: "mplsOutSegmentLdpLspEntry" + "[mplsLdpEntityLdpId='" + str(self.mplsldpentityldpid) + "']" + "[mplsLdpEntityIndex='" + str(self.mplsldpentityindex) + "']" + "[mplsLdpPeerLdpId='" + str(self.mplsldppeerldpid) + "']" + "[mplsOutSegmentLdpLspIndex='" + str(self.mplsoutsegmentldplspindex) + "']"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/mplsOutSegmentLdpLspTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsOutSegmentLdpLspTable.MplsOutSegmentLdpLspEntry, [u'mplsldpentityldpid', u'mplsldpentityindex', u'mplsldppeerldpid', u'mplsoutsegmentldplspindex', u'mplsoutsegmentldplsplabeltype', u'mplsoutsegmentldplsptype'], name, value)
class MplsFecTable(Entity):
"""
This table represents the FEC
(Forwarding Equivalence Class)
Information associated with an LSP.
.. attribute:: mplsfecentry
Each row represents a single FEC Element
**type**\: list of :py:class:`MplsFecEntry <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsFecTable.MplsFecEntry>`
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsFecTable, self).__init__()
self.yang_name = "mplsFecTable"
self.yang_parent_name = "MPLS-LDP-STD-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("mplsFecEntry", ("mplsfecentry", MPLSLDPSTDMIB.MplsFecTable.MplsFecEntry))])
self._leafs = OrderedDict()
self.mplsfecentry = YList(self)
self._segment_path = lambda: "mplsFecTable"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsFecTable, [], name, value)
class MplsFecEntry(Entity):
"""
Each row represents a single FEC Element.
.. attribute:: mplsfecindex (key)
The index which uniquely identifies this entry
**type**\: int
**range:** 1..4294967295
.. attribute:: mplsfectype
The type of the FEC. If the value of this object is 'prefix(1)' then the FEC type described by this row is an address prefix. If the value of this object is 'hostAddress(2)' then the FEC type described by this row is a host address
**type**\: :py:class:`MplsFecType <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsFecTable.MplsFecEntry.MplsFecType>`
.. attribute:: mplsfecaddrprefixlength
If the value of the 'mplsFecType' is 'hostAddress(2)' then this object is undefined. If the value of 'mplsFecType' is 'prefix(1)' then the value of this object is the length in bits of the address prefix represented by 'mplsFecAddr', or zero. If the value of this object is zero, this indicates that the prefix matches all addresses. In this case the address prefix MUST also be zero (i.e., 'mplsFecAddr' should have the value of zero.)
**type**\: int
**range:** 0..2040
.. attribute:: mplsfecaddrtype
The value of this object is the type of the Internet address. The value of this object, decides how the value of the mplsFecAddr object is interpreted
**type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>`
.. attribute:: mplsfecaddr
The value of this object is interpreted based on the value of the 'mplsFecAddrType' object. This address is then further interpretted as an being used with the address prefix, or as the host address. This further interpretation is indicated by the 'mplsFecType' object. In other words, the FEC element is populated according to the Prefix FEC Element value encoding, or the Host Address FEC Element encoding
**type**\: str
**length:** 0..255
.. attribute:: mplsfecstoragetype
The storage type for this conceptual row. Conceptual rows having the value 'permanent(4)' need not allow write\-access to any columnar objects in the row
**type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>`
.. attribute:: mplsfecrowstatus
The status of this conceptual row. If the value of this object is 'active(1)', then none of the writable objects of this entry can be modified, except to set this object to 'destroy(6)'. NOTE\: if this row is being referenced by any entry in the mplsLdpLspFecTable, then a request to destroy this row, will result in an inconsistentValue error
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsFecTable.MplsFecEntry, self).__init__()
self.yang_name = "mplsFecEntry"
self.yang_parent_name = "mplsFecTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['mplsfecindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('mplsfecindex', (YLeaf(YType.uint32, 'mplsFecIndex'), ['int'])),
('mplsfectype', (YLeaf(YType.enumeration, 'mplsFecType'), [('ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB', 'MPLSLDPSTDMIB', 'MplsFecTable.MplsFecEntry.MplsFecType')])),
('mplsfecaddrprefixlength', (YLeaf(YType.uint32, 'mplsFecAddrPrefixLength'), ['int'])),
('mplsfecaddrtype', (YLeaf(YType.enumeration, 'mplsFecAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])),
('mplsfecaddr', (YLeaf(YType.str, 'mplsFecAddr'), ['str'])),
('mplsfecstoragetype', (YLeaf(YType.enumeration, 'mplsFecStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])),
('mplsfecrowstatus', (YLeaf(YType.enumeration, 'mplsFecRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.mplsfecindex = None
self.mplsfectype = None
self.mplsfecaddrprefixlength = None
self.mplsfecaddrtype = None
self.mplsfecaddr = None
self.mplsfecstoragetype = None
self.mplsfecrowstatus = None
self._segment_path = lambda: "mplsFecEntry" + "[mplsFecIndex='" + str(self.mplsfecindex) + "']"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/mplsFecTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsFecTable.MplsFecEntry, [u'mplsfecindex', u'mplsfectype', u'mplsfecaddrprefixlength', u'mplsfecaddrtype', u'mplsfecaddr', u'mplsfecstoragetype', u'mplsfecrowstatus'], name, value)
class MplsFecType(Enum):
"""
MplsFecType (Enum Class)
The type of the FEC. If the value of this object
is 'prefix(1)' then the FEC type described by this
row is an address prefix.
If the value of this object is 'hostAddress(2)' then
the FEC type described by this row is a host address.
.. data:: prefix = 1
.. data:: hostAddress = 2
"""
prefix = Enum.YLeaf(1, "prefix")
hostAddress = Enum.YLeaf(2, "hostAddress")
class MplsLdpLspFecTable(Entity):
"""
A table which shows the relationship between
LDP LSPs and FECs. Each row represents
a single LDP LSP to FEC association.
.. attribute:: mplsldplspfecentry
An entry represents a LDP LSP to FEC association
**type**\: list of :py:class:`MplsLdpLspFecEntry <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpLspFecTable.MplsLdpLspFecEntry>`
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsLdpLspFecTable, self).__init__()
self.yang_name = "mplsLdpLspFecTable"
self.yang_parent_name = "MPLS-LDP-STD-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("mplsLdpLspFecEntry", ("mplsldplspfecentry", MPLSLDPSTDMIB.MplsLdpLspFecTable.MplsLdpLspFecEntry))])
self._leafs = OrderedDict()
self.mplsldplspfecentry = YList(self)
self._segment_path = lambda: "mplsLdpLspFecTable"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsLdpLspFecTable, [], name, value)
class MplsLdpLspFecEntry(Entity):
"""
An entry represents a LDP LSP
to FEC association.
.. attribute:: mplsldpentityldpid (key)
**type**\: str
**refers to**\: :py:class:`mplsldpentityldpid <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry>`
.. attribute:: mplsldpentityindex (key)
**type**\: int
**range:** 1..4294967295
**refers to**\: :py:class:`mplsldpentityindex <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry>`
.. attribute:: mplsldppeerldpid (key)
**type**\: str
**refers to**\: :py:class:`mplsldppeerldpid <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpPeerTable.MplsLdpPeerEntry>`
.. attribute:: mplsldplspfecsegment (key)
If the value is inSegment(1), then this indicates that the following index, mplsLdpLspFecSegmentIndex, contains the same value as the mplsInSegmentLdpLspIndex. Otherwise, if the value of this object is outSegment(2), then this indicates that following index, mplsLdpLspFecSegmentIndex, contains the same value as the mplsOutSegmentLdpLspIndex
**type**\: :py:class:`MplsLdpLspFecSegment <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpLspFecTable.MplsLdpLspFecEntry.MplsLdpLspFecSegment>`
.. attribute:: mplsldplspfecsegmentindex (key)
This index is interpretted by using the value of the mplsLdpLspFecSegment. If the mplsLdpLspFecSegment is inSegment(1), then this index has the same value as mplsInSegmentLdpLspIndex. If the mplsLdpLspFecSegment is outSegment(2), then this index has the same value as mplsOutSegmentLdpLspIndex
**type**\: str
**length:** 1..24
.. attribute:: mplsldplspfecindex (key)
This index identifies the FEC entry in the mplsFecTable associated with this session. In other words, the value of this index is the same as the value of the mplsFecIndex that denotes the FEC associated with this Session
**type**\: int
**range:** 1..4294967295
.. attribute:: mplsldplspfecstoragetype
The storage type for this conceptual row. Conceptual rows having the value 'permanent(4)' need not allow write\-access to any columnar objects in the row
**type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>`
.. attribute:: mplsldplspfecrowstatus
The status of this conceptual row. If the value of this object is 'active(1)', then none of the writable objects of this entry can be modified. The Agent should delete this row when the session ceases to exist. If an operator wants to associate the session with a different FEC, the recommended procedure is (as described in detail in the section entitled, 'Changing Values After Session Establishment', and again described in the DESCRIPTION clause of the mplsLdpEntityAdminStatus object) is to set the mplsLdpEntityAdminStatus to down, thereby explicitly causing a session to be torn down. This will also cause this entry to be deleted. Then, set the mplsLdpEntityAdminStatus to enable which enables a new session to be initiated. Once the session is initiated, an entry may be added to this table to associate the new session with a FEC
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsLdpLspFecTable.MplsLdpLspFecEntry, self).__init__()
self.yang_name = "mplsLdpLspFecEntry"
self.yang_parent_name = "mplsLdpLspFecTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['mplsldpentityldpid','mplsldpentityindex','mplsldppeerldpid','mplsldplspfecsegment','mplsldplspfecsegmentindex','mplsldplspfecindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('mplsldpentityldpid', (YLeaf(YType.str, 'mplsLdpEntityLdpId'), ['str'])),
('mplsldpentityindex', (YLeaf(YType.str, 'mplsLdpEntityIndex'), ['int'])),
('mplsldppeerldpid', (YLeaf(YType.str, 'mplsLdpPeerLdpId'), ['str'])),
('mplsldplspfecsegment', (YLeaf(YType.enumeration, 'mplsLdpLspFecSegment'), [('ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB', 'MPLSLDPSTDMIB', 'MplsLdpLspFecTable.MplsLdpLspFecEntry.MplsLdpLspFecSegment')])),
('mplsldplspfecsegmentindex', (YLeaf(YType.str, 'mplsLdpLspFecSegmentIndex'), ['str'])),
('mplsldplspfecindex', (YLeaf(YType.uint32, 'mplsLdpLspFecIndex'), ['int'])),
('mplsldplspfecstoragetype', (YLeaf(YType.enumeration, 'mplsLdpLspFecStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])),
('mplsldplspfecrowstatus', (YLeaf(YType.enumeration, 'mplsLdpLspFecRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.mplsldpentityldpid = None
self.mplsldpentityindex = None
self.mplsldppeerldpid = None
self.mplsldplspfecsegment = None
self.mplsldplspfecsegmentindex = None
self.mplsldplspfecindex = None
self.mplsldplspfecstoragetype = None
self.mplsldplspfecrowstatus = None
self._segment_path = lambda: "mplsLdpLspFecEntry" + "[mplsLdpEntityLdpId='" + str(self.mplsldpentityldpid) + "']" + "[mplsLdpEntityIndex='" + str(self.mplsldpentityindex) + "']" + "[mplsLdpPeerLdpId='" + str(self.mplsldppeerldpid) + "']" + "[mplsLdpLspFecSegment='" + str(self.mplsldplspfecsegment) + "']" + "[mplsLdpLspFecSegmentIndex='" + str(self.mplsldplspfecsegmentindex) + "']" + "[mplsLdpLspFecIndex='" + str(self.mplsldplspfecindex) + "']"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/mplsLdpLspFecTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsLdpLspFecTable.MplsLdpLspFecEntry, [u'mplsldpentityldpid', u'mplsldpentityindex', u'mplsldppeerldpid', u'mplsldplspfecsegment', u'mplsldplspfecsegmentindex', u'mplsldplspfecindex', u'mplsldplspfecstoragetype', u'mplsldplspfecrowstatus'], name, value)
class MplsLdpLspFecSegment(Enum):
"""
MplsLdpLspFecSegment (Enum Class)
If the value is inSegment(1), then this
indicates that the following index,
mplsLdpLspFecSegmentIndex, contains the same
value as the mplsInSegmentLdpLspIndex.
Otherwise, if the value of this object is
outSegment(2), then this
indicates that following index,
mplsLdpLspFecSegmentIndex, contains the same
value as the mplsOutSegmentLdpLspIndex.
.. data:: inSegment = 1
.. data:: outSegment = 2
"""
inSegment = Enum.YLeaf(1, "inSegment")
outSegment = Enum.YLeaf(2, "outSegment")
class MplsLdpSessionPeerAddrTable(Entity):
"""
This table 'extends' the mplsLdpSessionTable.
This table is used to store Label Address Information
from Label Address Messages received by this LSR from
Peers. This table is read\-only and should be updated
when Label Withdraw Address Messages are received, i.e.,
Rows should be deleted as appropriate.
NOTE\: since more than one address may be contained
in a Label Address Message, this table 'sparse augments',
the mplsLdpSessionTable's information.
.. attribute:: mplsldpsessionpeeraddrentry
An entry in this table represents information on a session's single next hop address which was advertised in an Address Message from the LDP peer. The information contained in a row is read\-only
**type**\: list of :py:class:`MplsLdpSessionPeerAddrEntry <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpSessionPeerAddrTable.MplsLdpSessionPeerAddrEntry>`
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsLdpSessionPeerAddrTable, self).__init__()
self.yang_name = "mplsLdpSessionPeerAddrTable"
self.yang_parent_name = "MPLS-LDP-STD-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("mplsLdpSessionPeerAddrEntry", ("mplsldpsessionpeeraddrentry", MPLSLDPSTDMIB.MplsLdpSessionPeerAddrTable.MplsLdpSessionPeerAddrEntry))])
self._leafs = OrderedDict()
self.mplsldpsessionpeeraddrentry = YList(self)
self._segment_path = lambda: "mplsLdpSessionPeerAddrTable"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsLdpSessionPeerAddrTable, [], name, value)
class MplsLdpSessionPeerAddrEntry(Entity):
"""
An entry in this table represents information on
a session's single next hop address which was
advertised in an Address Message from the LDP peer.
The information contained in a row is read\-only.
.. attribute:: mplsldpentityldpid (key)
**type**\: str
**refers to**\: :py:class:`mplsldpentityldpid <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry>`
.. attribute:: mplsldpentityindex (key)
**type**\: int
**range:** 1..4294967295
**refers to**\: :py:class:`mplsldpentityindex <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpEntityTable.MplsLdpEntityEntry>`
.. attribute:: mplsldppeerldpid (key)
**type**\: str
**refers to**\: :py:class:`mplsldppeerldpid <ydk.models.cisco_ios_xe.MPLS_LDP_STD_MIB.MPLSLDPSTDMIB.MplsLdpPeerTable.MplsLdpPeerEntry>`
.. attribute:: mplsldpsessionpeeraddrindex (key)
An index which uniquely identifies this entry within a given session
**type**\: int
**range:** 1..4294967295
.. attribute:: mplsldpsessionpeernexthopaddrtype
The internetwork layer address type of this Next Hop Address as specified in the Label Address Message associated with this Session. The value of this object indicates how to interpret the value of mplsLdpSessionPeerNextHopAddr
**type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>`
.. attribute:: mplsldpsessionpeernexthopaddr
The next hop address. The type of this address is specified by the value of the mplsLdpSessionPeerNextHopAddrType
**type**\: str
**length:** 0..255
"""
_prefix = 'MPLS-LDP-STD-MIB'
_revision = '2004-06-03'
def __init__(self):
super(MPLSLDPSTDMIB.MplsLdpSessionPeerAddrTable.MplsLdpSessionPeerAddrEntry, self).__init__()
self.yang_name = "mplsLdpSessionPeerAddrEntry"
self.yang_parent_name = "mplsLdpSessionPeerAddrTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['mplsldpentityldpid','mplsldpentityindex','mplsldppeerldpid','mplsldpsessionpeeraddrindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('mplsldpentityldpid', (YLeaf(YType.str, 'mplsLdpEntityLdpId'), ['str'])),
('mplsldpentityindex', (YLeaf(YType.str, 'mplsLdpEntityIndex'), ['int'])),
('mplsldppeerldpid', (YLeaf(YType.str, 'mplsLdpPeerLdpId'), ['str'])),
('mplsldpsessionpeeraddrindex', (YLeaf(YType.uint32, 'mplsLdpSessionPeerAddrIndex'), ['int'])),
('mplsldpsessionpeernexthopaddrtype', (YLeaf(YType.enumeration, 'mplsLdpSessionPeerNextHopAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])),
('mplsldpsessionpeernexthopaddr', (YLeaf(YType.str, 'mplsLdpSessionPeerNextHopAddr'), ['str'])),
])
self.mplsldpentityldpid = None
self.mplsldpentityindex = None
self.mplsldppeerldpid = None
self.mplsldpsessionpeeraddrindex = None
self.mplsldpsessionpeernexthopaddrtype = None
self.mplsldpsessionpeernexthopaddr = None
self._segment_path = lambda: "mplsLdpSessionPeerAddrEntry" + "[mplsLdpEntityLdpId='" + str(self.mplsldpentityldpid) + "']" + "[mplsLdpEntityIndex='" + str(self.mplsldpentityindex) + "']" + "[mplsLdpPeerLdpId='" + str(self.mplsldppeerldpid) + "']" + "[mplsLdpSessionPeerAddrIndex='" + str(self.mplsldpsessionpeeraddrindex) + "']"
self._absolute_path = lambda: "MPLS-LDP-STD-MIB:MPLS-LDP-STD-MIB/mplsLdpSessionPeerAddrTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(MPLSLDPSTDMIB.MplsLdpSessionPeerAddrTable.MplsLdpSessionPeerAddrEntry, [u'mplsldpentityldpid', u'mplsldpentityindex', u'mplsldppeerldpid', u'mplsldpsessionpeeraddrindex', u'mplsldpsessionpeernexthopaddrtype', u'mplsldpsessionpeernexthopaddr'], name, value)
def clone_ptr(self):
self._top_entity = MPLSLDPSTDMIB()
return self._top_entity
| 55.323188
| 1,407
| 0.642164
| 113,832
| 0.994001
| 0
| 0
| 0
| 0
| 0
| 0
| 80,009
| 0.698653
|
db098feaf4cd2fcd3ec50721e2eaf014b9b9cc97
| 892
|
py
|
Python
|
atlas/foundations_contrib/src/foundations_contrib/helpers/shell.py
|
DeepLearnI/atlas
|
8aca652d7e647b4e88530b93e265b536de7055ed
|
[
"Apache-2.0"
] | 296
|
2020-03-16T19:55:00.000Z
|
2022-01-10T19:46:05.000Z
|
atlas/foundations_contrib/src/foundations_contrib/helpers/shell.py
|
DeepLearnI/atlas
|
8aca652d7e647b4e88530b93e265b536de7055ed
|
[
"Apache-2.0"
] | 57
|
2020-03-17T11:15:57.000Z
|
2021-07-10T14:42:27.000Z
|
atlas/foundations_contrib/src/foundations_contrib/helpers/shell.py
|
DeepLearnI/atlas
|
8aca652d7e647b4e88530b93e265b536de7055ed
|
[
"Apache-2.0"
] | 38
|
2020-03-17T21:06:05.000Z
|
2022-02-08T03:19:34.000Z
|
def find_bash():
import os
if os.name == 'nt':
return _find_windows_bash()
return '/bin/bash'
def _find_windows_bash():
winreg = _winreg_module()
import csv
StringIO = _get_string_io()
from os.path import dirname
sub_key = 'Directory\\shell\\git_shell\\command'
value = winreg.QueryValue(winreg.HKEY_CLASSES_ROOT, sub_key)
with StringIO(value) as file:
reader = csv.reader(file, delimiter=' ', quotechar='"')
git_bash_location = list(reader)[0][0]
git_bash_directory = git_bash_location.split('\\git-bash.exe')[0]
bash_location = git_bash_directory + '\\bin\\bash.exe'
return bash_location
def _get_string_io():
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
return StringIO
def _winreg_module():
import winreg
return winreg
| 25.485714
| 73
| 0.659193
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 92
| 0.103139
|
db0ab3da5d70c76acedaa4a8af65bab398892ba2
| 9,104
|
py
|
Python
|
app/models/user.py
|
tonyngophd/dronest
|
f0976c31cbbf6fb032851bd42ac566bb381608f0
|
[
"MIT"
] | 13
|
2021-02-03T13:26:59.000Z
|
2021-03-24T19:34:19.000Z
|
app/models/user.py
|
suasllc/dronest
|
f0976c31cbbf6fb032851bd42ac566bb381608f0
|
[
"MIT"
] | null | null | null |
app/models/user.py
|
suasllc/dronest
|
f0976c31cbbf6fb032851bd42ac566bb381608f0
|
[
"MIT"
] | 1
|
2021-06-07T17:56:58.000Z
|
2021-06-07T17:56:58.000Z
|
from .db import db
from .userfollower import UserFollower
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from sqlalchemy import Table, Column, Integer, ForeignKey, or_
from .directmessage import DirectMessage
from .userequipment import UserEquipment
from .equipment import Equipment
from .message import Message
from .messagereceiver import MessageReceiver
from sqlalchemy.orm import validates
class User(db.Model, UserMixin):
__tablename__ = 'Users'
id = db.Column(db.Integer, primary_key = True)
username = db.Column(db.String(40), nullable = False, unique = True)
name = db.Column(db.String(100), nullable=True)
email = db.Column(db.String(255), nullable = False, unique = True)
hashed_password = db.Column(db.String(255), nullable = False)
bio = db.Column(db.Text, nullable=True)
websiteUrl = db.Column(db.Text, nullable=False, default="www.google.com")
userType = db.Column(db.Integer, nullable=True, default=0)
profilePicUrl = db.Column(db.Text, nullable=True)
createdAt = db.Column(db.DateTime(timezone=True), server_default=db.func.now()) #func.sysdate())
updatedAt = db.Column(db.DateTime(timezone=True), server_default=db.func.now(), server_onupdate=db.func.now())
ownPosts = db.relationship('Post', foreign_keys='Post.userId')
ownComments = db.relationship('Comment', foreign_keys='Comment.userId')
taggedInPosts = db.relationship('Post', secondary='taggedusers')
likedPosts = db.relationship('Post', secondary='likedposts')
savedPosts = db.relationship('Post', secondary='savedposts')
sentMessages = db.relationship('DirectMessage', foreign_keys='DirectMessage.senderId')
receivedMessages = db.relationship('DirectMessage', foreign_keys='DirectMessage.receiverId')
likedComments = db.relationship('Comment', secondary='commentlikes')
taggedInComments = db.relationship('Comment', secondary='commenttaggedusers')
followers = [] #db.relationship('User', secondary='userfollowers', foreign_keys='UserFollower.followerId')
following = [] #db.relationship('User', secondary='userfollowers', foreign_keys='UserFollower.userId')
allMessages = []
# equipmentList = []
equipmentList = db.relationship('Equipment', secondary="UserEquipments")
# @validates('username', 'email')
# def convert_lower(self, key, value):
# return value.lower()
@property
def password(self):
return self.hashed_password
@password.setter
def password(self, password):
self.hashed_password = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password, password)
def get_followers(self):
ufs = UserFollower.query.filter(UserFollower.userId == self.id).all()
self.followers = [uf.follower for uf in ufs]
def get_following(self):
ufs = UserFollower.query.filter(UserFollower.followerId == self.id).all()
self.following = [uf.person for uf in ufs]
def get_messages(self):
msgs = DirectMessage.query\
.filter(or_(DirectMessage.senderId == self.id, \
DirectMessage.receiverId == self.id)).order_by(DirectMessage.id).all()
self.allMessages = msgs
def get_conversations(self):
convos = MessageReceiver.query\
.filter(or_(MessageReceiver.senderId == self.id, \
MessageReceiver.receiverId == self.id)).order_by(MessageReceiver.id).all()
uniqueConvos = []
if len(convos):
messageIdSet = set()
for convo in convos:
if convo.senderId != self.id:
uniqueConvos.append(convo)
else:
if convo.messageId not in messageIdSet:
uniqueConvos.append(convo)
messageIdSet.add(convo.messageId)
self.allMessages = uniqueConvos
def get_last_conversation(self):
convo = MessageReceiver.query\
.filter(or_(MessageReceiver.senderId == self.id, \
MessageReceiver.receiverId == self.id)).order_by(-MessageReceiver.id).first()
self.allMessages = [convo]
def to_dict(self):
return {
"id": self.id,
"name": self.name,
"username": self.username,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
}
def to_dict_with_posts_and_follows(self):
self.get_followers()
self.get_following()
return {
"id": self.id,
"name": self.name,
"username": self.username,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
"followers": [user.to_dict() for user in self.followers],
"following": [user.to_dict() for user in self.following],
"ownPosts": [post.to_dict() for post in self.ownPosts],
"equipmentList": [equipment.to_dict() for equipment in self.equipmentList],
}
def to_dict_with_posts(self):
return {
"id": self.id,
"name": self.name,
"username": self.username,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
"ownPosts": [post.to_dict() for post in self.ownPosts],
}
def to_dict_with_posts_fast(self):
user_as_dict_basic = {
"id": self.id,
"name": self.name,
"username": self.username,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
}
user_as_dict = user_as_dict_basic.copy()
user_as_dict["ownPosts"] = [post.to_dict_fast_own_user(user_as_dict_basic) for post in self.ownPosts]
return user_as_dict
# "ownPosts": [post.to_dict_fast() for post in self.ownPosts],
def to_dict_feed(self):
self.get_following()
return {
"followingIds": [int(follow.id) for follow in self.following]
}
def to_dict_for_mentions(self):
return {
"id": self.id,
"displayName": self.name,
"name": self.username,
"profilePicUrl": self.profilePicUrl,
}
def to_dict_no_posts(self):
#no posts so if a post has this user, there is no infinite circular references
return {
"id": self.id,
"username": self.username,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
}
def to_dict_for_self(self):
self.get_followers()
self.get_following()
# self.get_messages()
self.get_conversations()
return {
"id": self.id,
"username": self.username,
"name": self.name,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
"userType": self.userType,
"ownPosts": [post.to_dict() for post in self.ownPosts],
"likedPosts": [post.to_dict() for post in self.likedPosts],
"savedPosts": [post.to_dict() for post in self.savedPosts],
"taggedInPosts": [post.to_dict() for post in self.taggedInPosts],
"messages": [m.to_dict() for m in self.allMessages], #[sentMsg.to_dict() for sentMsg in self.sentMessages] + [recvdMsg.to_dict() for recvdMsg in self.receivedMessages],
"followers": [user.to_dict() for user in self.followers],
"following": [user.to_dict() for user in self.following],
"likedComments": [comment.to_dict() for comment in self.likedComments],
"taggedInComments": [comment.to_dict() for comment in self.taggedInComments],
"equipmentList": [equipment.to_dict() for equipment in self.equipmentList],
}
def to_dict_as_generic_profile(self):
'''
compared to "for_self" this does not include:
- messages
and more later
'''
self.get_followers()
self.get_following()
return {
"id": self.id,
"username": self.username,
"name": self.name,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
"ownPosts": [post.to_dict() for post in self.ownPosts],
"likedPosts": [post.to_dict() for post in self.likedPosts],
"savedPosts": [post.to_dict() for post in self.savedPosts],
"taggedInPosts": [post.to_dict() for post in self.taggedInPosts],
"followers": [user.to_dict() for user in self.followers],
"following": [user.to_dict() for user in self.following],
"likedComments": [comment.to_dict() for comment in self.likedComments],
"taggedInComments": [comment.to_dict() for comment in self.taggedInComments],
"equipmentList": [equipment.to_dict() for equipment in self.equipmentList],
}
'''
mapper(
User, t_users,
properties={
'followers': relation(
User,
secondary=t_follows,
primaryjoin=(t_follows.c.followee_id==t_users.c.id),
secondaryjoin=(t_follows.c.follower_id==t_users.c.id),
),
'followees': relation(
User,
secondary=t_follows,
primaryjoin=(t_follows.c.follower_id==t_users.c.id),
secondaryjoin=(t_follows.c.followee_id==t_users.c.id),
),
},
)
'''
| 35.286822
| 174
| 0.672781
| 8,133
| 0.893344
| 0
| 0
| 171
| 0.018783
| 0
| 0
| 2,259
| 0.248133
|
db0bae1eb24630016d687ec03ec4ffa465df2055
| 397
|
py
|
Python
|
pyflu/update/signals.py
|
flupke/pyflu
|
8856759ced5367fc8439a418b3ce6570b82707ce
|
[
"BSD-3-Clause"
] | 1
|
2017-07-17T06:50:24.000Z
|
2017-07-17T06:50:24.000Z
|
pyflu/update/signals.py
|
flupke/pyflu
|
8856759ced5367fc8439a418b3ce6570b82707ce
|
[
"BSD-3-Clause"
] | null | null | null |
pyflu/update/signals.py
|
flupke/pyflu
|
8856759ced5367fc8439a418b3ce6570b82707ce
|
[
"BSD-3-Clause"
] | null | null | null |
from louie import Signal
class update_finished(Signal):
"""
Sent by :class:`~pyflu.update.qt.UpdateDialogMixin` when an update finished
successfully.
It receives a single argument, containing the path of the patched files.
"""
class not_updated(Signal):
"""
Sent by :meth:`~pyflu.update.qt.UpdateDialogMixin.start_update` when no
update was performed.
"""
| 22.055556
| 79
| 0.697733
| 366
| 0.921914
| 0
| 0
| 0
| 0
| 0
| 0
| 300
| 0.755668
|
db0cc1dc2ea1b2e1fa0e57ca089770ba09f4f7f8
| 9,443
|
py
|
Python
|
sentence_transformers/losses/BatchHardTripletLoss.py
|
zhangxieyang2/sentence-transformers
|
87847b86954f92d200fbb4351b0576f4778d9381
|
[
"Apache-2.0"
] | 5
|
2021-08-10T02:31:51.000Z
|
2022-02-08T01:12:25.000Z
|
sentence_transformers/losses/BatchHardTripletLoss.py
|
zhangxieyang2/sentence-transformers
|
87847b86954f92d200fbb4351b0576f4778d9381
|
[
"Apache-2.0"
] | 5
|
2021-07-02T04:37:04.000Z
|
2021-07-21T00:02:58.000Z
|
sentence_transformers/losses/BatchHardTripletLoss.py
|
zhangxieyang2/sentence-transformers
|
87847b86954f92d200fbb4351b0576f4778d9381
|
[
"Apache-2.0"
] | 5
|
2021-07-04T06:02:02.000Z
|
2021-07-21T08:32:10.000Z
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class BatchHardTripletLossDistanceFunction:
"""
This class defines distance functions, that can be used with Batch[All/Hard/SemiHard]TripletLoss
"""
@staticmethod
def cosine_distance(embeddings):
"""
Compute the 2D matrix of cosine distances (1-cosine_similarity) between all embeddings.
"""
return 1 - util.pytorch_cos_sim(embeddings, embeddings)
@staticmethod
def eucledian_distance(embeddings, squared=False):
"""
Compute the 2D matrix of eucledian distances between all the embeddings.
Args:
embeddings: tensor of shape (batch_size, embed_dim)
squared: Boolean. If true, output is the pairwise squared euclidean distance matrix.
If false, output is the pairwise euclidean distance matrix.
Returns:
pairwise_distances: tensor of shape (batch_size, batch_size)
"""
dot_product = torch.matmul(embeddings, embeddings.t())
# Get squared L2 norm for each embedding. We can just take the diagonal of `dot_product`.
# This also provides more numerical stability (the diagonal of the result will be exactly 0).
# shape (batch_size,)
square_norm = torch.diag(dot_product)
# Compute the pairwise distance matrix as we have:
# ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2
# shape (batch_size, batch_size)
distances = square_norm.unsqueeze(0) - 2.0 * dot_product + square_norm.unsqueeze(1)
# Because of computation errors, some distances might be negative so we put everything >= 0.0
distances[distances < 0] = 0
if not squared:
# Because the gradient of sqrt is infinite when distances == 0.0 (ex: on the diagonal)
# we need to add a small epsilon where distances == 0.0
mask = distances.eq(0).float()
distances = distances + mask * 1e-16
distances = (1.0 - mask) * torch.sqrt(distances)
return distances
class BatchHardTripletLoss(nn.Module):
"""
BatchHardTripletLoss takes a batch with (label, sentence) pairs and computes the loss for all possible, valid
triplets, i.e., anchor and positive must have the same label, anchor and negative a different label. It then looks
for the hardest positive and the hardest negatives.
The labels must be integers, with same label indicating sentences from the same class. You train dataset
must contain at least 2 examples per label class. The margin is computed automatically.
Source: https://github.com/NegatioN/OnlineMiningTripletLoss/blob/master/online_triplet_loss/losses.py
Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
Blog post: https://omoindrot.github.io/triplet-loss
:param model: SentenceTransformer model
:param distance_metric: Function that returns a distance between two emeddings. The class SiameseDistanceMetric contains pre-defined metrices that can be used
Example::
from sentence_transformers import SentenceTransformer, SentencesDataset, losses
from sentence_transformers.readers import InputExample
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
train_examples = [InputExample(texts=['Sentence from class 0'], label=0), InputExample(texts=['Another sentence from class 0'], label=0),
InputExample(texts=['Sentence from class 1'], label=1), InputExample(texts=['Sentence from class 2'], label=2)]
train_dataset = SentencesDataset(train_examples, model)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
train_loss = losses.BatchHardTripletLoss(model=model)
"""
def __init__(self, model: SentenceTransformer, distance_metric = BatchHardTripletLossDistanceFunction.eucledian_distance, margin: float = 5):
super(BatchHardTripletLoss, self).__init__()
self.sentence_embedder = model
self.triplet_margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.sentence_embedder(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
return self.batch_hard_triplet_loss(labels, reps[0])
# Hard Triplet Loss
# Source: https://github.com/NegatioN/OnlineMiningTripletLoss/blob/master/online_triplet_loss/losses.py
# Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
# Blog post: https://omoindrot.github.io/triplet-loss
def batch_hard_triplet_loss(self, labels: Tensor, embeddings: Tensor) -> Tensor:
"""Build the triplet loss over a batch of embeddings.
For each anchor, we get the hardest positive and hardest negative to form a triplet.
Args:
labels: labels of the batch, of size (batch_size,)
embeddings: tensor of shape (batch_size, embed_dim)
margin: margin for triplet loss
squared: Boolean. If true, output is the pairwise squared euclidean distance matrix.
If false, output is the pairwise euclidean distance matrix.
Returns:
Label_Sentence_Triplet: scalar tensor containing the triplet loss
"""
# Get the pairwise distance matrix
pairwise_dist = self.distance_metric(embeddings)
# For each anchor, get the hardest positive
# First, we need to get a mask for every valid positive (they should have same label)
mask_anchor_positive = BatchHardTripletLoss.get_anchor_positive_triplet_mask(labels).float()
# We put to 0 any element where (a, p) is not valid (valid if a != p and label(a) == label(p))
anchor_positive_dist = mask_anchor_positive * pairwise_dist
# shape (batch_size, 1)
hardest_positive_dist, _ = anchor_positive_dist.max(1, keepdim=True)
# For each anchor, get the hardest negative
# First, we need to get a mask for every valid negative (they should have different labels)
mask_anchor_negative = BatchHardTripletLoss.get_anchor_negative_triplet_mask(labels).float()
# We add the maximum value in each row to the invalid negatives (label(a) == label(n))
max_anchor_negative_dist, _ = pairwise_dist.max(1, keepdim=True)
anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative)
# shape (batch_size,)
hardest_negative_dist, _ = anchor_negative_dist.min(1, keepdim=True)
# Combine biggest d(a, p) and smallest d(a, n) into final triplet loss
tl = hardest_positive_dist - hardest_negative_dist + self.triplet_margin
tl[tl < 0] = 0
triplet_loss = tl.mean()
return triplet_loss
@staticmethod
def get_triplet_mask(labels):
"""Return a 3D mask where mask[a, p, n] is True iff the triplet (a, p, n) is valid.
A triplet (i, j, k) is valid if:
- i, j, k are distinct
- labels[i] == labels[j] and labels[i] != labels[k]
Args:
labels: tf.int32 `Tensor` with shape [batch_size]
"""
# Check that i, j and k are distinct
indices_equal = torch.eye(labels.size(0), device=labels.device).bool()
indices_not_equal = ~indices_equal
i_not_equal_j = indices_not_equal.unsqueeze(2)
i_not_equal_k = indices_not_equal.unsqueeze(1)
j_not_equal_k = indices_not_equal.unsqueeze(0)
distinct_indices = (i_not_equal_j & i_not_equal_k) & j_not_equal_k
label_equal = labels.unsqueeze(0) == labels.unsqueeze(1)
i_equal_j = label_equal.unsqueeze(2)
i_equal_k = label_equal.unsqueeze(1)
valid_labels = ~i_equal_k & i_equal_j
return valid_labels & distinct_indices
@staticmethod
def get_anchor_positive_triplet_mask(labels):
"""Return a 2D mask where mask[a, p] is True iff a and p are distinct and have same label.
Args:
labels: tf.int32 `Tensor` with shape [batch_size]
Returns:
mask: tf.bool `Tensor` with shape [batch_size, batch_size]
"""
# Check that i and j are distinct
indices_equal = torch.eye(labels.size(0), device=labels.device).bool()
indices_not_equal = ~indices_equal
# Check if labels[i] == labels[j]
# Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
labels_equal = labels.unsqueeze(0) == labels.unsqueeze(1)
return labels_equal & indices_not_equal
@staticmethod
def get_anchor_negative_triplet_mask(labels):
"""Return a 2D mask where mask[a, n] is True iff a and n have distinct labels.
Args:
labels: tf.int32 `Tensor` with shape [batch_size]
Returns:
mask: tf.bool `Tensor` with shape [batch_size, batch_size]
"""
# Check if labels[i] != labels[k]
# Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
return ~(labels.unsqueeze(0) == labels.unsqueeze(1))
| 46.517241
| 162
| 0.684528
| 9,228
| 0.977232
| 0
| 0
| 4,181
| 0.442762
| 0
| 0
| 5,591
| 0.592079
|
db0cd377f76bee16bf9abd7de52027837704b690
| 2,505
|
py
|
Python
|
wedding/card/route.py
|
ackneal/wedday
|
b57b524e3aa237a2568bda4fadb2d5709773c507
|
[
"MIT"
] | null | null | null |
wedding/card/route.py
|
ackneal/wedday
|
b57b524e3aa237a2568bda4fadb2d5709773c507
|
[
"MIT"
] | null | null | null |
wedding/card/route.py
|
ackneal/wedday
|
b57b524e3aa237a2568bda4fadb2d5709773c507
|
[
"MIT"
] | null | null | null |
from flask import Flask, Blueprint, request, make_response, jsonify
from sqlalchemy.sql.expression import func
from google.cloud import storage
from .card import Cards
from ..functions import valid_param, upload_file
from .. import db
bp = Blueprint('route', __name__, url_prefix = '/api')
@bp.route('/cards')
def getallphoto():
limit = request.args.get('limit', 8)
offset = request.args.get('offset', 0)
try:
offset = int(offset)
limit = int(limit)
if 0 == limit:
return jsonify({'error': True, 'message': 'limit 不行是 0'}), 400
except ValueError:
return jsonify({'error': True, 'message': 'limt & offset need to be integer'}), 400
cards = Cards.query.order_by(Cards.id.desc()).limit(limit + 1).offset(offset)
result = []
has_more = False
for index, card in enumerate(cards):
if index == (limit):
has_more = True
break;
result.append(card.to_dict())
return jsonify({'data': result, 'has_more': has_more})
@bp.route('/cards', methods = ['POST'])
def store():
image = request.files.get('image')
if image is None:
return jsonify({'error': True, 'message': '請上傳照片'}), 400
form = request.form
if not valid_param(form, ['message', 'name']):
return jsonify({'error': True, 'message': '參數不完整'}), 400
try:
file_path = upload_file(image)
print(file_path)
except TypeError as error:
return jsonify({'error': True, 'message': format(error)}), 400
except:
return jsonify({'error': True, 'message': '檔案上傳失敗'}), 500
card = Cards(name = form['name'], message = form['message'], image = file_path)
try:
db.session.add(card)
db.session.commit()
except:
return jsonify({'error': True, 'message': '留言失敗'}), 500
return jsonify({'error': False, 'data': card.to_dict()})
# 抽獎, 依 limit 決定抽幾個
@bp.route('/card', methods = ['GET'])
def randomCard():
limit = request.args.get('limit', 0)
try:
limit = int(limit)
if limit <= 0:
return jsonify({'error': True, 'message': '參數不正確'}), 400
except ValueError:
return jsonify({'error': True, 'message': '參數不正確'}), 400
cards = Cards.query.filter_by(status=0).order_by(func.rand()).limit(limit).all()
result = []
for card in cards:
# 更新 status
card.status = 1
db.session.commit()
result.append(card.to_dict())
return jsonify({'data': result})
| 29.127907
| 91
| 0.602395
| 0
| 0
| 0
| 0
| 2,258
| 0.871478
| 0
| 0
| 490
| 0.189116
|
db0de61b39c2d473b879ae1a407b8e263bd53ec2
| 6,804
|
py
|
Python
|
mudi/utils.py
|
getzlab/mudi
|
eda170119708e59920c23a03834af915ecca24ce
|
[
"MIT"
] | 1
|
2021-11-04T00:08:00.000Z
|
2021-11-04T00:08:00.000Z
|
mudi/utils.py
|
getzlab/mudi
|
eda170119708e59920c23a03834af915ecca24ce
|
[
"MIT"
] | null | null | null |
mudi/utils.py
|
getzlab/mudi
|
eda170119708e59920c23a03834af915ecca24ce
|
[
"MIT"
] | null | null | null |
import numpy as np
import h5py
import scipy
import gc
import pandas as pd
import os
import time
import pkg_resources
import scanpy as sc
import scanpy.external as sce
import sys
import scrublet as scr
# ---------------------------------
# Scanpy Helpers
# ---------------------------------
def scanpy_adata_loader(path, genome='GRCh38', verbose=True):
"""
Loader function.
------------------
Can handle lists of file/dir paths, a file (.h5) or a directory format.
Use this to load/aggregate immediately into a scanpy object.
"""
if isinstance(path, list):
if verbose:
print("Combining {} inputs.".format(len(path)))
tmp = [scanpy_adata_loader(f, genome=genome) for f in path]
return tmp[0].concatenate(tmp[1:])
if os.path.isfile(path):
ad = sc.read_10x_h5(path, genome=genome)
ad.var_names_make_unique()
return ad
elif os.path.isdir(path):
ad = sc.read_10x_mtx(path)
ad.var_names_make_unique()
return ad
else:
raise FileError("Provide proper path.")
def get_percent_expr(adata, groupby):
"""
Get percent expressed & mean expression.
------------------------------
Requires:
* adata.layers['counts'] -> counting percent of cells with gene expressed
* adata.raw.X -> for computing mean expression (log1p)
"""
from tqdm import tqdm
groups = list(adata.obs[groupby].cat.categories)
res_in = pd.DataFrame(columns=adata.var_names, index=groups)
res_out = pd.DataFrame(columns=adata.var_names, index=groups)
res_mean_in = pd.DataFrame(columns=adata.var_names, index=groups)
res_mean_out = pd.DataFrame(columns=adata.var_names, index=groups)
for group in tqdm(groups, desc="Computing metrics per group"):
res_in.loc[group] = (adata[adata.obs[groupby].isin([group]),:].layers['counts'] > 0).mean(0)
res_out.loc[group] = (adata[~adata.obs[groupby].isin([group]),:].layers['counts'] > 0).mean(0)
res_mean_in.loc[group] = adata[adata.obs[groupby].isin([group]),:].raw.X.mean(0)
res_mean_out.loc[group] = adata[~adata.obs[groupby].isin([group]),:].raw.X.mean(0)
res_in = res_in.T
res_out = res_out.T
res_mean_in = res_mean_in.T
res_mean_out = res_mean_out.T
res_in = res_in.reset_index().melt(id_vars=['index']).rename(columns={'index':'names','variable':"group", 'value':'percent_in'})
res_out = res_out.reset_index().melt(id_vars=['index']).rename(columns={'index':'names','variable':"group", 'value':'percent_out'})
res_mean_in = res_mean_in.reset_index().melt(id_vars=['index']).rename(columns={'index':'names','variable':"group", 'value':'mean_expr_in'})
res_mean_out = res_mean_out.reset_index().melt(id_vars=['index']).rename(columns={'index':'names','variable':"group", 'value':'mean_expr_out'})
return pd.merge(pd.merge(res_in, res_out), pd.merge(res_mean_in, res_mean_out))
def aggr_markers(adata, uns='rank_genes_groups', expr_metrics=True):
"""
Aggregate markers.
------------------
Returns an easy to view marker list dataframe.
Assumes 'rank_genes_groups' has already been called to find group markers
in AnnData Object.
* expr_metrics -> compute percent of cells expressed & mean expression for in/out groups.
"""
assert adata.uns[uns], 'Compute differentially expressed genes first.'
aggr_df = sc.get.rank_genes_groups_df(adata, None)
if expr_metrics:
aggr_percent_expr = get_percent_expr(adata, adata.uns[uns]['params']['groupby'])
return pd.merge(aggr_df, aggr_percent_expr)
else:
return aggr_df
def get_de_genes_to_plot(markers_df, lfc_thresh=1, padj_thresh=0.1, n_to_plot=5):
"""
Top DiffExp Genes.
Return as dict for easy plotting with sc.pl.dotplot.
"""
markers_df = markers_df[
(markers_df['logfoldchanges']>=lfc_thresh) &
(markers_df['pvals_adj']<=padj_thresh)
].groupby("group").head(n_to_plot)
return markers_df.groupby("group").agg(list)['names'].to_dict()
def get_uns(adata, tag):
"""
Retrieve unstructured data stored in AnnData.
------------------------
Inputs:
- adata: AnnData Object
- tag: name of key in adata.uns
Outputs:
- pd.DataFrame: formatted information in adata.uns
"""
assert tag in adata.uns, "{} not found in adata.uns".format(tag)
try:
return pd.DataFrame(adata.uns[tag]['values'], index=adata.uns[tag]['rows'], columns=adata.uns[tag]['cols'])
except:
raise ValueError("Unable to return structured dataframe from data.uns[{}]".format(tag))
def get_a_by_b(adata, a, b, norm=False):
"""
Get A x B.
----------------
Number of each .obs b per .obs a
returns pd.Dataframe
"""
hm = adata.obs.groupby([a,b]).size().reset_index().set_index(a).pivot(columns=b)
if norm:
hm = hm.div(hm.sum(1), 0)
hm.columns = hm.columns.droplevel()
hm.columns.name = None
return hm
# ---------------------------------
# Utilities
# ---------------------------------
def score_cc_genes(adata, cc_genes_file=pkg_resources.resource_filename('mudi', './ref/cell_cycle_genes/Macosko_cell_cycle_genes.txt')):
"""
Score Cell-Cycle Genes
------------------------------------
How to run:
score_cc_genes(adata)
Loads cell cycle genes list (ex. Macosko et al 2015) and runs cycle-scoring
on input anndata. Does everything in place. Stores the following in .obs:
- S_score
- G2M_score
- phase
"""
cc_genes = pd.read_table(cc_genes_file, delimiter='\t')
s_genes = cc_genes['S'].dropna()
g2m_genes = cc_genes['G2.M'].dropna()
s_genes_i = adata.var_names[np.in1d(adata.var_names, s_genes)]
g2m_genes_i = adata.var_names[np.in1d(adata.var_names, g2m_genes)]
sc.tl.score_genes_cell_cycle(adata, s_genes_i, g2m_genes_i)
def score_doublets(adata, key='batch', n_prin_comps=20, verbose=False):
"""
Scrubber: wrapper for Scrublet.
------------------------------------
How to run:
score_doublets(adata)
Adds the following to anndata object:
- adata.obs['scrublet_score'] --> float (0 - 1.0)
- adata.obs['doublet'] --> bool
"""
doublet_scores, predicted_doublets = list(),list()
for batch in adata.obs[key].drop_duplicates().values:
scrub = scr.Scrublet(adata[adata.obs[key]==batch].X)
_doublet_scores, _predicted_doublets = scrub.scrub_doublets(n_prin_comps=n_prin_comps, verbose=verbose)
doublet_scores.append(_doublet_scores)
predicted_doublets.append(_predicted_doublets)
adata.obs['scrublet_score'] = np.concatenate(doublet_scores)
adata.obs['doublet'] = np.concatenate(predicted_doublets)
| 36.191489
| 147
| 0.640212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,654
| 0.390065
|
db0e44fa6d9ec7326e7caba29ef74b40e65149d4
| 1,518
|
py
|
Python
|
src/quacks/mypy.py
|
ariebovenberg/quacks
|
839d307b24f3f37d9a5318c16acb631b9a1153f0
|
[
"MIT"
] | 11
|
2021-12-12T20:51:15.000Z
|
2022-02-02T12:08:32.000Z
|
src/quacks/mypy.py
|
ariebovenberg/quacks
|
839d307b24f3f37d9a5318c16acb631b9a1153f0
|
[
"MIT"
] | 8
|
2021-12-14T12:53:51.000Z
|
2022-03-15T04:29:44.000Z
|
src/quacks/mypy.py
|
ariebovenberg/quacks
|
839d307b24f3f37d9a5318c16acb631b9a1153f0
|
[
"MIT"
] | 1
|
2021-12-15T16:50:34.000Z
|
2021-12-15T16:50:34.000Z
|
from typing import Callable, Optional, Type
from mypy.nodes import AssignmentStmt, NameExpr, Statement, TempNode, Var
from mypy.plugin import ClassDefContext, Plugin
READONLY_DECORATOR_NAME = "quacks.readonly"
# this logic is mostly derived from the dataclasses plugin
def make_statement_readonly(c: ClassDefContext, s: Statement) -> None:
if not (isinstance(s, AssignmentStmt) and s.new_syntax):
return
lhs = s.lvalues[0]
if not isinstance(lhs, NameExpr):
return
if not (isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs):
c.api.msg.fail(
"@readonly doesn't support default values yet.", context=lhs
)
return
sym = c.cls.info.names.get(lhs.name)
if sym is None:
return
node = sym.node
assert isinstance(node, Var)
if node.is_classvar:
return
node.is_property = True
def make_readonly(c: ClassDefContext) -> None:
if not c.cls.info.is_protocol:
c.api.msg.fail(
"@readonly decorator only supported on protocols.", context=c.cls
)
for stmt in c.cls.defs.body:
make_statement_readonly(c, stmt)
class _QuacksPlugin(Plugin):
def get_class_decorator_hook(
self, fullname: str
) -> Optional[Callable[[ClassDefContext], None]]:
if fullname == READONLY_DECORATOR_NAME:
return make_readonly
return None
def plugin(version: str) -> Type[Plugin]:
"""Plugin's public API and entrypoint."""
return _QuacksPlugin
| 26.631579
| 77
| 0.667984
| 245
| 0.161397
| 0
| 0
| 0
| 0
| 0
| 0
| 213
| 0.140316
|
db0eabb87d8f110b34f799008d45115ae3494a8a
| 470
|
py
|
Python
|
tests/test_toolbar.py
|
WilliamMayor/django-mail-panel
|
2c41f808a645d5d7bad90510f44e53d29981cf22
|
[
"Apache-2.0"
] | null | null | null |
tests/test_toolbar.py
|
WilliamMayor/django-mail-panel
|
2c41f808a645d5d7bad90510f44e53d29981cf22
|
[
"Apache-2.0"
] | null | null | null |
tests/test_toolbar.py
|
WilliamMayor/django-mail-panel
|
2c41f808a645d5d7bad90510f44e53d29981cf22
|
[
"Apache-2.0"
] | null | null | null |
from .context import *
import unittest
from mail_panel.panels import MailToolbarPanel
class ToolbarSuite(unittest.TestCase):
def test_panel(self):
"""
General 'does it run' test.
"""
p = MailToolbarPanel(None)
assert(p.toolbar is None)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ToolbarSuite))
return suite
if __name__ == "__main__":
unittest.TextTestRunner().run(suite())
| 21.363636
| 51
| 0.668085
| 194
| 0.412766
| 0
| 0
| 0
| 0
| 0
| 0
| 61
| 0.129787
|
db0fa33383a316fc52554465b3c7c6c0aa5f9ac3
| 8,130
|
py
|
Python
|
tests/project/operations/operational_types/test_common_functions.py
|
anamileva/gridpath
|
e55eacb88ca5e6c034a90b18819e17cbd6f43854
|
[
"Apache-2.0"
] | 44
|
2020-10-27T19:05:44.000Z
|
2022-03-22T17:17:37.000Z
|
tests/project/operations/operational_types/test_common_functions.py
|
anamileva/gridpath
|
e55eacb88ca5e6c034a90b18819e17cbd6f43854
|
[
"Apache-2.0"
] | 67
|
2020-10-08T22:36:53.000Z
|
2022-03-22T22:58:33.000Z
|
tests/project/operations/operational_types/test_common_functions.py
|
anamileva/gridpath
|
e55eacb88ca5e6c034a90b18819e17cbd6f43854
|
[
"Apache-2.0"
] | 21
|
2020-10-08T23:23:48.000Z
|
2022-03-28T01:21:21.000Z
|
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from builtins import str
from importlib import import_module
import numpy as np
import os.path
import pandas as pd
import sys
import unittest
from tests.common_functions import add_components_and_load_data
from gridpath.project.operations.operational_types.common_functions import \
determine_relevant_timepoints
TEST_DATA_DIRECTORY = \
os.path.join(os.path.dirname(__file__), "..", "..", "..", "test_data")
# Import prerequisite modules
PREREQUISITE_MODULE_NAMES = [
"temporal.operations.timepoints", "temporal.operations.horizons",
"geography.load_zones",
"project.__init__"
]
IMPORTED_PREREQ_MODULES = list()
for mdl in PREREQUISITE_MODULE_NAMES:
try:
imported_module = import_module("." + str(mdl), package='gridpath')
IMPORTED_PREREQ_MODULES.append(imported_module)
except ImportError:
print("ERROR! Module " + str(mdl) + " not found.")
sys.exit(1)
class TestOperationalTypeCommonFunctions(unittest.TestCase):
"""
Test the common_functions module in the operational types package.
"""
def test_determine_relevant_timepoints(self):
"""
Check that the list of relevant timepoints is as expected based on
the current timepoint and the minimum up/down time (and, on the data
side, the duration of other timepoints). Add any other cases to
check that the 'determine_relevant_timepoints' function gives the
expected results.
"""
m, data = add_components_and_load_data(
prereq_modules=IMPORTED_PREREQ_MODULES,
module_to_test=None, # No need to name since not adding components
test_data_dir=TEST_DATA_DIRECTORY,
subproblem="",
stage=""
)
instance = m.create_instance(data)
test_cases = {
1: {"min_time": 4, "g": "Gas_CCGT", "tmp": 20200103,
"relevant_timepoints": [20200103, 20200102]},
2: {"min_time": 5, "g": "Gas_CCGT", "tmp": 20200103,
"relevant_timepoints":
[20200103, 20200102, 20200101, 20200124, 20200123]},
3: {"min_time": 8, "g": "Gas_CCGT", "tmp": 20200103,
"relevant_timepoints":
[20200103, 20200102, 20200101, 20200124, 20200123,
20200122, 20200121]},
4: {"min_time": 1, "g": "Gas_CCGT", "tmp": 20200120,
"relevant_timepoints": [20200120, 20200119, 20200118]},
5: {"min_time": 2, "g": "Gas_CCGT", "tmp": 20200120,
"relevant_timepoints":
[20200120, 20200119, 20200118, 20200117]},
6: {"min_time": 3, "g": "Gas_CCGT", "tmp": 20200120,
"relevant_timepoints":
[20200120, 20200119, 20200118, 20200117, 20200116]},
# Test min times of longer duration than the horizon in a
# 'circular' horizon setting
7: {"min_time": 100, "g": "Gas_CCGT", "tmp": 20200101,
"relevant_timepoints":
[20200101, 20200124, 20200123, 20200122, 20200121,
20200120, 20200119, 20200118, 20200117, 20200116,
20200115, 20200114, 20200113, 20200112, 20200111,
20200110, 20200109, 20200108, 20200107, 20200106,
20200105, 20200104, 20200103, 20200102, 20200101]},
# If we're in the first timepoint of a linear horizon, test that
# we only get that timepoint (i.e. that we break out of the loop
# before adding any more timepoints)
8: {"min_time": 100, "g": "Gas_CCGT", "tmp": 20200201,
"relevant_timepoints": [20200201]},
# Test that we break out of the loop with min times that reach the
# first horizon timepoint in a 'linear' horizon setting
9: {"min_time": 100, "g": "Gas_CCGT", "tmp": 20200202,
"relevant_timepoints": [20200202, 20200201]}
}
for test_case in test_cases.keys():
expected_list = test_cases[test_case]["relevant_timepoints"]
actual_list, actual_linked_tmps = determine_relevant_timepoints(
mod=instance,
g=test_cases[test_case]["g"],
tmp=test_cases[test_case]["tmp"],
min_time=test_cases[test_case]["min_time"]
)
self.assertListEqual(expected_list, actual_list)
# No linked timepoints, so check that the list is empty in every
# test case
self.assertListEqual([], actual_linked_tmps)
def test_determine_relevant_linked_timepoints(self):
"""
Check that the lists of relevant timepoints and relevant linked
timepoints are as expected based on the current timepoint and the
minimum up/down time (and, on the data side, the duration of other
timepoints).
"""
m, data = add_components_and_load_data(
prereq_modules=IMPORTED_PREREQ_MODULES,
module_to_test=None, # No need to name since not adding components
test_data_dir=os.path.join(TEST_DATA_DIRECTORY, "subproblems"),
subproblem="202002",
stage=""
)
instance = m.create_instance(data)
test_cases = {
1: {"min_time": 4, "g": "Gas_CCGT", "tmp": 20200203,
"relevant_timepoints": [20200203, 20200202, 20200201],
"relevant_linked_timepoints": [0]},
2: {"min_time": 5, "g": "Gas_CCGT", "tmp": 20200203,
"relevant_timepoints": [20200203, 20200202, 20200201],
"relevant_linked_timepoints": [0, -1]},
# Stop at the last included linked timepoint if the min time is
# longer than the total duration of the current timepoint to the
# last linked timepoint
3: {"min_time": 24, "g": "Gas_CCGT", "tmp": 20200203,
"relevant_timepoints": [20200203, 20200202, 20200201],
"relevant_linked_timepoints": [0, -1, -2, -3, -4, -5, -6,
-7, -8, -9, -10, -11]},
# No linked timepoint if min time does not reach them
4: {"min_time": 1, "g": "Gas_CCGT", "tmp": 20200203,
"relevant_timepoints": [20200203],
"relevant_linked_timepoints": []},
# Starting in the first timepoint of the horizon
5: {"min_time": 4, "g": "Gas_CCGT", "tmp": 20200201,
"relevant_timepoints": [20200201],
"relevant_linked_timepoints": [0, -1, -2]},
}
for test_case in test_cases.keys():
expected_rel_tmp_list = test_cases[test_case][
"relevant_timepoints"]
expected_rel_linked_tmp_list = test_cases[test_case][
"relevant_linked_timepoints"]
actual_rel_tmp_list, actual_rel_linked_tmp_list = \
determine_relevant_timepoints(
mod=instance,
g=test_cases[test_case]["g"],
tmp=test_cases[test_case]["tmp"],
min_time=test_cases[test_case]["min_time"]
)
self.assertListEqual(expected_rel_tmp_list, actual_rel_tmp_list)
self.assertListEqual(actual_rel_linked_tmp_list,
expected_rel_linked_tmp_list)
if __name__ == "__main__":
unittest.main()
| 44.42623
| 79
| 0.605904
| 6,525
| 0.802583
| 0
| 0
| 0
| 0
| 0
| 0
| 3,211
| 0.394957
|
db0fa4a708c3b8da99f0eb3651ee65d3e1405fa0
| 338
|
py
|
Python
|
top_links.py
|
judge2020/crossover-viz
|
61fef8750f2b64a2e71b9737a3c992f99c47c300
|
[
"0BSD"
] | null | null | null |
top_links.py
|
judge2020/crossover-viz
|
61fef8750f2b64a2e71b9737a3c992f99c47c300
|
[
"0BSD"
] | null | null | null |
top_links.py
|
judge2020/crossover-viz
|
61fef8750f2b64a2e71b9737a3c992f99c47c300
|
[
"0BSD"
] | null | null | null |
from main import extract_data
if __name__ == '__main__':
top = {}
out = extract_data('CrossoverWiki.xml')
for name in out:
for link in name['links']:
w = link['with']
top[w] = top[w] + 1 if w in top else 1
top = dict(reversed(sorted(top.items(), key=lambda item: item[1])))
print(top)
| 28.166667
| 71
| 0.573964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 42
| 0.12426
|
db0fc2a14bd242c50cea5efa838e162798fc3772
| 316
|
py
|
Python
|
instance/settings.py
|
isaacjohnwesley/digfont
|
0f0a088151e52e972eec04dbc0b8c7fd6a30a52d
|
[
"MIT"
] | 2
|
2017-01-27T03:22:21.000Z
|
2018-10-30T15:26:33.000Z
|
instance/settings.py
|
isaacjohnwesley/digfont
|
0f0a088151e52e972eec04dbc0b8c7fd6a30a52d
|
[
"MIT"
] | null | null | null |
instance/settings.py
|
isaacjohnwesley/digfont
|
0f0a088151e52e972eec04dbc0b8c7fd6a30a52d
|
[
"MIT"
] | null | null | null |
"""
Flask application settings.
"""
import os
DEBUG = True
# Output un-merged files in debug mode.
#ASSETS_DEBUG = DEBUG
SECRET_KEY = os.environ.get('SECRET_KEY', None)
MY_VAR = os.environ.get('MY_VAR', None)
#: Mongodb settings
MONGODB_SETTINGS = {'DB' : 'digfont'}
#: CSRF key
SECRET_KEY = "dig.font.s3cr3t"
| 15.8
| 47
| 0.702532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 175
| 0.553797
|
db116d889b8b1d94133fabaa9ee920a870375f4b
| 839
|
py
|
Python
|
pangram.py
|
ZorbaTheStrange/pangram
|
f9fda95f119d328224f21f19690122e36be34482
|
[
"MIT"
] | null | null | null |
pangram.py
|
ZorbaTheStrange/pangram
|
f9fda95f119d328224f21f19690122e36be34482
|
[
"MIT"
] | null | null | null |
pangram.py
|
ZorbaTheStrange/pangram
|
f9fda95f119d328224f21f19690122e36be34482
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python3
'''
panogram.py - this program recongizes pangrams.
by zorba
'''
import sys
def pangram_check(sentence_or_word):
'''
checks the user input to see if it is a pangram.
'''
letters = set('abcdefghijklmnopqrstuvwxyz')
if sentence_or_word.lower() == 'done':
z
for letter in sentence_or_word.lower():
if letter in letters:
letters.remove(letter)
if len(letters) == 0:
print('\nThe sentence or word is a panogram!')
else:
print('\nThis sentence or word is not a panogram.')
def main():
'''
main
'''
sentence_or_word = input('\nPlease enter a sentence or a word to check to see if it is a pangram: \nIf Done, Please type Done')
pangram_check(sentence_or_word)
if __name__ == '__main__':
sys.exit(main())
| 19.511628
| 131
| 0.623361
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 396
| 0.47199
|
db13d0f32b95cfef64253a43f004918a6c18619d
| 232
|
py
|
Python
|
Chapter-4 Sequence/Dictionary.py
|
jaiswalIT02/pythonprograms
|
bc94e52121202b04c3e9112d9786f93ed6707f7a
|
[
"MIT"
] | null | null | null |
Chapter-4 Sequence/Dictionary.py
|
jaiswalIT02/pythonprograms
|
bc94e52121202b04c3e9112d9786f93ed6707f7a
|
[
"MIT"
] | null | null | null |
Chapter-4 Sequence/Dictionary.py
|
jaiswalIT02/pythonprograms
|
bc94e52121202b04c3e9112d9786f93ed6707f7a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 10 15:31:57 2020
@author: Tarun Jaiswal
"""
dictone = {
"bookname": "Recursion Sutras",
"subject": "Recursion",
"author": "Champak Roy"
}
dicttwo = dict(dictone)
print(dicttwo)
| 15.466667
| 35
| 0.633621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 159
| 0.685345
|
db15276b717208ef752639b4aaf944577ef66238
| 1,032
|
py
|
Python
|
mportal/wsgi_start.py
|
auyeongwy/mportal
|
e406baea802093569c90c7206649c5afd9431dab
|
[
"Apache-2.0"
] | null | null | null |
mportal/wsgi_start.py
|
auyeongwy/mportal
|
e406baea802093569c90c7206649c5afd9431dab
|
[
"Apache-2.0"
] | null | null | null |
mportal/wsgi_start.py
|
auyeongwy/mportal
|
e406baea802093569c90c7206649c5afd9431dab
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Au Yeong Wing Yau
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" All start-up processes to be called when the WSGI process starts.
"""
from mportal_tools import mportal_log
from mportal_tools import mportal_db
import mportal_urls, template_mgr
mportal_log.init_log() # Initializes logging file handler.
mportal_db.init_db() # Initializes database connections.
mportal_urls.init_urls() # Initializes URL list.
template_mgr.init_templates() # Initializes HTML templates.
| 34.4
| 74
| 0.774225
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 799
| 0.774225
|
db16a58a234af950b25d6e13e770b9afd148413c
| 1,252
|
py
|
Python
|
lecture_04/312_plan_motion_ros_artist.py
|
farzanehesk/COMPAS-II-FS2022
|
857eb40000f0532d0c04689331eadefd38dce6b7
|
[
"MIT"
] | 11
|
2022-01-24T15:07:15.000Z
|
2022-03-29T12:58:05.000Z
|
lecture_04/312_plan_motion_ros_artist.py
|
farzanehesk/COMPAS-II-FS2022
|
857eb40000f0532d0c04689331eadefd38dce6b7
|
[
"MIT"
] | 4
|
2022-03-16T06:06:45.000Z
|
2022-03-29T22:59:11.000Z
|
lecture_04/312_plan_motion_ros_artist.py
|
farzanehesk/COMPAS-II-FS2022
|
857eb40000f0532d0c04689331eadefd38dce6b7
|
[
"MIT"
] | 20
|
2022-03-02T10:36:41.000Z
|
2022-03-09T00:12:33.000Z
|
import math
import time
from compas_fab.backends import RosClient
from compas.artists import Artist
from compas.geometry import Frame
with RosClient("localhost") as client:
robot = client.load_robot(load_geometry=True)
group = robot.main_group_name
frame = Frame((0.4, 0.3, 0.05), (-1, 0, 0), (0, 1, 0))
tolerance_position = 0.001
tolerance_axes = [math.radians(1)] * 3
start_configuration = robot.zero_configuration()
start_configuration.joint_values = (-0.106, 5.351, 2.231, -2.869, 4.712, 1.465)
# create goal constraints from frame
goal_constraints = robot.constraints_from_frame(frame, tolerance_position, tolerance_axes, group)
trajectory = robot.plan_motion(goal_constraints, start_configuration, group, options=dict(planner_id="RRT"))
print("Computed kinematic path with %d configurations." % len(trajectory.points))
print("Executing this path at full speed would take approx. %.3f seconds." % trajectory.time_from_start)
artist = Artist(robot.model)
for tp in trajectory.points:
config = robot.zero_configuration()
config.joint_values = tp.joint_values
artist.update(config)
artist.draw_visual()
artist.redraw()
time.sleep(0.02)
| 33.837838
| 112
| 0.713259
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 169
| 0.134984
|
db16e37393c0ecb2b013bb3800feb96ec755b22d
| 1,306
|
py
|
Python
|
awxkit/test/cli/test_client.py
|
vrevelas/awx
|
858f43fd2aeccacd3172b1efa44fb37c7a48e92e
|
[
"Apache-2.0"
] | null | null | null |
awxkit/test/cli/test_client.py
|
vrevelas/awx
|
858f43fd2aeccacd3172b1efa44fb37c7a48e92e
|
[
"Apache-2.0"
] | null | null | null |
awxkit/test/cli/test_client.py
|
vrevelas/awx
|
858f43fd2aeccacd3172b1efa44fb37c7a48e92e
|
[
"Apache-2.0"
] | null | null | null |
from io import StringIO
import pytest
from requests.exceptions import ConnectionError
from awxkit.cli import run, CLI
class MockedCLI(CLI):
def fetch_version_root(self):
pass
@property
def v2(self):
return MockedCLI()
@property
def json(self):
return {
'users': None
}
@pytest.mark.parametrize('help_param', ['-h', '--help'])
def test_help(capfd, help_param):
with pytest.raises(SystemExit):
run(['awx {}'.format(help_param)])
out, err = capfd.readouterr()
assert "usage:" in out
for snippet in (
'--conf.host https://example.awx.org]',
'-v, --verbose'
):
assert snippet in out
def test_connection_error(capfd):
cli = CLI()
cli.parse_args(['awx'])
with pytest.raises(ConnectionError):
cli.connect()
@pytest.mark.parametrize('resource', ['', 'invalid'])
def test_list_resources(capfd, resource):
# if a valid resource isn't specified, print --help
cli = MockedCLI()
cli.parse_args(['awx {}'.format(resource)])
cli.connect()
cli.parse_resource()
out, err = capfd.readouterr()
assert "usage:" in out
for snippet in (
'--conf.host https://example.awx.org]',
'-v, --verbose'
):
assert snippet in out
| 21.409836
| 56
| 0.608729
| 217
| 0.166156
| 0
| 0
| 953
| 0.729709
| 0
| 0
| 246
| 0.188361
|
db18a54ed6a35015f51619ef8bd59e64ab56a6ea
| 10,797
|
py
|
Python
|
tests/python/pants_test/tasks/test_what_changed.py
|
areitz/pants
|
9bfb3feb0272c05f36e190c9147091b97ee1950d
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/tasks/test_what_changed.py
|
areitz/pants
|
9bfb3feb0272c05f36e190c9147091b97ee1950d
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/tasks/test_what_changed.py
|
areitz/pants
|
9bfb3feb0272c05f36e190c9147091b97ee1950d
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.codegen.targets.java_protobuf_library import JavaProtobufLibrary
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.codegen.targets.python_thrift_library import PythonThriftLibrary
from pants.backend.core.from_target import FromTarget
from pants.backend.core.targets.resources import Resources
from pants.backend.core.tasks.what_changed import WhatChanged
from pants.backend.core.wrapped_globs import RGlobs
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.unpacked_jars import UnpackedJars
from pants.backend.python.targets.python_library import PythonLibrary
from pants.base.build_file_aliases import BuildFileAliases
from pants.base.source_root import SourceRoot
from pants.goal.workspace import Workspace
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class BaseWhatChangedTest(ConsoleTaskTestBase):
@property
def alias_groups(self):
return BuildFileAliases.create(
targets={
'java_library': JavaLibrary,
'python_library': PythonLibrary,
'jar_library': JarLibrary,
'unpacked_jars': UnpackedJars,
'resources': Resources,
'java_thrift_library': JavaThriftLibrary,
'java_protobuf_library': JavaProtobufLibrary,
'python_thrift_library': PythonThriftLibrary,
},
context_aware_object_factories={
'source_root': SourceRoot.factory,
'rglobs': RGlobs,
'from_target': FromTarget,
},
objects={
'jar': JarDependency,
}
)
@classmethod
def task_type(cls):
return WhatChanged
def assert_console_output(self, *output, **kwargs):
options = {'spec_excludes': [], 'exclude_target_regexp': []}
if 'options' in kwargs:
options.update(kwargs['options'])
kwargs['options'] = options
super(BaseWhatChangedTest, self).assert_console_output(*output, **kwargs)
def workspace(self, files=None, parent=None, diffspec=None, diff_files=None):
class MockWorkspace(Workspace):
def touched_files(_, p):
self.assertEqual(parent or 'HEAD', p)
return files or []
def changes_in(_, ds):
self.assertEqual(diffspec, ds)
return diff_files or []
return MockWorkspace()
class WhatChangedTestBasic(BaseWhatChangedTest):
def test_nochanges(self):
self.assert_console_output(workspace=self.workspace())
def test_parent(self):
self.assert_console_output(options={'changes_since': '42'},
workspace=self.workspace(parent='42'))
def test_files(self):
self.assert_console_output(
'a/b/c',
'd',
'e/f',
options={'files': True},
workspace=self.workspace(files=['a/b/c', 'd', 'e/f'])
)
class WhatChangedTest(BaseWhatChangedTest):
def setUp(self):
super(WhatChangedTest, self).setUp()
self.add_to_build_file('root', dedent("""
source_root('src/py', python_library, resources)
source_root('resources/a1', resources)
"""))
self.add_to_build_file('root/src/py/a', dedent("""
python_library(
name='alpha',
sources=['b/c', 'd'],
resources=['test.resources']
)
jar_library(
name='beta',
jars=[
jar(org='gamma', name='ray', rev='1.137.bruce_banner')
]
)
"""))
self.add_to_build_file('root/src/py/1', dedent("""
python_library(
name='numeric',
sources=['2']
)
"""))
self.add_to_build_file('root/src/py/dependency_tree/a', dedent("""
python_library(
name='a',
sources=['a.py'],
)
"""))
self.add_to_build_file('root/src/py/dependency_tree/b', dedent("""
python_library(
name='b',
sources=['b.py'],
dependencies=['root/src/py/dependency_tree/a']
)
"""))
self.add_to_build_file('root/src/py/dependency_tree/c', dedent("""
python_library(
name='c',
sources=['c.py'],
dependencies=['root/src/py/dependency_tree/b']
)
"""))
self.add_to_build_file('root/src/thrift', dedent("""
java_thrift_library(
name='thrift',
sources=['a.thrift']
)
python_thrift_library(
name='py-thrift',
sources=['a.thrift']
)
"""))
self.add_to_build_file('root/resources/a', dedent("""
resources(
name='a_resources',
sources=['a.resources']
)
"""))
self.add_to_build_file('root/src/java/a', dedent("""
java_library(
name='a_java',
sources=rglobs("*.java"),
)
"""))
self.add_to_build_file('root/3rdparty/BUILD.twitter', dedent("""
jar_library(
name='dummy',
jars=[
jar(org='foo', name='ray', rev='1.45')
])
"""))
self.add_to_build_file('root/3rdparty/BUILD', dedent("""
jar_library(
name='dummy1',
jars=[
jar(org='foo1', name='ray', rev='1.45')
])
"""))
# This is a directory that might confuse case insensitive file systems (on macs for example).
# It should not be treated as a BUILD file.
self.create_dir('root/scripts/a/build')
self.add_to_build_file('root/scripts/BUILD', dedent("""
java_library(
name='scripts',
sources=['a/build/scripts.java'],
)
"""))
def test_spec_excludes(self):
self.assert_console_output(
'root/src/py/a:alpha',
options={'spec_excludes': 'root/src/py/1'},
workspace=self.workspace(files=['root/src/py/a/b/c', 'root/src/py/a/d'])
)
def test_owned(self):
self.assert_console_output(
'root/src/py/a:alpha',
'root/src/py/1:numeric',
workspace=self.workspace(files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'])
)
def test_multiply_owned(self):
self.assert_console_output(
'root/src/thrift:thrift',
'root/src/thrift:py-thrift',
workspace=self.workspace(files=['root/src/thrift/a.thrift'])
)
def test_build(self):
self.assert_console_output(
'root/src/py/a:alpha',
'root/src/py/a:beta',
workspace=self.workspace(files=['root/src/py/a/BUILD'])
)
def test_resource_changed(self):
self.assert_console_output(
'root/src/py/a:alpha',
workspace=self.workspace(files=['root/src/py/a/test.resources'])
)
def test_resource_changed_for_java_lib(self):
self.assert_console_output(
'root/resources/a:a_resources',
workspace=self.workspace(files=['root/resources/a/a.resources'])
)
def test_build_sibling(self):
self.assert_console_output(
'root/3rdparty:dummy',
workspace=self.workspace(files=['root/3rdparty/BUILD.twitter'])
)
def test_resource_type_error(self):
self.add_to_build_file('root/resources/a1', dedent("""
java_library(
name='a1',
sources=['a1.test'],
resources=[1]
)
"""))
self.assert_console_raises(
Exception,
workspace=self.workspace(files=['root/resources/a1/a1.test'])
)
def test_build_directory(self):
# This should ensure that a directory named the same as build files does not cause an exception.
self.assert_console_output(
'root/scripts:scripts',
workspace=self.workspace(files=['root/scripts/a/build', 'root/scripts/a/build/scripts.java'])
)
def test_fast(self):
self.assert_console_output(
'root/src/py/a:alpha',
'root/src/py/1:numeric',
options={'fast': True},
workspace=self.workspace(
files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'],
),
)
def test_diffspec(self):
self.assert_console_output(
'root/src/py/a:alpha',
'root/src/py/1:numeric',
options={'diffspec': '42'},
workspace=self.workspace(
diffspec='42',
diff_files=['root/src/py/a/b/c', 'root/src/py/a/d', 'root/src/py/1/2'],
),
)
def test_diffspec_removed_files(self):
self.assert_console_output(
'root/src/java/a:a_java',
options={'diffspec': '42'},
workspace=self.workspace(
diffspec='42',
diff_files=['root/src/java/a/b/c/Foo.java'],
),
)
def test_include_dependees(self):
self.assert_console_output(
'root/src/py/dependency_tree/a:a',
workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
)
self.assert_console_output(
'root/src/py/dependency_tree/a:a',
'root/src/py/dependency_tree/b:b',
options={'include_dependees': 'direct'},
workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
)
self.assert_console_output(
'root/src/py/dependency_tree/a:a',
'root/src/py/dependency_tree/b:b',
'root/src/py/dependency_tree/c:c',
options={'include_dependees': 'transitive'},
workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
)
def test_exclude(self):
self.assert_console_output(
'root/src/py/dependency_tree/a:a',
'root/src/py/dependency_tree/b:b',
'root/src/py/dependency_tree/c:c',
options={'include_dependees': 'transitive'},
workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
)
self.assert_console_output(
'root/src/py/dependency_tree/a:a',
'root/src/py/dependency_tree/c:c',
options={'include_dependees': 'transitive', 'exclude_target_regexp': [':b']},
workspace=self.workspace(files=['root/src/py/dependency_tree/a/a.py'])
)
def test_deferred_sources(self):
self.add_to_build_file('root/proto', dedent("""
java_protobuf_library(name='unpacked_jars',
sources=from_target(':external-source'),
)
unpacked_jars(name='external-source',
libraries=[':external-source-jars'],
include_patterns=[
'com/squareup/testing/**/*.proto',
],
)
jar_library(name='external-source-jars',
jars=[
jar(org='com.squareup.testing.protolib', name='protolib-external-test', rev='0.0.2'),
],
)
"""))
self.assert_console_output(
'root/proto:unpacked_jars',
'root/proto:external-source',
'root/proto:external-source-jars',
workspace=self.workspace(files=['root/proto/BUILD'])
)
| 29.662088
| 100
| 0.643605
| 9,446
| 0.874873
| 0
| 0
| 707
| 0.065481
| 0
| 0
| 4,822
| 0.446606
|
db1c1956b75c3a0483a601da0add4f5327ce2ad0
| 364
|
py
|
Python
|
utils/image_utils.py
|
novicasarenac/car-racing-rl
|
5bb3b2c47fb6ceda3e8f2c149485652da5a079ba
|
[
"MIT"
] | 10
|
2019-08-08T03:17:39.000Z
|
2021-12-15T08:43:29.000Z
|
utils/image_utils.py
|
novicasarenac/car-racing-rl
|
5bb3b2c47fb6ceda3e8f2c149485652da5a079ba
|
[
"MIT"
] | 7
|
2019-11-29T04:00:22.000Z
|
2022-03-11T23:38:20.000Z
|
utils/image_utils.py
|
novicasarenac/car-racing-rl
|
5bb3b2c47fb6ceda3e8f2c149485652da5a079ba
|
[
"MIT"
] | 4
|
2019-11-28T10:14:48.000Z
|
2020-04-08T08:10:37.000Z
|
import PIL
import numpy as np
def to_grayscale(img):
return np.dot(img, [0.299, 0.587, 0.144])
def zero_center(img):
return img - 127.0
def crop(img, bottom=12, left=6, right=6):
height, width = img.shape
return img[0: height - bottom, left: width - right]
def save(img, path):
pil_img = PIL.Image.fromarray(img)
pil_img.save(path)
| 17.333333
| 55
| 0.653846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
db1c1d0e4cd2adbba4dafd1f97c64d82fddfdf36
| 102
|
py
|
Python
|
sharing_groups/apps.py
|
sthagen/misp-hub
|
5b528b40796a74dc7e8367d75cb3c84920b87bfb
|
[
"BSD-3-Clause"
] | 2
|
2020-10-08T18:35:04.000Z
|
2020-10-08T18:35:08.000Z
|
sharing_groups/apps.py
|
sthagen/misp-hub
|
5b528b40796a74dc7e8367d75cb3c84920b87bfb
|
[
"BSD-3-Clause"
] | null | null | null |
sharing_groups/apps.py
|
sthagen/misp-hub
|
5b528b40796a74dc7e8367d75cb3c84920b87bfb
|
[
"BSD-3-Clause"
] | 1
|
2020-10-08T18:35:17.000Z
|
2020-10-08T18:35:17.000Z
|
from django.apps import AppConfig
class SharingGroupsConfig(AppConfig):
name = 'sharing_groups'
| 17
| 37
| 0.784314
| 65
| 0.637255
| 0
| 0
| 0
| 0
| 0
| 0
| 16
| 0.156863
|
db1fd3d38056cafb0f7ff39c5a005804f923571f
| 5,310
|
py
|
Python
|
GoogleCloud/backend.py
|
ryanjsfx2424/HowToNFTs
|
f4cff7ad676d272815bd936eb142556f92540a32
|
[
"MIT"
] | null | null | null |
GoogleCloud/backend.py
|
ryanjsfx2424/HowToNFTs
|
f4cff7ad676d272815bd936eb142556f92540a32
|
[
"MIT"
] | null | null | null |
GoogleCloud/backend.py
|
ryanjsfx2424/HowToNFTs
|
f4cff7ad676d272815bd936eb142556f92540a32
|
[
"MIT"
] | null | null | null |
## backend.py
"""
The purpose of this script is to continuously monitor the blockchain to
1) determine if a holder aquires or loses an NFT:
2) if they do, generate a new image/movie for the tokens they hold,
3) upload the new image/movie to the hosting service
4) update the metadata file
Repeat :)
(The above ordering matters!)
"""
## use python3!!!
import os
import io
import json
from web3 import Web3
## PARAMETERS
DEPLOYER_ADDRESS = "0x01656d41e041b50fc7c1eb270f7d891021937436"
INFURA_URL = "https://rinkeby.infura.io/v3/37de3193ccf345fe810932c3d0f103d8"
EXT_IMG = ".mp4"
EXT_METADATA = ".json"
ADDRESS = "0xb552E0dDd94EA72DBc089619115c81529cd8CA70" # address for deployed smart contract
## web3 stuff
w3 = Web3(Web3.HTTPProvider(INFURA_URL))
with open("../contract/abi_v020.json", "r") as fid:
rl = "".join(fid.readlines())
abi = json.loads(rl)
# end with open
## goal is to update token URI based on how many are held
## by that owner (but deployer doesn't count!)
contract = w3.eth.contract(address=ADDRESS, abi=abi)
totalSupply = contract.functions.totalSupply().call()
print("total supply: ", totalSupply)
for ii in range(totalSupply):
token = contract.functions.tokenByIndex(ii).call()
owner = contract.functions.ownerOf(token).call()
tokenList = contract.functions.walletOfOwner(owner).call()
## string comparison fails for some mysterious reason
if int(owner,16) == int(DEPLOYER_ADDRESS,16):
tokenList = [ii+1]
# end if
print("token: ", token)
print("owner: ", owner)
print("tokenList: ", tokenList)
newTokenName = str(token)
for jj in range(len(tokenList)):
if tokenList[jj] != token:
newTokenName += "_" + str(tokenList[jj])
# end if
# end for jj
print("newTokenName: ", newTokenName)
## first, check if metadata on hosting service has newTokenName.
## if so, we're good! If not, update it!
old_foos = []
metadata_correct = False
os.system("gsutil ls gs://how-to-nfts-metadata/foo" + str(token) + ".txt"
+ " > foo_file0.txt")
os.system("gsutil ls gs://how-to-nfts-metadata/foo" + str(token) + "_*.txt"
+ " > foo_file1.txt")
for jj in range(2):
with open("foo_file" + str(jj) + ".txt", "r") as fid:
for line in fid:
old_foos.append(line)
if "foo" + newTokenName + ".txt" in line:
metadata_correct = True
# end if
# end for
# end with
os.system("rm foo_file" + str(jj) + ".txt")
# end for jj
print("old_foos: ", old_foos)
if metadata_correct:
print("metadata correct (supposedly) so skipping")
continue
# end if
if len(old_foos) > 1:
print("error! only expected one old foo file.")
raise
# end if
old_foo = old_foos[0][:-1] # strip trailing newline character
old_foo = old_foo.split("metadata/")[1]
print("old_foo: ", old_foo)
## evidently metadata is not correct...
## first, we generate a new movie (if needed) and rsync with
## the GCP bucket.
## then, we'll update the metadata file, remove the old foo
## file and touch a new one
## then we'll rsync the metadata folder with the bucket.
target = "../nftmp4s/HowToKarate" + str(token) + ".mp4"
destination = "../nftmp4s/HowToKarate" + newTokenName + ".mp4"
if not os.path.exists(destination):
os.system("cp " + target + " " + destination)
for jj in range(len(tokenList)):
if tokenList[jj] != token:
print("destination: ", destination)
print("tokenList[jj]: ", tokenList[jj])
os.system('ffmpeg -y -i ' + destination + ' -i nftmp4s/HowToKarate' + str(tokenList[jj]) + '.mp4' + \
' -filter_complex "[0:v] [1:v]' + \
' concat=n=2:v=1 [v]"' + \
' -map "[v]" ' + "concat.mp4")
os.system("mv concat.mp4 " + destination)
# end if
# end for jj
## note, can rsync in parallel via rsync -m...
os.system("gsutil rsync ../nftmp4s/ gs://how-to-nfts-data/")
# end if
## next, we'll update the metadata file, remove the old foo
## file and touch a new one
## then we'll rsync the metadata folder with the bucket.
os.system("cp ../metadata/" + str(token) + ".json temp.json")
with open("../metadata/" + str(token) + ".json", "w") as fid_write:
with open("temp.json", "r") as fid_read:
for line in fid_read:
if '"image":' in line:
line = line.split("HowToKarate")[0] + "HowToKarate" + \
str(newTokenName) + '.mp4",\n'
# end i
fid_write.write(line)
# end for line
# end with open write
# end with open read
os.system("rm temp.json")
os.system("touch ../metadata/foo" + str(newTokenName) + ".txt")
os.system("rm ../metadata/" + old_foo)
## last, we need to update the _metadata file and then rsync.
with open("../metadata/_metadata.json", "w") as fid_write:
fid_write.write("{\n")
for jj in range(1,25):
with open("../metadata/" + str(jj) + ".json", "r") as fid_read:
for line in fid_read:
if "}" in line and len(line) == 2 and jj != 24:
line = "},\n"
# end if
fid_write.write(line)
# end for
# end with open
fid_write.write("}")
# end with open
os.system("gsutil rsync -d ../metadata/ gs://how-to-nfts-metadata/")
# end for ii
## end test.py
| 32.378049
| 109
| 0.628625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,632
| 0.495669
|
db20e9e55635779f1f3c32e48206263757ae91d0
| 10,875
|
py
|
Python
|
dependencies/pyffi/formats/tga/__init__.py
|
korri123/fnv-blender-niftools-addon
|
ce8733e011c7d74c79be265832e1b06e85faf5ee
|
[
"BSD-3-Clause"
] | 4
|
2021-09-27T09:58:44.000Z
|
2022-02-05T16:12:28.000Z
|
io_scene_niftools_updater/backup/dependencies/pyffi/formats/tga/__init__.py
|
korri123/fnv-blender-niftools-addon
|
ce8733e011c7d74c79be265832e1b06e85faf5ee
|
[
"BSD-3-Clause"
] | 5
|
2019-11-10T16:20:09.000Z
|
2019-12-02T14:23:58.000Z
|
.venv/Lib/site-packages/pyffi/formats/tga/__init__.py
|
ndaley7/BodySlide-Group-Generator
|
3ed7b78c5f5ccec103b6bf06bc24398cfb6ad014
|
[
"BSD-3-Clause"
] | null | null | null |
"""
:mod:`pyffi.formats.tga` --- Targa (.tga)
=========================================
Implementation
--------------
.. autoclass:: TgaFormat
:show-inheritance:
:members:
Regression tests
----------------
Read a TGA file
^^^^^^^^^^^^^^^
>>> # check and read tga file
>>> import os
>>> from os.path import dirname
>>> dirpath = __file__
>>> for i in range(4): #recurse up to root repo dir
... dirpath = dirname(dirpath)
>>> repo_root = dirpath
>>> format_root = os.path.join(repo_root, 'tests', 'formats', 'tga')
>>> file = os.path.join(format_root, 'test.tga').replace("\\\\", "/")
>>> stream = open(file, 'rb')
>>> data = TgaFormat.Data()
>>> data.inspect(stream)
>>> data.read(stream)
>>> stream.close()
>>> data.header.width
60
>>> data.header.height
20
Parse all TGA files in a directory tree
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>>> for stream, data in TgaFormat.walkData(format_root):
... try:
... # the replace call makes the doctest also pass on windows
... os_path = stream.name
... split = (os_path.split(os.sep))[-4:]
... rejoin = os.path.join(*split).replace("\\\\", "/")
... print("reading %s" % rejoin)
... except Exception:
... print(
... "Warning: read failed due corrupt file,"
... " corrupt format description, or bug.") # doctest: +REPORT_NDIFF
reading tests/formats/tga/test.tga
reading tests/formats/tga/test_footer.tga
Create a TGA file from scratch and write to file
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>>> data = TgaFormat.Data()
>>> from tempfile import TemporaryFile
>>> stream = TemporaryFile()
>>> data.write(stream)
>>> stream.close()
"""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright (c) 2007-2012, Python File Format Interface
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Python File Format Interface
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
import struct, os, re
import pyffi.object_models.xml
import pyffi.object_models.common
import pyffi.object_models.xml.basic
import pyffi.object_models.xml.struct_
import pyffi.object_models
import pyffi.utils.graph
from pyffi.utils.graph import EdgeFilter
class TgaFormat(pyffi.object_models.xml.FileFormat):
"""This class implements the TGA format."""
xml_file_name = 'tga.xml'
# where to look for tga.xml and in what order:
# TGAXMLPATH env var, or TgaFormat module directory
xml_file_path = [os.getenv('TGAXMLPATH'), os.path.dirname(__file__)]
# filter for recognizing tga files by extension
RE_FILENAME = re.compile(r'^.*\.tga$', re.IGNORECASE)
# basic types
int = pyffi.object_models.common.Int
uint = pyffi.object_models.common.UInt
byte = pyffi.object_models.common.Byte
ubyte = pyffi.object_models.common.UByte
char = pyffi.object_models.common.Char
short = pyffi.object_models.common.Short
ushort = pyffi.object_models.common.UShort
float = pyffi.object_models.common.Float
PixelData = pyffi.object_models.common.UndecodedData
class FooterString(pyffi.object_models.xml.basic.BasicBase):
"""The Targa footer signature."""
def __str__(self):
return 'TRUEVISION-XFILE.\x00'
def read(self, stream, data):
"""Read signature from stream.
:param stream: The stream to read from.
:type stream: file
"""
signat = stream.read(18)
if signat != self.__str__().encode("ascii"):
raise ValueError(
"invalid Targa signature: expected '%s' but got '%s'"
%(self.__str__(), signat))
def write(self, stream, data):
"""Write signature to stream.
:param stream: The stream to read from.
:type stream: file
"""
stream.write(self.__str__().encode("ascii"))
def get_value(self):
"""Get signature.
:return: The signature.
"""
return self.__str__()
def set_value(self, value):
"""Set signature.
:param value: The value to assign.
:type value: str
"""
if value != self.__str__():
raise ValueError(
"invalid Targa signature: expected '%s' but got '%s'"
%(self.__str__(), value))
def get_size(self, data=None):
"""Return number of bytes that the signature occupies in a file.
:return: Number of bytes.
"""
return 18
def get_hash(self, data=None):
"""Return a hash value for the signature.
:return: An immutable object that can be used as a hash.
"""
return self.__str__()
class Image(pyffi.utils.graph.GlobalNode):
def __init__(self):
# children are either individual pixels, or RLE packets
self.children = []
def read(self, stream, data):
data = data
if data.header.image_type in (TgaFormat.ImageType.INDEXED,
TgaFormat.ImageType.RGB,
TgaFormat.ImageType.GREY):
self.children = [
TgaFormat.Pixel(argument=data.header.pixel_size)
for i in range(data.header.width
* data.header.height)]
for pixel in self.children:
pixel.read(stream, data)
else:
self.children = []
count = 0
while count < data.header.width * data.header.height:
pixel = TgaFormat.RLEPixels(
argument=data.header.pixel_size)
pixel.read(stream, data)
self.children.append(pixel)
count += pixel.header.count + 1
def write(self, stream, data):
data = data
for child in self.children:
child.arg = data.header.pixel_size
child.write(stream, data)
def get_detail_child_nodes(self, edge_filter=EdgeFilter()):
for child in self.children:
yield child
def get_detail_child_names(self, edge_filter=EdgeFilter()):
for i in range(len(self.children)):
yield str(i)
class Data(pyffi.object_models.FileFormat.Data):
def __init__(self):
self.header = TgaFormat.Header()
self.image = TgaFormat.Image()
self.footer = None # TgaFormat.Footer() is optional
def inspect(self, stream):
"""Quick heuristic check if stream contains Targa data,
by looking at the first 18 bytes.
:param stream: The stream to inspect.
:type stream: file
"""
# XXX todo: set some of the actual fields of the header
pos = stream.tell()
# read header
try:
id_length, colormap_type, image_type, \
colormap_index, colormap_length, colormap_size, \
x_origin, y_origin, width, height, \
pixel_size, flags = struct.unpack("<BBBHHBHHHHBB",
stream.read(18))
except struct.error:
# could not read 18 bytes
# not a TGA file
raise ValueError("Not a Targa file.")
finally:
stream.seek(pos)
# check if tga type is valid
# check pixel size
# check width and height
if not(image_type in (1, 2, 3, 9, 10, 11)
and pixel_size in (8, 24, 32)
and width <= 100000
and height <= 100000):
raise ValueError("Not a Targa file.")
# this looks like a tga file!
def read(self, stream):
"""Read a tga file.
:param stream: The stream from which to read.
:type stream: ``file``
"""
# read the file
self.inspect(stream) # quick check
# header
self.header.read(stream, self)
# image
self.image.read(stream, self)
# check if we are at the end of the file
if not stream.read(1):
self.footer = None
return
# footer
stream.seek(-26, os.SEEK_END)
self.footer = TgaFormat.Footer()
self.footer.read(stream, self)
def write(self, stream):
"""Write a tga file.
:param stream: The stream to write to.
:type stream: ``file``
"""
self.header.write(stream, self)
self.image.write(stream, self)
if self.footer:
self.footer.write(stream, self)
def get_global_child_nodes(self, edge_filter=EdgeFilter()):
yield self.header
yield self.image
if self.footer:
yield self.footer
def get_global_child_names(self, edge_filter=EdgeFilter()):
yield "Header"
yield "Image"
if self.footer:
yield "Footer"
if __name__ == '__main__':
import doctest
doctest.testmod()
| 34.090909
| 80
| 0.57269
| 7,168
| 0.659126
| 614
| 0.05646
| 0
| 0
| 0
| 0
| 5,357
| 0.492598
|
db2348f24a291f4c0fb84c5876a92a0022f59eed
| 355
|
py
|
Python
|
python/push.py
|
swallowstalker/postopush
|
6ec7e791aff1e3d868711d62e6c702a231bc1d65
|
[
"MIT"
] | 1
|
2020-02-11T03:41:49.000Z
|
2020-02-11T03:41:49.000Z
|
python/push.py
|
swallowstalker/postopush
|
6ec7e791aff1e3d868711d62e6c702a231bc1d65
|
[
"MIT"
] | null | null | null |
python/push.py
|
swallowstalker/postopush
|
6ec7e791aff1e3d868711d62e6c702a231bc1d65
|
[
"MIT"
] | null | null | null |
import telegram
import os
def main():
token = os.getenv("TOKEN", None)
message = os.getenv("MESSAGE", "No message, please set MESSAGE env")
chat_id = os.getenv("CHAT_ID", None)
bot = telegram.Bot(token=token)
bot.send_message(chat_id=chat_id, text=message, parse_mode=telegram.ParseMode.HTML)
if __name__ == "__main__":
main()
| 23.666667
| 87
| 0.687324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 71
| 0.2
|
db23585c3e9e1de8759f993492930d5a53b54101
| 4,309
|
py
|
Python
|
advent-of-code-2018/day 13/main.py
|
gikf/advent-of-code
|
923b026ce87121b73093554734746c2ecb17c5e2
|
[
"MIT"
] | null | null | null |
advent-of-code-2018/day 13/main.py
|
gikf/advent-of-code
|
923b026ce87121b73093554734746c2ecb17c5e2
|
[
"MIT"
] | null | null | null |
advent-of-code-2018/day 13/main.py
|
gikf/advent-of-code
|
923b026ce87121b73093554734746c2ecb17c5e2
|
[
"MIT"
] | null | null | null |
"""Advent of Code 2018 Day 13."""
from copy import deepcopy
CARTS = '<>^v'
INTERSECTION = '+'
CURVES = '\\/'
cart_to_direction = {
'<': 180,
'^': 90,
'>': 0,
'v': 270,
}
direction_to_move = {
0: (0, 1),
90: (-1, 0),
180: (0, -1),
270: (1, 0),
}
direction_to_cart = {
0: '>',
90: '^',
180: '<',
270: 'v',
}
turns = {
0: 90,
1: 0,
2: -90,
}
next_direction = {
0: {
'\\': 270,
'/': 90,
},
90: {
'\\': 180,
'/': 0,
},
180: {
'\\': 90,
'/': 270,
},
270: {
'\\': 0,
'/': 180,
},
}
def main(file_input='input.txt'):
lines = [[*line.strip('\n')] for line in get_file_contents(file_input)]
carts = find_carts(lines)
tracks = remove_carts(lines)
collision = follow_tracks(tracks, deepcopy(carts))
print('First collision:', ','.join(str(num) for num in collision[::-1]))
last_cart_location = follow_tracks(tracks, deepcopy(carts), True)
print('Last cart position after all crashes:',
','.join(str(num) for num in last_cart_location[::-1]))
def follow_tracks(tracks, carts, prevent_collision=False):
"""Follow tracks with carts. Optionally prevent ending with collision."""
while len(carts) > 1:
carts, collisions = move_carts(tracks, carts)
if collisions and not prevent_collision:
return collisions[0]
return carts[0][0]
def find_repeated_position(carts):
"""Find position taken by two carts - colliding."""
repeated = []
seen_positions = set()
for cur_position, *_ in carts:
position = tuple(cur_position)
if position in seen_positions:
repeated.append(cur_position)
seen_positions.add(position)
return repeated
def move_carts(tracks, carts):
"""Move carts by one on tracks."""
collisions = []
for cart in sorted(carts):
position, direction, turn = cart
move = direction_to_move[direction]
next_position = [pos + change for pos, change in zip(position, move)]
next_square = get_square(tracks, next_position)
if next_square == INTERSECTION:
next_direction, next_turn = turn_cart(direction, turn)
cart[1] = next_direction
cart[2] = next_turn
elif is_curve(next_square):
next_direction = curve_cart(direction, next_square)
cart[1] = next_direction
cart[0] = next_position
repeated_position = find_repeated_position(carts)
if repeated_position:
collisions.extend(repeated_position)
carts = remove_collided_carts(carts, repeated_position)
return carts, collisions
def remove_collided_carts(carts, repeated_position):
"""Remove carts colliding on the repeated_position."""
return [cart for cart in carts
if cart[0] not in repeated_position]
def curve_cart(direction, curve):
"""Move cart over the curve."""
return next_direction[direction][curve]
def turn_cart(direction, turn):
"""Turn cart from direction, depending on the turn type."""
return (direction + turns[turn]) % 360, (turn + 1) % len(turns)
def is_curve(square):
"""Check if square is one of the curves."""
return square in CURVES
def get_square(tracks, position):
"""Get square from tracks with position."""
row, col = position
return tracks[row][col]
def remove_carts(lines):
"""Remove carts from lines, replacing them with normal tracks."""
for row_no, row in enumerate(lines):
for col_no, square in enumerate(row):
if square in '<>':
lines[row_no][col_no] = '-'
elif square in 'v^':
lines[row_no][col_no] = '|'
return lines
def find_carts(lines):
"""Find carts in lines. Return list of lists with cart parameters."""
carts = []
for row_no, row in enumerate(lines):
for col_no, square in enumerate(row):
if square not in CARTS:
continue
carts.append([[row_no, col_no], cart_to_direction[square], 0])
return carts
def get_file_contents(file):
"""Read all lines from file."""
with open(file) as f:
return f.readlines()
if __name__ == '__main__':
main()
| 26.115152
| 77
| 0.598747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 754
| 0.174983
|
db24a982814e1d245a07e054f71ca678690fe6ad
| 13,037
|
py
|
Python
|
goopylib/applications/custom_ease.py
|
YuvrajThorat/goopylib
|
b6bc593b7bcc92498a507f34b2190365a0ac51e7
|
[
"MIT"
] | null | null | null |
goopylib/applications/custom_ease.py
|
YuvrajThorat/goopylib
|
b6bc593b7bcc92498a507f34b2190365a0ac51e7
|
[
"MIT"
] | null | null | null |
goopylib/applications/custom_ease.py
|
YuvrajThorat/goopylib
|
b6bc593b7bcc92498a507f34b2190365a0ac51e7
|
[
"MIT"
] | null | null | null |
from goopylib.imports import *
from pathlib import Path as pathlib_Path
# I kinda wanted to scrap this, it wasn't that good.
def create_custom_ease():
window = Window(title="goopylib: Create Custom Ease", width=get_screen_size()[1] * 0.7,
height=get_screen_size()[1] * 0.7, autoflush=False, bk_colour=DARKER_GREY)
window.set_coords(0, 0, 1000, 1000)
path = f"{pathlib_Path(__file__).parent.absolute()}/textures/"
Image(Point(500, 500), f"{path}background.png").draw(window)
add_button = Button(Image(Point(882, 219), f"{path}AddButton.png"),
Image(Point(882, 219), f"{path}AddButton.png").resize_factor(1.03),
Image(Point(882, 219), f"{path}AddButton.png").resize_factor(1.07),
Image(Point(882, 219), f"{path}AddButton.png").convert_greyscale()).draw(window)
clear_button = Button(Image(Point(882, 280), f"{path}ClearButton.png"),
Image(Point(882, 280), f"{path}ClearButton.png").resize_factor(1.03),
Image(Point(882, 280), f"{path}ClearButton.png").resize_factor(1.07)).draw(window)
play_button = Button(Image(Point(256, 805), f"{path}PlayButton.png"),
Image(Point(256, 805), f"{path}PlayButton.png").resize_factor(1.03),
Image(Point(256, 805), f"{path}PlayButton.png").resize_factor(1.07)).draw(window)
shape_button = CycleButton(0,
Button(Image(Point(99, 805), f"{path}RectangleButton.png"),
Image(Point(99, 805), f"{path}RectangleButton.png").resize_factor(1.03),
Image(Point(99, 805), f"{path}RectangleButton.png").resize_factor(1.07)),
Button(Image(Point(99, 805), f"{path}CircleButton.png"),
Image(Point(99, 805), f"{path}CircleButton.png").resize_factor(1.03),
Image(Point(99, 805), f"{path}CircleButton.png").resize_factor(1.07))) \
.draw(window)
interpolation_button = CycleButton(0,
Button(Image(Point(882, 109), f"{path}BezierButton.png"),
Image(Point(882, 109), f"{path}BezierButton.png").resize_factor(1.03),
Image(Point(882, 109), f"{path}BezierButton.png").resize_factor(1.07)),
Button(Image(Point(882, 109), f"{path}CubicButton.png"),
Image(Point(882, 109), f"{path}CubicButton.png").resize_factor(1.03),
Image(Point(882, 109), f"{path}CubicButton.png").resize_factor(1.07)),
Button(Image(Point(882, 109), f"{path}LinearButton.png"),
Image(Point(882, 109), f"{path}LinearButton.png").resize_factor(1.03),
Image(Point(882, 109), f"{path}LinearButton.png").resize_factor(1.07))) \
.draw(window)
template_button = CycleButton(0,
Button(Image(Point(882, 411), f"{path}LinearTemplate.png"),
Image(Point(882, 411), f"{path}LinearTemplate.png").resize_factor(1.03),
Image(Point(882, 411), f"{path}LinearTemplate.png").resize_factor(1.07)),
Button(Image(Point(882, 411), f"{path}BackTemplate.png"),
Image(Point(882, 411), f"{path}BackTemplate.png").resize_factor(1.03),
Image(Point(882, 411), f"{path}BackTemplate.png").resize_factor(1.07)),
Button(Image(Point(882, 411), f"{path}ExponentialTemplate.png"),
Image(Point(882, 411), f"{path}ExponentialTemplate.png").resize_factor(1.03),
Image(Point(882, 411), f"{path}ExponentialTemplate.png").resize_factor(1.07)),
Button(Image(Point(882, 411), f"{path}PolynomialTemplate.png"),
Image(Point(882, 411), f"{path}PolynomialTemplate.png").resize_factor(1.03),
Image(Point(882, 411), f"{path}PolynomialTemplate.png").resize_factor(1.07)),
Button(Image(Point(882, 411), f"{path}SineTemplate.png"),
Image(Point(882, 411), f"{path}SineTemplate.png").resize_factor(1.03),
Image(Point(882, 411), f"{path}SineTemplate.png").resize_factor(1.07)),
Button(Image(Point(882, 411), f"{path}CircleTemplate.png"),
Image(Point(882, 411), f"{path}CircleTemplate.png").resize_factor(1.03),
Image(Point(882, 411), f"{path}CircleTemplate.png").resize_factor(1.07)),
Button(Image(Point(882, 411), f"{path}ElasticTemplate.png"),
Image(Point(882, 411), f"{path}ElasticTemplate.png").resize_factor(1.03),
Image(Point(882, 411), f"{path}ElasticTemplate.png").resize_factor(1.07)),
Button(Image(Point(882, 411), f"{path}BounceTemplate.png"),
Image(Point(882, 411), f"{path}BounceTemplate.png").resize_factor(1.03),
Image(Point(882, 411), f"{path}BounceTemplate.png").resize_factor(1.07)),
disabled_graphic=Image(Point(882, 428), f"{path}CustomTemplate.png")).draw(window)
save_button = Button(Image(Point(882, 647), f"{path}SaveButton.png"),
Image(Point(882, 647), f"{path}SaveButton.png").resize_factor(1.03),
Image(Point(882, 647), f"{path}SaveButton.png").resize_factor(1.07)).draw(window)
open_button = Button(Image(Point(882, 708), f"{path}OpenButton.png"),
Image(Point(882, 708), f"{path}OpenButton.png").resize_factor(1.03),
Image(Point(882, 708), f"{path}OpenButton.png").resize_factor(1.07)).draw(window)
simulation_graphic1 = CycleButton(0, Rectangle(Point(50, 875), Point(125, 950), fill=LIGHTER_BLUE, outline_width=0),
Circle(Point(88, 913), 38, fill=LIGHTER_BLUE, outline_width=0),
autoflush=False).draw(window)
simulation_graphic2 = CycleButton(0,
Rectangle(Point(845, 845), Point(920, 920), fill=LIGHTER_BLUE, outline_width=0),
Circle(Point(883, 883), 37.5, fill=LIGHTER_BLUE, outline_width=0),
autoflush=False).draw(window)
graph = Image(Point(500, 500), f"{path}Graph.png").draw(window)
delete_dropdown = Button(Image(Point(0, 0), f"{path}DeleteDropdown.png"),
Image(Point(0, 0), f"{path}DeleteDropdownHover.png"))
colour_grad = colour_gradient(LIGHTER_VIOLET, DARKEST_VIOLET, 100)
resolution = 2
control_points = [Circle(p, radius=10, fill=VIOLET, outline_width=0).draw(window) for p in
[Point(110, 673), Point(668, 118)]]
last_control_points = control_points.copy()
points = []
for obj in control_points:
points.append(obj.anchor)
curve = []
for t in range(int(10 ** resolution)):
t /= 10 ** resolution
curve.append(py_bezier_curve(t, points))
circle_objects = []
for i in range(int(10 ** resolution)):
circle_objects.append(Circle(curve[i], radius=2, fill=colour_grad[i], outline_width=0).draw(window))
selected_point = None
selected_curve_point = None
def ease(time):
points = []
for obj in control_points:
points.append(obj.anchor)
if interpolation_button.get_state() == 0:
return 1 - (py_bezier_curve(time, points).y - 118) / 555
else:
return 1 - (LinearInterpolation(time, points).y - 118) / 555
def update_curve():
nonlocal circle_objects, curve, point, last_control_points, t, i
points = []
for obj in control_points:
points.append(obj.anchor)
curve = []
for t in range(int(10 ** resolution)):
t /= 10 ** resolution
if interpolation_button.get_state() == 0:
curve.append(py_bezier_curve(t, points))
else:
curve.append(LinearInterpolation(t, points))
for i in range(int(10 ** resolution)):
circle_objects[i].move_to_y(curve[i].y)
last_control_points = control_points.copy()
while True:
t = time.time()
mouse_pos = window.check_left_mouse_click()
if mouse_pos is not None:
if open_button.is_clicked(mouse_pos):
filename = openfilebrowser()
elif save_button.is_clicked(mouse_pos):
filename = openfilebrowser()
elif interpolation_button.is_clicked(mouse_pos):
update_curve()
elif play_button.is_clicked(mouse_pos):
play_button.disable()
"""
x = []
for t in range(int(10 ** resolution)):
t /= 10 ** resolution
x.append(ease(t))
plt.plot(x)
plt.show()"""
simulation_graphic1.glide_x(500, time=2, easing=ease)
simulation_graphic2.animate_rotate(360, time=2, easing=ease)
elif shape_button.is_clicked(mouse_pos):
simulation_graphic1.set_state(shape_button.get_state())
simulation_graphic2.set_state(shape_button.get_state())
elif clear_button.is_clicked(mouse_pos):
for point in control_points[1:-1]:
point.undraw()
control_points.remove(point)
elif add_button.is_clicked(mouse_pos):
add_button.disable()
if selected_curve_point is None:
i = int((10 ** resolution / 2) * 2 ** (2 - len(control_points)))
control_points.insert(1, Circle(curve[i], radius=10, fill=colour_grad[i], outline_width=0).draw(
window))
control_points[1].set_draggable(callback=update_curve)
else:
i = max(
math.ceil(circle_objects.index(selected_curve_point) / (100 / (len(control_points) - 1))) - 1,
1)
control_points.insert(i, Circle(selected_curve_point.get_anchor(), radius=10,
fill=colour_grad[circle_objects.index(selected_curve_point)],
outline_width=0).draw(window))
selected_curve_point = None
control_points[i].set_draggable(callback=update_curve)
add_button.enable()
template_button.disable()
elif delete_dropdown.is_clicked(mouse_pos):
delete_dropdown.undraw()
selected_point.undraw()
control_points.remove(selected_point)
if len(control_points) == 2:
template_button.enable()
else:
for point in circle_objects:
if point.is_clicked(mouse_pos):
for i, p in enumerate(circle_objects):
p.set_fill(colour_grad[i])
point.set_fill(WHITE)
selected_curve_point = point
break
if last_control_points != control_points:
update_curve()
if play_button.is_disabled and not simulation_graphic1.is_gliding:
play_button.enable()
mouse_pos = window.check_right_mouse_click()
for point in control_points[1:-1]:
if point.is_clicked(mouse_pos):
delete_dropdown.draw(window).move_to_point(point.anchor, align="topleft")
selected_point = point
window.update()
def create_custom_ease2():
window = Window(title="goopylib_b: Create Custom Ease", width=get_screen_size()[1] * 0.7,
height=get_screen_size()[1] * 0.7, autoflush=False, bk_colour=DARKER_GREY)
window.set_coords(0, 0, 1000, 1000)
path = f"{pathlib_Path(__file__).parent.absolute()}/textures/"
while True:
if window.is_closed():
break
window.update()
window.close()
| 49.570342
| 120
| 0.535246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,993
| 0.152873
|
db26ca941f83e142751cfd4f2744ef8039848b25
| 537
|
py
|
Python
|
app/lib/duplication_check/train.py
|
WHUT-XGP/ASoulCnki
|
98f29532e43e73f8e364d55b284558de5803b8b9
|
[
"Apache-2.0"
] | null | null | null |
app/lib/duplication_check/train.py
|
WHUT-XGP/ASoulCnki
|
98f29532e43e73f8e364d55b284558de5803b8b9
|
[
"Apache-2.0"
] | null | null | null |
app/lib/duplication_check/train.py
|
WHUT-XGP/ASoulCnki
|
98f29532e43e73f8e364d55b284558de5803b8b9
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
Filename :train.py
Description :获取小作文摘要
Time :2021/06/22 15:21:08
Author :hwa
Version :1.0
"""
from app.lib.duplication_check.reply_database import ReplyDatabase
import time
def train_data():
start_time = time.time()
db = ReplyDatabase.load_from_json("data/bilibili_cnki_reply.json")
db.dump_to_image("database.dat")
end_time = time.time()
print("train cost {} s".format(end_time - start_time))
if __name__ == "__main__":
train_data()
| 23.347826
| 70
| 0.646182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 253
| 0.459165
|
db28a45f5705fff1d415e5578ed431780d73980b
| 5,837
|
py
|
Python
|
buildscripts/task_generation/evg_config_builder.py
|
benety/mongo
|
203430ac9559f82ca01e3cbb3b0e09149fec0835
|
[
"Apache-2.0"
] | null | null | null |
buildscripts/task_generation/evg_config_builder.py
|
benety/mongo
|
203430ac9559f82ca01e3cbb3b0e09149fec0835
|
[
"Apache-2.0"
] | null | null | null |
buildscripts/task_generation/evg_config_builder.py
|
benety/mongo
|
203430ac9559f82ca01e3cbb3b0e09149fec0835
|
[
"Apache-2.0"
] | null | null | null |
"""Builder for generating evergreen configuration."""
from threading import Lock
from typing import Set, List, Dict
import inject
from shrub.v2 import ShrubProject, BuildVariant, ExistingTask, Task
from buildscripts.patch_builds.task_generation import validate_task_generation_limit
from buildscripts.task_generation.constants import ACTIVATE_ARCHIVE_DIST_TEST_DEBUG_TASK
from buildscripts.task_generation.gen_task_service import GenTaskService, \
GenTaskOptions, ResmokeGenTaskParams, FuzzerGenTaskParams
from buildscripts.task_generation.generated_config import GeneratedFile, GeneratedConfiguration
from buildscripts.task_generation.resmoke_proxy import ResmokeProxyService
from buildscripts.task_generation.suite_split import SuiteSplitService, GeneratedSuite, \
SuiteSplitParameters
from buildscripts.task_generation.task_types.fuzzer_tasks import FuzzerTask
# pylint: disable=too-many-instance-attributes
class EvgConfigBuilder:
"""A builder class for building evergreen configuration."""
@inject.autoparams()
def __init__(
self,
resmoke_proxy: ResmokeProxyService,
suite_split_service: SuiteSplitService,
evg_config_gen_service: GenTaskService,
gen_options: GenTaskOptions,
) -> None:
"""
Initialize a new builder.
:param resmoke_proxy: Proxy to access resmoke data.
:param suite_split_service: Service to split suites into sub-suites.
:param evg_config_gen_service: Service to generate evergreen configuration.
:param gen_options: Global options for generating evergreen configuration.
"""
self.resmoke_proxy = resmoke_proxy
self.suite_split_service = suite_split_service
self.evg_config_gen_service = evg_config_gen_service
self.gen_options = gen_options
self.shrub_config = ShrubProject.empty()
self.build_variants: Dict[str, BuildVariant] = {}
self.generated_files: List[GeneratedFile] = []
self.lock = Lock()
def get_build_variant(self, build_variant: str) -> BuildVariant:
"""
Get the build variant object, creating it if it doesn't exist.
NOTE: The `lock` should be held by any functions calling this one.
:param build_variant: Name of build variant.
:return: BuildVariant object being created.
"""
if build_variant not in self.build_variants:
self.build_variants[build_variant] = BuildVariant(build_variant, activate=False)
return self.build_variants[build_variant]
def generate_suite(self, split_params: SuiteSplitParameters,
gen_params: ResmokeGenTaskParams) -> None:
"""
Add configuration to generate a split version of the specified resmoke suite.
:param split_params: Parameters of how resmoke suite should be split.
:param gen_params: Parameters of how evergreen configuration should be generated.
"""
generated_suite = self.suite_split_service.split_suite(split_params)
with self.lock:
build_variant = self.get_build_variant(generated_suite.build_variant)
resmoke_tasks = self.evg_config_gen_service.generate_task(generated_suite,
build_variant, gen_params)
self.generated_files.extend(self.resmoke_proxy.render_suite_files(resmoke_tasks))
def generate_fuzzer(self, fuzzer_params: FuzzerGenTaskParams) -> FuzzerTask:
"""
Add configuration to generate the specified fuzzer task.
:param fuzzer_params: Parameters of how the fuzzer suite should generated.
"""
with self.lock:
build_variant = self.get_build_variant(fuzzer_params.variant)
return self.evg_config_gen_service.generate_fuzzer_task(fuzzer_params, build_variant)
def add_display_task(self, display_task_name: str, execution_task_names: Set[str],
build_variant: str) -> None:
"""
Add configuration to generate the specified display task.
:param display_task_name: Name of display task to create.
:param execution_task_names: Name of execution tasks to include in display task.
:param build_variant: Name of build variant to add to.
"""
execution_tasks = {ExistingTask(task_name) for task_name in execution_task_names}
with self.lock:
build_variant = self.get_build_variant(build_variant)
build_variant.display_task(display_task_name, execution_existing_tasks=execution_tasks)
def generate_archive_dist_test_debug_activator_task(self, variant: str):
"""
Generate dummy task to activate the task that archives debug symbols.
We can't activate it directly as it's not generated.
"""
with self.lock:
build_variant = self.get_build_variant(variant)
build_variant.add_existing_task(ExistingTask(ACTIVATE_ARCHIVE_DIST_TEST_DEBUG_TASK))
def build(self, config_file_name: str) -> GeneratedConfiguration:
"""
Build the specified configuration and return the files needed to create it.
:param config_file_name: Filename to use for evergreen configuration.
:return: Dictionary of files and contents that are needed to create configuration.
"""
for build_variant in self.build_variants.values():
self.shrub_config.add_build_variant(build_variant)
if not validate_task_generation_limit(self.shrub_config):
raise ValueError("Attempting to generate more than max tasks in single generator")
self.generated_files.append(GeneratedFile(config_file_name, self.shrub_config.json()))
return GeneratedConfiguration(self.generated_files)
| 46.325397
| 99
| 0.716807
| 4,912
| 0.841528
| 0
| 0
| 1,015
| 0.173891
| 0
| 0
| 2,002
| 0.342984
|
db299a97d65e80dbbfa712b50525b811276c7bff
| 4,424
|
py
|
Python
|
test/unit/vint/ast/plugin/scope_plugin/stub_node.py
|
mosheavni/vint
|
9078dd626415cfe37ddaf03032e714bbaca8b336
|
[
"MIT"
] | 538
|
2015-01-03T18:54:53.000Z
|
2020-01-11T01:34:51.000Z
|
test/unit/vint/ast/plugin/scope_plugin/stub_node.py
|
mosheavni/vint
|
9078dd626415cfe37ddaf03032e714bbaca8b336
|
[
"MIT"
] | 235
|
2015-01-01T06:20:01.000Z
|
2020-01-17T11:32:39.000Z
|
test/unit/vint/ast/plugin/scope_plugin/stub_node.py
|
mosheavni/vint
|
9078dd626415cfe37ddaf03032e714bbaca8b336
|
[
"MIT"
] | 43
|
2015-01-23T16:59:49.000Z
|
2019-12-27T10:56:12.000Z
|
from vint.ast.node_type import NodeType
from vint.ast.plugin.scope_plugin.identifier_attribute import (
IDENTIFIER_ATTRIBUTE,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG,
IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG,
IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG,
IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT,
)
def create_id(id_value, is_declarative=True, is_function=False, is_autoload=False,
is_declarative_parameter=False, is_on_str_expr_context=False):
return {
'type': NodeType.IDENTIFIER.value,
'value': id_value,
IDENTIFIER_ATTRIBUTE: {
IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: is_declarative,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: is_function,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: is_autoload,
IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: is_declarative_parameter,
IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: is_on_str_expr_context,
},
}
def create_env(env_value):
return {
'type': NodeType.ENV.value,
'value': env_value,
IDENTIFIER_ATTRIBUTE: {
IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: True,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: False,
IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: False,
},
}
def create_option(opt_value):
return {
'type': NodeType.OPTION.value,
'value': opt_value,
IDENTIFIER_ATTRIBUTE: {
IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: True,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: False,
IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: False,
},
}
def create_reg(reg_value):
return {
'type': NodeType.REG.value,
'value': reg_value,
IDENTIFIER_ATTRIBUTE: {
IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: True,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: False,
IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: False,
},
}
def create_curlyname(is_declarative=True):
""" Create a node as a `my_{'var'}`
"""
return {
'type': NodeType.CURLYNAME.value,
'value': [
{
'type': NodeType.CURLYNAMEPART.value,
'value': 'my_',
},
{
'type': NodeType.CURLYNAMEEXPR.value,
'value': {
'type': NodeType.CURLYNAMEEXPR.value,
'value': 'var',
},
}
],
IDENTIFIER_ATTRIBUTE: {
IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: is_declarative,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: True,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: False,
IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: False,
},
}
def create_subscript_member(is_declarative=True):
return {
'type': NodeType.IDENTIFIER.value,
'value': 'member',
IDENTIFIER_ATTRIBUTE: {
IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: is_declarative,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: False,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: True,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: False,
IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: False,
},
}
| 35.96748
| 82
| 0.667043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 178
| 0.040235
|
db29c33820407f3d84d5b4458a06a85d146e75c7
| 1,130
|
py
|
Python
|
core/analyser.py
|
hryu/cpu_usage_analyser
|
bc870c4fd3be873033a7f43612c1a0379d5bf419
|
[
"MIT"
] | null | null | null |
core/analyser.py
|
hryu/cpu_usage_analyser
|
bc870c4fd3be873033a7f43612c1a0379d5bf419
|
[
"MIT"
] | null | null | null |
core/analyser.py
|
hryu/cpu_usage_analyser
|
bc870c4fd3be873033a7f43612c1a0379d5bf419
|
[
"MIT"
] | null | null | null |
class Analyser:
def __init__(self, callbacks, notifiers, state):
self.cbs = callbacks
self.state = state
self.notifiers = notifiers
def on_begin_analyse(self, timestamp):
pass
def on_end_analyse(self, timestamp):
pass
def analyse(self, event):
event_name = event.name
# for 'perf' tool
split_event_name = event.name.split(':')
if len(split_event_name) > 1:
event_name = split_event_name[1].strip()
if event_name in self.cbs:
self.cbs[event_name](event)
elif (event_name.startswith('sys_enter') or \
event_name.startswith('syscall_entry_')) and \
'syscall_entry' in self.cbs:
self.cbs['syscall_entry'](event)
elif (event_name.startswith('sys_exit') or \
event_name.startswith('syscall_exit_')) and \
'syscall_exit' in self.cbs:
self.cbs['syscall_exit'](event)
def notify(self, notification_id, **kwargs):
if notification_id in self.notifiers:
self.notifiers[notification_id](**kwargs)
| 32.285714
| 60
| 0.604425
| 1,129
| 0.999115
| 0
| 0
| 0
| 0
| 0
| 0
| 130
| 0.115044
|
db2cccb8706be958cee0c18ee9e554aac314a720
| 348
|
py
|
Python
|
grpr2-ch/maci/policies/__init__.py
|
saarcohen30/GrPR2-CH
|
ba8c32f5b4caeebfc93ca30fa1fcc8223176183f
|
[
"MIT"
] | null | null | null |
grpr2-ch/maci/policies/__init__.py
|
saarcohen30/GrPR2-CH
|
ba8c32f5b4caeebfc93ca30fa1fcc8223176183f
|
[
"MIT"
] | null | null | null |
grpr2-ch/maci/policies/__init__.py
|
saarcohen30/GrPR2-CH
|
ba8c32f5b4caeebfc93ca30fa1fcc8223176183f
|
[
"MIT"
] | null | null | null |
from .nn_policy import NNPolicy
# from .gmm import GMMPolicy
# from .latent_space_policy import LatentSpacePolicy
from .uniform_policy import UniformPolicy
# from .gaussian_policy import GaussianPolicy
from .stochastic_policy import StochasticNNPolicy, StochasticNNConditionalPolicy
from .deterministic_policy import DeterministicNNPolicy
| 38.666667
| 81
| 0.850575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 128
| 0.367816
|
db2d0faef6bb46b40a8c415250b0a2a6b57926d0
| 3,841
|
py
|
Python
|
sugarpidisplay/sugarpiconfig/views.py
|
szpaku80/SugarPiDisplay
|
793c288afaad1b1b6921b0d29ee0e6a537e42384
|
[
"MIT"
] | 1
|
2022-02-12T20:39:20.000Z
|
2022-02-12T20:39:20.000Z
|
sugarpidisplay/sugarpiconfig/views.py
|
szpaku80/SugarPiDisplay
|
793c288afaad1b1b6921b0d29ee0e6a537e42384
|
[
"MIT"
] | null | null | null |
sugarpidisplay/sugarpiconfig/views.py
|
szpaku80/SugarPiDisplay
|
793c288afaad1b1b6921b0d29ee0e6a537e42384
|
[
"MIT"
] | null | null | null |
"""
Routes and views for the flask application.
"""
import os
import json
from flask import Flask, redirect, request, render_template, flash
from pathlib import Path
from flask_wtf import FlaskForm
from wtforms import StringField,SelectField,PasswordField,BooleanField
from wtforms.validators import InputRequired,ValidationError
from . import app
source_dexcom = 'dexcom'
source_nightscout = 'nightscout'
LOG_FILENAME="sugarpidisplay.log"
folder_name = '.sugarpidisplay'
config_file = 'config.json'
pi_sugar_path = os.path.join(str(Path.home()), folder_name)
Path(pi_sugar_path).mkdir(exist_ok=True)
def dexcom_field_check(form, field):
if (form.data_source.data == source_dexcom):
if (not field.data):
raise ValidationError('Field cannot be empty')
def nightscout_field_check(form, field):
if (form.data_source.data == source_nightscout):
if (not field.data):
raise ValidationError('Field cannot be empty')
class MyForm(FlaskForm):
class Meta:
csrf = False
data_source = SelectField(
'Data Source',
choices=[(source_dexcom, 'Dexcom'), (source_nightscout, 'Nightscout')]
)
use_animation = BooleanField('Use Animation')
dexcom_user = StringField('Dexcom UserName', validators=[dexcom_field_check])
dexcom_pass = PasswordField('Dexcom Password', validators=[dexcom_field_check])
ns_url = StringField('Nightscout URL', validators=[nightscout_field_check])
ns_token = StringField('Nightscout Access Token', validators=[nightscout_field_check])
@app.route('/hello')
def hello_world():
return 'Hello, World!'
@app.route('/success')
def success():
return 'Your device is configured. Now cycle the power and it will use the new settings'
@app.route('/', methods=('GET', 'POST'))
def setup():
form = MyForm()
if request.method == 'POST':
if form.validate() == False:
flash('Fields are missing.')
return render_template('setup.html', form=form)
else:
handle_submit(form)
return redirect('/success')
#if form.is_submitted():
loadData(form)
return render_template('setup.html', form=form)
def handle_submit(form):
config = { 'data_source': form.data_source.data }
config['use_animation'] = form.use_animation.data
if (form.data_source.data == source_dexcom):
config['dexcom_username'] = form.dexcom_user.data
config['dexcom_password'] = form.dexcom_pass.data
else:
config['nightscout_url'] = form.ns_url.data
config['nightscout_access_token'] = form.ns_token.data
#__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
f = open(os.path.join(pi_sugar_path, config_file), "w")
json.dump(config, f, indent = 4)
f.close()
def loadData(form):
config_full_path = os.path.join(pi_sugar_path, config_file)
if (not Path(config_full_path).exists()):
return
try:
f = open(config_full_path, "r")
config = json.load(f)
f.close()
if ('data_source' in config):
form.data_source.data = config['data_source']
if (config['data_source'] == source_dexcom):
if ('dexcom_username' in config):
form.dexcom_user.data = config['dexcom_username']
if ('dexcom_password' in config):
form.dexcom_pass.data = config['dexcom_password']
if (config['data_source'] == source_nightscout):
if ('nightscout_url' in config):
form.ns_url.data = config['nightscout_url']
if ('nightscout_access_token' in config):
form.ns_token.data = config['nightscout_access_token']
form.use_animation.data = config['use_animation']
except:
pass
| 35.564815
| 93
| 0.664931
| 587
| 0.152825
| 0
| 0
| 627
| 0.163239
| 0
| 0
| 916
| 0.23848
|
db2d5607d06728d0c91675bdab230c329ed3e400
| 2,001
|
py
|
Python
|
progressao_aritmeticav3.py
|
eduardobaltazarmarfim/PythonC
|
8e44b4f191582c73cca6df98120ab142145c4ba1
|
[
"MIT"
] | null | null | null |
progressao_aritmeticav3.py
|
eduardobaltazarmarfim/PythonC
|
8e44b4f191582c73cca6df98120ab142145c4ba1
|
[
"MIT"
] | null | null | null |
progressao_aritmeticav3.py
|
eduardobaltazarmarfim/PythonC
|
8e44b4f191582c73cca6df98120ab142145c4ba1
|
[
"MIT"
] | null | null | null |
def retorno():
resp=input('Deseja executar o programa novamente?[s/n] ')
if(resp=='S' or resp=='s'):
verificar()
else:
print('Processo finalizado com sucesso!')
pass
def cabecalho(titulo):
print('-'*30)
print(' '*9+titulo+' '*15)
print('-'*30)
pass
def mensagem_erro():
print('Dados inseridos são invalidos!')
pass
def verificar():
try:
cabecalho('Progressão PA')
num=int(input('Digite o primeiro termo: '))
numPA=int(input('Digite sua razão PA: '))
except:
mensagem_erro()
retorno()
else:
cont=1
loop=1
rept=1
contagem=0
while loop!=0:
if(rept==1):
while cont<=10:
if(cont>=10):
print('{} -> PAUSA\n'.format(num),end='')
else:
print('{} -> '.format(num),end='')
cont+=1
num+=numPA
contagem+=1
rept+=1
loop=int(input('Quantos termos deseja mostrar a mais? '))
if(loop<=0):
print('Progressão finalizada com {} termos mostrados'.format(contagem))
break
else:
cont=1
while cont<=loop:
if(cont>=loop):
print('{} -> PAUSA\n'.format(num),end='')
else:
print('{} -> '.format(num),end='')
cont+=1
num+=numPA
contagem+=1
rept+=1
loop=int(input('Quantos termos deseja mostrar a mais? '))
if(loop<=0):
print('Progressão finalizada com {} termos mostrados'.format(contagem))
break
retorno()
pass
verificar()
| 17.4
| 91
| 0.410795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 427
| 0.212861
|
db2d89c006750b429af0eb85221902cff310ad5b
| 3,278
|
py
|
Python
|
policies/plc_migrate_default.py
|
PaloAltoNetworks/pcs-migration-management
|
766c8c861befa92e593b23ad6d248e33f62054bb
|
[
"ISC"
] | 1
|
2022-03-17T12:51:45.000Z
|
2022-03-17T12:51:45.000Z
|
policies/plc_migrate_default.py
|
PaloAltoNetworks/pcs-migration-management
|
766c8c861befa92e593b23ad6d248e33f62054bb
|
[
"ISC"
] | 2
|
2021-11-03T15:34:40.000Z
|
2021-12-14T19:50:20.000Z
|
policies/plc_migrate_default.py
|
PaloAltoNetworks/pcs-migration-management
|
766c8c861befa92e593b23ad6d248e33f62054bb
|
[
"ISC"
] | 4
|
2021-11-09T17:57:01.000Z
|
2022-01-24T17:41:21.000Z
|
from policies import plc_get, plc_add, plc_update
from sdk.color_print import c_print
from tqdm import tqdm
def migrate_builtin_policies(tenant_sessions: list, logger):
'''
Updates the default/built in policies of all clone tenants so they are the same as the
source tenant. Default policies can not be added or deleted.
'''
tenant_updated_policies = []
tenant_default_policies = []
for tenant_session in tenant_sessions:
tenant_default_policies.append(plc_get.api_get_default(tenant_session, logger))
original_tenant = tenant_default_policies[0]
clone_tenant_default_policies = tenant_default_policies[1:]
for index, tenant in enumerate(clone_tenant_default_policies):
added = 0
for plc in tqdm(tenant, desc='Syncing Default Policies', leave=False):
for old_plc in original_tenant:
if plc['name'] == old_plc['name']:
#Compliance metadata is not apart of every policy so it has to be compared situationally
complianceMetadata = []
if 'complianceMetadata' in plc:
complianceMetadata = plc['complianceMetadata']
old_complianceMetadata = []
if 'complianceMetadata' in old_plc:
old_complianceMetadata = old_plc['complianceMetadata']
compFlag = False
for el in old_complianceMetadata:
name = el['standardName']
if name not in [cmp['standardName'] for cmp in complianceMetadata]:
compFlag = True
break
req_id = el['requirementId']
if req_id not in [cmp['requirementId'] for cmp in complianceMetadata]:
compFlag = True
break
sec_id = el['sectionId']
if sec_id not in [cmp['sectionId'] for cmp in complianceMetadata]:
compFlag = True
break
#Sort Labels
labels = plc['labels']
o_labels = old_plc['labels']
labels.sort()
o_labels.sort()
#If there is a difference between the source tenant policy and the destination tenant policy, then update the policy
# if plc['severity'] != old_plc['severity'] or plc['labels'] != old_plc['labels'] or plc['rule'] != old_plc['rule'] or compFlag:
if plc['severity'] != old_plc['severity'] or labels != o_labels or plc['rule'] != old_plc['rule'] or compFlag:
res = plc_add.update_default_policy(tenant_sessions[index + 1], old_plc, logger)
if res != 'BAD':
added += 1
tenant_updated_policies.append(added)
logger.info('Finished migrating Default Policies')
return tenant_updated_policies
if __name__ == '__main__':
from sdk.load_config import load_config_create_sessions
tenant_sessions = load_config_create_sessions()
migrate_builtin_policies(tenant_sessions)
| 46.169014
| 148
| 0.57352
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 809
| 0.246797
|
db2e05e89e1db86e733714d3d045b8d52021205c
| 8,158
|
py
|
Python
|
MAIN VERSION 2.py
|
HorridHanu/Notepad-Python
|
5c40ddf0cc01b88387bf3052117581cba6e8ab6f
|
[
"Apache-2.0"
] | 1
|
2021-07-03T09:16:26.000Z
|
2021-07-03T09:16:26.000Z
|
MAIN VERSION 2.py
|
HorridHanu/Notepad-Python
|
5c40ddf0cc01b88387bf3052117581cba6e8ab6f
|
[
"Apache-2.0"
] | null | null | null |
MAIN VERSION 2.py
|
HorridHanu/Notepad-Python
|
5c40ddf0cc01b88387bf3052117581cba6e8ab6f
|
[
"Apache-2.0"
] | null | null | null |
########################################################################################
########################################################################################
## # CODE LANGUAGE IS PYHTON! ## ## ##
## # DATE: 1-JULY-2021 ## ## ######## ## ## ## ##
## # CODE BY HANU! ########## ## ######### ## ## ##
## # ONLY FOR EDUCATIONAL PURPOSE! ########## ####### ## ## ## ## ##
## # NOTEPAD COPY MAIN! ## ## ## ## ## ## ## ## ##
## # ITS ONLY DEMO! ## ## ####### ## ## ######## ##
########################################################################################
########################################################################################
#Define Functions For Cammand!
def fun():
print("yes work! \n"
"PLEASE CHECK NEXT VERSION ON ->Github.com/HorridHanu<- .")
# Define function for Files!
# Define function for Newfile!
import os.path
import os
def newfile():
global file
root.title("Untitled - Notepad")
file = None
text.delete(1.0, END)
# function for openfile!
from tkinter.filedialog import askopenfilename, asksaveasfilename
def openfile():
global file
file = askopenfilename(defaultextension=".txt", filetypes=[("All Files", "*.*"),
("Text Documents",
" *.txt")])
if file == "":
file=None
else:
root.title(os.path.basename(file) + " - Notepad")
text.delete(1.0, END)
f= open(file, "r")
text.insert(1.0, f.read())
f.close()
# function for savefile!
def savefile():
global file
if file == None:
file = asksaveasfilename(initialfile='Untitled.txt',defaultextension='.txt',
filetypes=[("All Files", ".txt"),
("Text Documents", ".txt")])
if file =="":
file =None
else:
#save the file!
root.title(os.path.basename(file) + " - Notepad")
f = open(file, "w")
f.write(text.get(1.0, END))
f.close()
# print("file save")
else:
# save the file!
f = open(file, "w")
f.write(text.get(1.0, END))
f.close()
# Define function for Edits!
# function for cut!
def cut():
text.event_generate(("<<Cut>>"))
# function for copy!
def copy():
text.event_generate(("<<Copy>>"))
# function for paste!
def paste():
text.event_generate(("<<Paste>>"))
# function for delete!
def delete():
text.delete(1.0, END)
# Define functions for ABOUT!
# import the message box as tmsg
import tkinter.messagebox as tmsg
# function for help!
def help():
# print("I will help you!")
# showinfo help to show a messsage !
tmsg.showinfo("Help", "Tell Us Whats happen?\nContact Us On ->Github.com/HorridHanu<-")
# print(a) return value (ok)
# function for rate!
def rate():
# askquestion help to to ask question in yes or no
a= tmsg.askquestion("Rate us!", " Was Your Experince Good?")
# print(a) return value is yes no or!
if a == 'yes':
msg = "Thanks Sir Please Rate Us On Appstore!"
else:
msg = "Tell Us Whats happen?\nContact Us On ->Github.com/HorridHanu<-"
tmsg.showinfo("Experince..", msg)
# function for joining!
def join_us():
ans = tmsg.askquestion("Join", "Would You Join Us On Github")
# print(ans)
if ans =="no":
msg = "Without Joining You Cann't Get Next Update!"
else:
msg ="Go To ->Github.com/HorridHanu<- \n For More Update And Versions...."
tmsg.showwarning("Warning", msg)
# define function for about!
def about():
tmsg.showerror("About", "Notepad By Hanu.. \nVersion 2.0.."
"\nCopy Right 2021 Hanu Corporation. "
"All Right Reserved!"
" For All OS {Windows}, {Linux}, {MacOS}"
" User Interface Are Protected By Trademark"
" And Other Pendings"
" Or Existing Intellecutal Property Right In "
" United State And Other Countries.")
#BASIC TKINTER SETUP!
from tkinter import *
root=Tk()
root.geometry("700x390")
root.title("Untitled - Notpad")
root.bell() #used to bell on opening!
# root.iconphoto("1.ICON.png")
# STATUS BAR!
statusbar = StringVar()
statusbar.set(" Be Happy....")
sbar = Label(root, textvariable=statusbar, relief=SUNKEN, anchor="w").pack(fill=X, side=BOTTOM)
# DEFINE FUNCTION FOR STATUS BAR!
def status_bar():
statusbar.set(" Font Lucida, Size 19 And You Are Working Be Happy.....")
# define function for font!
def font():
statusbar.set(" Font Is Lucida And Size Is 17......")
# define function for time!
# IMPORT Datetime MODULE!
from datetime import datetime
now = datetime.now()
Time = now.strftime("%H:%M")
Date = now.strftime("%D")
def time_now():
statusbar.set(f"{Time} {Date}")
# SCROLLBAR AND TEXT AREA!
# scrollbar using Scroll widget!
sb = Scrollbar(root)
sb.pack(fill=Y, side=RIGHT)
# Text area using text widget and connect with scroll bar!
text = Text(root, font="lucida 17", yscrollcommand=sb.set)
# for taking the full geometry
text.pack(fill=BOTH, expand=True)
file = None
sb.config(command=text.yview)
#Main Menu!
mainmenu=Menu(root)
# Submenu File!
m1 = Menu(mainmenu, tearoff=0)
m1.add_separator()
# to new file
m1.add_command(label="New Ctrl+N", command=newfile)
# m1.add_separator()
# to open existing file
m1.add_command(label="Open.. Ctrl+O", command=openfile)
# m1.add_separator()
# to save current file
m1.add_command(label="save Ctrl+s", command=savefile)
m1.add_separator()
# to print
m1.add_command(label="Print Ctrl+P", command=fun)
# to Exit!
m1.add_separator()
m1.add_command(label="Exit", command=exit) #exit has pre-function to exit!
mainmenu.add_cascade(label="File", menu=m1)
# file menu END
#Submenu Edit!
m2 = Menu(mainmenu, tearoff = 0)
m2.add_separator()
# to cut
m2.add_command(label="Cut Ctrl+X", command=cut)
# to copy
m2.add_command(label="Copy Ctrl+C", command=copy)
# to paste
m2.add_command(label="Paste Ctrl+V", command=paste)
m2.add_separator()
# to delete
m2.add_command(label="Delete Del", command=delete)
m2.add_separator()
m2.add_command(label="Select Ctrl+A",command=fun)
# to time
m2.add_command(label="Time/Date F5",command=time_now)
mainmenu.add_cascade(label="Edit", menu=m2)
# edit menu END
#Submenu Format
m3 = Menu(mainmenu, tearoff = 0)
m3.add_separator()
m3.add_command(label="WordWrap", command=fun)
# to font
m3.add_command(label="font..", command=font)
mainmenu.add_cascade(label="Format", menu=m3)
#Submenu Veiw
m4 = Menu(mainmenu, tearoff=0)
m4.add_separator()
# to view statusbar
m4.add_command(label="Status Bar", command=status_bar)
mainmenu.add_cascade(label="View", menu=m4)
#Submenu View Help
m5=Menu(mainmenu, tearoff = 0)
m5.add_separator()
# to view help
m5.add_command(label="View Help", command=help)
m5.add_separator()
# m5.add_separator()
# m5.add_separator()
# to rate
m5.add_command(label="Rate us!", command=rate)
# m5.add_separator()
# to join
m5.add_command(label="Join us!", command=join_us)
m5.add_separator()
m5.add_separator()
# about
m5.add_command(label="About Notepad", command=about)
mainmenu.add_cascade(label="Help", menu=m5)
# View help menu END
root.config(menu=mainmenu) #configure the mainmenu as menu
root.mainloop()
########################################################################################
########################################################################################
| 28.131034
| 95
| 0.539103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,011
| 0.491665
|
db308acc7784941bed9244b19f0ab77519bcb972
| 512
|
py
|
Python
|
unfollow_parfum.py
|
AntonPukhonin/InstaPy
|
0c480474ec39e174fa4256b48bc25bc4ecf7b6aa
|
[
"MIT"
] | null | null | null |
unfollow_parfum.py
|
AntonPukhonin/InstaPy
|
0c480474ec39e174fa4256b48bc25bc4ecf7b6aa
|
[
"MIT"
] | null | null | null |
unfollow_parfum.py
|
AntonPukhonin/InstaPy
|
0c480474ec39e174fa4256b48bc25bc4ecf7b6aa
|
[
"MIT"
] | null | null | null |
from instapy import InstaPy
#insta_username = 'antonpuhonin'
#insta_password = 'Bulbazavr36'
insta_username = 'tonparfums'
insta_password = 'ov9AN6NlnV'
try:
session = InstaPy(username=insta_username,
password=insta_password,
headless_browser=True,
multi_logs=True)
session.login()
session.unfollow_users(amount=200, onlyInstapyFollowed = True, onlyInstapyMethod = 'FIFO', unfollow_after=6*24*60*60 )
finally:
session.end()
| 24.380952
| 122
| 0.667969
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 93
| 0.181641
|
db30f2130ff4ed72860f0513ddb8d069dd812ef8
| 1,462
|
py
|
Python
|
portal/grading/serializers.py
|
LDSSA/portal
|
9561da1e262678fe68dcf51c66007c0fb13eb51a
|
[
"MIT"
] | 2
|
2020-11-09T03:48:36.000Z
|
2021-07-02T14:30:09.000Z
|
portal/grading/serializers.py
|
LDSSA/portal
|
9561da1e262678fe68dcf51c66007c0fb13eb51a
|
[
"MIT"
] | 132
|
2020-04-25T15:57:56.000Z
|
2022-03-10T19:15:51.000Z
|
portal/grading/serializers.py
|
LDSSA/portal
|
9561da1e262678fe68dcf51c66007c0fb13eb51a
|
[
"MIT"
] | 1
|
2020-10-24T16:15:57.000Z
|
2020-10-24T16:15:57.000Z
|
from rest_framework import serializers
from portal.academy import models
from portal.applications.models import Submission, Challenge
class GradeSerializer(serializers.ModelSerializer):
notebook = serializers.FileField(source="feedback")
class Meta:
model = models.Grade
fields = (
"score",
"status",
"message",
"notebook",
)
class ChecksumSerializer(serializers.ModelSerializer):
unit = serializers.SlugField(source="code")
class Meta:
model = models.Unit
fields = (
"unit",
"checksum",
)
def update(self, instance, validated_data):
old_checksum = instance.checksum
instance = super().update(instance, validated_data)
if old_checksum != instance.checksum:
for grade in models.Grade.objects.filter(
unit=instance, status="graded"
):
grade.status = "out-of-date"
grade.save()
return instance
class AdmissionsGradeSerializer(serializers.ModelSerializer):
notebook = serializers.FileField(source="feedback")
class Meta(GradeSerializer.Meta):
model = Submission
class AdmissionsChecksumSerializer(serializers.ModelSerializer):
unit = serializers.SlugField(source="code")
class Meta:
model = Challenge
fields = (
"unit",
"checksum",
)
| 24.366667
| 64
| 0.613543
| 1,315
| 0.899453
| 0
| 0
| 0
| 0
| 0
| 0
| 119
| 0.081395
|
db3364ee622377b95d22e40cf02ce787e7812d16
| 323
|
py
|
Python
|
Funcoes/ex106-sistemaInterativoAjuda.py
|
ascaniopy/python
|
6d8892b7b9ff803b7422a61e68a383ec6ac7d62d
|
[
"MIT"
] | null | null | null |
Funcoes/ex106-sistemaInterativoAjuda.py
|
ascaniopy/python
|
6d8892b7b9ff803b7422a61e68a383ec6ac7d62d
|
[
"MIT"
] | null | null | null |
Funcoes/ex106-sistemaInterativoAjuda.py
|
ascaniopy/python
|
6d8892b7b9ff803b7422a61e68a383ec6ac7d62d
|
[
"MIT"
] | null | null | null |
from time import sleep
c = ('\033[m', # 0 - Sem cores
'\033[0;30;41m', # 1 - Vermelho
'\033[0;30;42m', # 2 - Verde
'\033[0;30;43m', # 3 - Amarelo
'\033[0;30;44m', # 4 - Azul
'\033[0;30;45m', # 5 - Roxo
'\033[0;30m' # 6 - Branco
)
#Programa principal
| 19
| 40
| 0.439628
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 199
| 0.616099
|
db3369b101ea183c503c1fa561b47c91b9100d56
| 36
|
py
|
Python
|
deeptrack/extras/__init__.py
|
Margon01/DeepTrack-2.0_old
|
f4f4abc89ab1f63aeb4722f84dcfb93189c57ccf
|
[
"MIT"
] | 65
|
2020-04-29T01:06:01.000Z
|
2022-03-28T12:44:02.000Z
|
deeptrack/extras/__init__.py
|
Margon01/DeepTrack-2.0_old
|
f4f4abc89ab1f63aeb4722f84dcfb93189c57ccf
|
[
"MIT"
] | 41
|
2020-04-20T16:09:07.000Z
|
2022-03-29T15:40:08.000Z
|
deeptrack/extras/__init__.py
|
Margon01/DeepTrack-2.0_old
|
f4f4abc89ab1f63aeb4722f84dcfb93189c57ccf
|
[
"MIT"
] | 31
|
2020-04-27T18:04:06.000Z
|
2022-03-18T17:24:50.000Z
|
from . import datasets, radialcenter
| 36
| 36
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
db33adbcb92391813fa24af06e3df16ea1f77a19
| 236
|
py
|
Python
|
pyvisdk/enums/virtual_machine_ht_sharing.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
pyvisdk/enums/virtual_machine_ht_sharing.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
pyvisdk/enums/virtual_machine_ht_sharing.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
VirtualMachineHtSharing = Enum(
'any',
'internal',
'none',
)
| 15.733333
| 40
| 0.440678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 140
| 0.59322
|
db33d4b02c61194e50c6a9e8e0140a09b33f011f
| 1,710
|
py
|
Python
|
reo/migrations/0118_auto_20210715_2148.py
|
NREL/REopt_API
|
fbc70f3b0cdeec9ee220266d6b3b0c5d64f257a6
|
[
"BSD-3-Clause"
] | 7
|
2022-01-29T12:10:10.000Z
|
2022-03-28T13:45:20.000Z
|
reo/migrations/0118_auto_20210715_2148.py
|
NREL/reopt_api
|
fbc70f3b0cdeec9ee220266d6b3b0c5d64f257a6
|
[
"BSD-3-Clause"
] | 12
|
2022-02-01T18:23:18.000Z
|
2022-03-31T17:22:17.000Z
|
reo/migrations/0118_auto_20210715_2148.py
|
NREL/REopt_API
|
fbc70f3b0cdeec9ee220266d6b3b0c5d64f257a6
|
[
"BSD-3-Clause"
] | 3
|
2022-02-08T19:44:40.000Z
|
2022-03-12T11:05:36.000Z
|
# Generated by Django 3.1.12 on 2021-07-15 21:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reo', '0117_auto_20210715_2122'),
]
operations = [
migrations.AddField(
model_name='sitemodel',
name='lifetime_emissions_cost_Health',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='sitemodel',
name='lifetime_emissions_cost_Health_bau',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='sitemodel',
name='lifetime_emissions_lb_NOx',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='sitemodel',
name='lifetime_emissions_lb_NOx_bau',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='sitemodel',
name='lifetime_emissions_lb_PM',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='sitemodel',
name='lifetime_emissions_lb_PM_bau',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='sitemodel',
name='lifetime_emissions_lb_SO2',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='sitemodel',
name='lifetime_emissions_lb_SO2_bau',
field=models.FloatField(blank=True, null=True),
),
]
| 31.666667
| 59
| 0.588304
| 1,616
| 0.945029
| 0
| 0
| 0
| 0
| 0
| 0
| 406
| 0.237427
|
db34a67ee55a1e9b0a17aba6120305fef0d0c936
| 16,287
|
py
|
Python
|
bpy_lambda/2.78/scripts/addons_contrib/io_scene_cod/__init__.py
|
resultant-gamedev/bpy_lambda
|
c8cf46c10c69e74a0892b621d76c62edaa5b04bc
|
[
"MIT"
] | null | null | null |
bpy_lambda/2.78/scripts/addons_contrib/io_scene_cod/__init__.py
|
resultant-gamedev/bpy_lambda
|
c8cf46c10c69e74a0892b621d76c62edaa5b04bc
|
[
"MIT"
] | null | null | null |
bpy_lambda/2.78/scripts/addons_contrib/io_scene_cod/__init__.py
|
resultant-gamedev/bpy_lambda
|
c8cf46c10c69e74a0892b621d76c62edaa5b04bc
|
[
"MIT"
] | 1
|
2019-11-24T18:43:42.000Z
|
2019-11-24T18:43:42.000Z
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
"""
Blender-CoD: Blender Add-On for Call of Duty modding
Version: alpha 3
Copyright (c) 2011 CoDEmanX, Flybynyt -- blender-cod@online.de
http://code.google.com/p/blender-cod/
TODO
- UI for xmodel and xanim import (planned for alpha 4/5)
"""
bl_info = {
"name": "Blender-CoD - Add-On for Call of Duty modding (alpha 3)",
"author": "CoDEmanX, Flybynyt",
"version": (0, 3, 5),
"blender": (2, 62, 0),
"location": "File > Import | File > Export",
"description": "Export models to *.XMODEL_EXPORT and animations to *.XANIM_EXPORT",
"warning": "Alpha version, please report any bugs!",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Import-Export/Call_of_Duty_IO",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"support": "TESTING",
"category": "Import-Export"
}
# To support reload properly, try to access a package var, if it's there, reload everything
if "bpy" in locals():
import imp
if "import_xmodel" in locals():
imp.reload(import_xmodel)
if "export_xmodel" in locals():
imp.reload(export_xmodel)
if "import_xanim" in locals():
imp.reload(import_xanim)
if "export_xanim" in locals():
imp.reload(export_xanim)
import bpy
from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty
import bpy_extras.io_utils
from bpy_extras.io_utils import ExportHelper, ImportHelper
import time
# Planned for alpha 4/5
class ImportXmodel(bpy.types.Operator, ImportHelper):
"""Load a CoD XMODEL_EXPORT File"""
bl_idname = "import_scene.xmodel"
bl_label = "Import XMODEL_EXPORT"
bl_options = {'PRESET'}
filename_ext = ".XMODEL_EXPORT"
filter_glob = StringProperty(default="*.XMODEL_EXPORT", options={'HIDDEN'})
#use_meshes = BoolProperty(name="Meshes", description="Import meshes", default=True)
#use_armature = BoolProperty(name="Armature", description="Import Armature", default=True)
#use_bind_armature = BoolProperty(name="Bind Meshes to Armature", description="Parent imported meshes to armature", default=True)
#use_split_objects = BoolProperty(name="Object", description="Import OBJ Objects into Blender Objects", default=True)
#use_split_groups = BoolProperty(name="Group", description="Import OBJ Groups into Blender Objects", default=True)
#use_image_search = BoolProperty(name="Image Search", description="Search subdirs for any associated images (Warning, may be slow)", default=True)
def execute(self, context):
from . import import_xmodel
start_time = time.clock()
result = import_xmodel.load(self, context, **self.as_keywords(ignore=("filter_glob", "check_existing")))
if not result:
self.report({'INFO'}, "Import finished in %.4f sec." % (time.clock() - start_time))
return {'FINISHED'}
else:
self.report({'ERROR'}, result)
return {'CANCELLED'}
"""
def draw(self, context):
layout = self.layout
col = layout.column()
col.prop(self, "use_meshes")
col.prop(self, "use_armature")
row = layout.row()
row.active = self.use_meshes and self.use_armature
row.prop(self, "use_bind_armature")
"""
@classmethod
def poll(self, context):
return (context.scene is not None)
class ImportXanim(bpy.types.Operator, ImportHelper):
"""Load a CoD XANIM_EXPORT File"""
bl_idname = "import_scene.xanim"
bl_label = "Import XANIM_EXPORT"
bl_options = {'PRESET'}
filename_ext = ".XANIM_EXPORT"
filter_glob = StringProperty(default="*.XANIM_EXPORT;*.NT_EXPORT", options={'HIDDEN'})
def execute(self, context):
# print("Selected: " + context.active_object.name)
from . import import_xanim
return import_xanim.load(self, context, **self.as_keywords(ignore=("filter_glob",)))
class ExportXmodel(bpy.types.Operator, ExportHelper):
"""Save a CoD XMODEL_EXPORT File"""
bl_idname = "export_scene.xmodel"
bl_label = 'Export XMODEL_EXPORT'
bl_options = {'PRESET'}
filename_ext = ".XMODEL_EXPORT"
filter_glob = StringProperty(default="*.XMODEL_EXPORT", options={'HIDDEN'})
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
use_version = EnumProperty(
name="Format Version",
description="XMODEL_EXPORT format version for export",
items=(('5', "Version 5", "vCoD, CoD:UO"),
('6', "Version 6", "CoD2, CoD4, CoD5, CoD7")),
default='6',
)
use_selection = BoolProperty(
name="Selection only",
description="Export selected meshes only (object or weight paint mode)",
default=False
)
use_vertex_colors = BoolProperty(
name="Vertex colors",
description="Export vertex colors (if disabled, white color will be used)",
default=True
)
use_vertex_colors_alpha = BoolProperty(
name="As alpha",
description="Turn RGB vertex colors into grayscale (average value) and use it as alpha transparency. White is 1 (opaque), black 0 (invisible)",
default=False
)
use_apply_modifiers = BoolProperty(
name="Apply Modifiers",
description="Apply all mesh modifiers except Armature (preview resolution)",
default=True
)
use_armature = BoolProperty(
name="Armature",
description="Export bones (if disabled, only a 'tag_origin' bone will be written)",
default=True
)
use_vertex_cleanup = BoolProperty(
name="Clean up vertices",
description="Try this if you have problems converting to xmodel. Skips vertices which aren't used by any face and updates references.",
default=False
)
use_armature_pose = BoolProperty(
name="Pose animation to models",
description="Export meshes with Armature modifier applied as a series of XMODEL_EXPORT files",
default=False
)
use_frame_start = IntProperty(
name="Start",
description="First frame to export",
default=1,
min=0
)
use_frame_end = IntProperty(
name="End",
description="Last frame to export",
default=250,
min=0
)
use_weight_min = BoolProperty(
name="Minimum bone weight",
description="Try this if you get 'too small weight' errors when converting",
default=False,
)
use_weight_min_threshold = FloatProperty(
name="Threshold",
description="Smallest allowed weight (minimum value)",
default=0.010097,
min=0.0,
max=1.0,
precision=6
)
def execute(self, context):
from . import export_xmodel
start_time = time.clock()
result = export_xmodel.save(self, context, **self.as_keywords(ignore=("filter_glob", "check_existing")))
if not result:
self.report({'INFO'}, "Export finished in %.4f sec." % (time.clock() - start_time))
return {'FINISHED'}
else:
self.report({'ERROR'}, result)
return {'CANCELLED'}
# Extend ExportHelper invoke function to support dynamic default values
def invoke(self, context, event):
#self.use_frame_start = context.scene.frame_start
self.use_frame_start = context.scene.frame_current
#self.use_frame_end = context.scene.frame_end
self.use_frame_end = context.scene.frame_current
return super().invoke(context, event)
def draw(self, context):
layout = self.layout
row = layout.row(align=True)
row.prop(self, "use_version", expand=True)
# Calculate number of selected mesh objects
if context.mode in {'OBJECT', 'PAINT_WEIGHT'}:
meshes_selected = len([m for m in bpy.data.objects if m.type == 'MESH' and m.select])
else:
meshes_selected = 0
col = layout.column(align=True)
col.prop(self, "use_selection", "Selection only (%i meshes)" % meshes_selected)
col.enabled = bool(meshes_selected)
col = layout.column(align=True)
col.prop(self, "use_apply_modifiers")
col = layout.column(align=True)
col.enabled = not self.use_armature_pose
if self.use_armature and self.use_armature_pose:
col.prop(self, "use_armature", "Armature (disabled)")
else:
col.prop(self, "use_armature")
if self.use_version == '6':
row = layout.row(align=True)
row.prop(self, "use_vertex_colors")
sub = row.split()
sub.active = self.use_vertex_colors
sub.prop(self, "use_vertex_colors_alpha")
col = layout.column(align=True)
col.label("Advanced:")
col = layout.column(align=True)
col.prop(self, "use_vertex_cleanup")
box = layout.box()
col = box.column(align=True)
col.prop(self, "use_armature_pose")
sub = box.column()
sub.active = self.use_armature_pose
sub.label(text="Frame range: (%i frames)" % (abs(self.use_frame_end - self.use_frame_start) + 1))
row = sub.row(align=True)
row.prop(self, "use_frame_start")
row.prop(self, "use_frame_end")
box = layout.box()
col = box.column(align=True)
col.prop(self, "use_weight_min")
sub = box.column()
sub.enabled = self.use_weight_min
sub.prop(self, "use_weight_min_threshold")
@classmethod
def poll(self, context):
return (context.scene is not None)
class ExportXanim(bpy.types.Operator, ExportHelper):
"""Save a XMODEL_XANIM File"""
bl_idname = "export_scene.xanim"
bl_label = 'Export XANIM_EXPORT'
bl_options = {'PRESET'}
filename_ext = ".XANIM_EXPORT"
filter_glob = StringProperty(default="*.XANIM_EXPORT", options={'HIDDEN'})
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
use_selection = BoolProperty(
name="Selection only",
description="Export selected bones only (pose mode)",
default=False
)
use_framerate = IntProperty(
name="Framerate",
description="Set frames per second for export, 30 fps is commonly used.",
default=24,
min=1,
max=100
)
use_frame_start = IntProperty(
name="Start",
description="First frame to export",
default=1,
min=0
)
use_frame_end = IntProperty(
name="End",
description="Last frame to export",
default=250,
min=0
)
use_notetrack = BoolProperty(
name="Notetrack",
description="Export timeline markers as notetrack nodes",
default=True
)
use_notetrack_format = EnumProperty(
name="Notetrack format",
description="Notetrack format to use. Always set 'CoD 7' for Black Ops, even if not using notetrack!",
items=(('5', "CoD 5", "Separate NT_EXPORT notetrack file for 'World at War'"),
('7', "CoD 7", "Separate NT_EXPORT notetrack file for 'Black Ops'"),
('1', "all other", "Inline notetrack data for all CoD versions except WaW and BO")),
default='1',
)
def execute(self, context):
from . import export_xanim
start_time = time.clock()
result = export_xanim.save(self, context, **self.as_keywords(ignore=("filter_glob", "check_existing")))
if not result:
self.report({'INFO'}, "Export finished in %.4f sec." % (time.clock() - start_time))
return {'FINISHED'}
else:
self.report({'ERROR'}, result)
return {'CANCELLED'}
# Extend ExportHelper invoke function to support dynamic default values
def invoke(self, context, event):
self.use_frame_start = context.scene.frame_start
self.use_frame_end = context.scene.frame_end
self.use_framerate = round(context.scene.render.fps / context.scene.render.fps_base)
return super().invoke(context, event)
def draw(self, context):
layout = self.layout
bones_selected = 0
armature = None
# Take the first armature
for ob in bpy.data.objects:
if ob.type == 'ARMATURE' and len(ob.data.bones) > 0:
armature = ob.data
# Calculate number of selected bones if in pose-mode
if context.mode == 'POSE':
bones_selected = len([b for b in armature.bones if b.select])
# Prepare info string
armature_info = "%s (%i bones)" % (ob.name, len(armature.bones))
break
else:
armature_info = "Not found!"
if armature:
icon = 'NONE'
else:
icon = 'ERROR'
col = layout.column(align=True)
col.label("Armature: %s" % armature_info, icon)
col = layout.column(align=True)
col.prop(self, "use_selection", "Selection only (%i bones)" % bones_selected)
col.enabled = bool(bones_selected)
layout.label(text="Frame range: (%i frames)" % (abs(self.use_frame_end - self.use_frame_start) + 1))
row = layout.row(align=True)
row.prop(self, "use_frame_start")
row.prop(self, "use_frame_end")
col = layout.column(align=True)
col.prop(self, "use_framerate")
# Calculate number of markers in export range
frame_min = min(self.use_frame_start, self.use_frame_end)
frame_max = max(self.use_frame_start, self.use_frame_end)
num_markers = len([m for m in context.scene.timeline_markers if frame_max >= m.frame >= frame_min])
col = layout.column(align=True)
col.prop(self, "use_notetrack", text="Notetrack (%i nodes)" % num_markers)
col = layout.column(align=True)
col.prop(self, "use_notetrack_format", expand=True)
@classmethod
def poll(self, context):
return (context.scene is not None)
def menu_func_xmodel_import(self, context):
self.layout.operator(ImportXmodel.bl_idname, text="CoD Xmodel (.XMODEL_EXPORT)")
"""
def menu_func_xanim_import(self, context):
self.layout.operator(ImportXanim.bl_idname, text="CoD Xanim (.XANIM_EXPORT)")
"""
def menu_func_xmodel_export(self, context):
self.layout.operator(ExportXmodel.bl_idname, text="CoD Xmodel (.XMODEL_EXPORT)")
def menu_func_xanim_export(self, context):
self.layout.operator(ExportXanim.bl_idname, text="CoD Xanim (.XANIM_EXPORT)")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_import.append(menu_func_xmodel_import)
#bpy.types.INFO_MT_file_import.append(menu_func_xanim_import)
bpy.types.INFO_MT_file_export.append(menu_func_xmodel_export)
bpy.types.INFO_MT_file_export.append(menu_func_xanim_export)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_import.remove(menu_func_xmodel_import)
#bpy.types.INFO_MT_file_import.remove(menu_func_xanim_import)
bpy.types.INFO_MT_file_export.remove(menu_func_xmodel_export)
bpy.types.INFO_MT_file_export.remove(menu_func_xanim_export)
if __name__ == "__main__":
register()
| 34.144654
| 151
| 0.647203
| 12,750
| 0.782833
| 0
| 0
| 252
| 0.015472
| 0
| 0
| 6,742
| 0.41395
|
db359edbcc421125b398c8492ccfbe1df5e59aa8
| 771
|
py
|
Python
|
pynpact/tests/steps/test_extract.py
|
NProfileAnalysisComputationalTool/npact
|
d4495f5cba2a936f2be2f2c821edd5429d1a58da
|
[
"BSD-3-Clause"
] | 2
|
2015-09-18T02:01:19.000Z
|
2021-09-03T18:40:59.000Z
|
pynpact/tests/steps/test_extract.py
|
NProfileAnalysisComputationalTool/npact
|
d4495f5cba2a936f2be2f2c821edd5429d1a58da
|
[
"BSD-3-Clause"
] | null | null | null |
pynpact/tests/steps/test_extract.py
|
NProfileAnalysisComputationalTool/npact
|
d4495f5cba2a936f2be2f2c821edd5429d1a58da
|
[
"BSD-3-Clause"
] | 1
|
2015-09-25T18:58:21.000Z
|
2015-09-25T18:58:21.000Z
|
import os.path
import pytest
import py
from pynpact.steps import extract
def test_binfile_exists():
assert extract.BIN
assert os.path.exists(extract.BIN)
def test_plan(gbkconfig, executor):
extract.plan(gbkconfig, executor)
filename = gbkconfig[extract.OUTPUTKEY]
assert filename
p = py.path.local(filename)
assert p.exists()
# based on how many genes are in testgbk
assert 3 == len(p.readlines())
def test_plan_async(gbkconfig, async_executor):
extract.plan(gbkconfig, async_executor)
filename = gbkconfig[extract.OUTPUTKEY]
assert filename
async_executor.result(filename, 1)
p = py.path.local(filename)
assert p.exists()
# based on how many genes are in testgbk
assert 3 == len(p.readlines())
| 23.363636
| 47
| 0.713359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 80
| 0.103761
|
db3607c58d0cde5c1aa1bfb4ceddd2fc24ac1f1e
| 16,994
|
py
|
Python
|
dl_training/core.py
|
Duplums/SMLvsDL
|
b285717bd8d8e832b4bc9e2b42d18bd96b628def
|
[
"MIT"
] | null | null | null |
dl_training/core.py
|
Duplums/SMLvsDL
|
b285717bd8d8e832b4bc9e2b42d18bd96b628def
|
[
"MIT"
] | null | null | null |
dl_training/core.py
|
Duplums/SMLvsDL
|
b285717bd8d8e832b4bc9e2b42d18bd96b628def
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Core classes.
"""
# System import
import os
import pickle
from copy import deepcopy
import subprocess
# Third party import
import torch
import torch.nn.functional as func
from torch.nn import DataParallel
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
# Package import
from dl_training.utils import checkpoint
from dl_training.history import History
import dl_training.metrics as mmetrics
import logging
class Base(object):
""" Class to perform classification.
"""
def __init__(self, optimizer_name="Adam", learning_rate=1e-3,
loss_name="NLLLoss", metrics=None, use_cuda=False,
pretrained=None, load_optimizer=True, use_multi_gpu=True,
**kwargs):
""" Class instantiation.
Observers will be notified, allowed signals are:
- 'before_epoch'
- 'after_epoch'
Parameters
----------
optimizer_name: str, default 'Adam'
the name of the optimizer: see 'torch.optim' for a description
of available optimizer.
learning_rate: float, default 1e-3
the optimizer learning rate.
loss_name: str, default 'NLLLoss'
the name of the loss: see 'torch.nn' for a description
of available loss.
metrics: list of str
a list of extra metrics that will be computed.
use_cuda: bool, default False
whether to use GPU or CPU.
pretrained: path, default None
path to the pretrained model or weights.
load_optimizer: boolean, default True
if pretrained is set, whether to also load the optimizer's weights or not
use_multi_gpu: boolean, default True
if several GPUs are available, use them during forward/backward pass
kwargs: dict
specify directly a custom 'model', 'optimizer' or 'loss'. Can also
be used to set specific optimizer parameters.
"""
self.optimizer = kwargs.get("optimizer")
self.logger = logging.getLogger("SMLvsDL")
self.loss = kwargs.get("loss")
self.device = torch.device("cuda" if use_cuda else "cpu")
for name in ("optimizer", "loss"):
if name in kwargs:
kwargs.pop(name)
if "model" in kwargs:
self.model = kwargs.pop("model")
if self.optimizer is None:
if optimizer_name in dir(torch.optim):
self.optimizer = getattr(torch.optim, optimizer_name)(
self.model.parameters(),
lr=learning_rate,
**kwargs)
else:
raise ValueError("Optimizer '{0}' uknown: check available "
"optimizer in 'pytorch.optim'.")
if self.loss is None:
if loss_name not in dir(torch.nn):
raise ValueError("Loss '{0}' uknown: check available loss in "
"'pytorch.nn'.")
self.loss = getattr(torch.nn, loss_name)()
self.metrics = {}
for name in (metrics or []):
if name not in mmetrics.METRICS:
raise ValueError("Metric '{0}' not yet supported: you can try "
"to fill the 'METRICS' factory, or ask for "
"some help!".format(name))
self.metrics[name] = mmetrics.METRICS[name]
if use_cuda and not torch.cuda.is_available():
raise ValueError("No GPU found: unset 'use_cuda' parameter.")
if pretrained is not None:
checkpoint = None
try:
checkpoint = torch.load(pretrained, map_location=lambda storage, loc: storage)
except BaseException as e:
self.logger.error('Impossible to load the checkpoint: %s' % str(e))
if checkpoint is not None:
if hasattr(checkpoint, "state_dict"):
self.model.load_state_dict(checkpoint.state_dict())
elif isinstance(checkpoint, dict):
if "model" in checkpoint:
try:
for key in list(checkpoint['model'].keys()):
if key.replace('module.', '') != key:
checkpoint['model'][key.replace('module.', '')] = checkpoint['model'][key]
del(checkpoint['model'][key])
#####
unexpected= self.model.load_state_dict(checkpoint["model"], strict=False)
self.logger.info('Model loading info: {}'.format(unexpected))
self.logger.info('Model loaded')
except BaseException as e:
self.logger.error('Error while loading the model\'s weights: %s' % str(e))
raise ValueError("")
if "optimizer" in checkpoint:
if load_optimizer:
try:
self.optimizer.load_state_dict(checkpoint["optimizer"])
for state in self.optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.to(self.device)
except BaseException as e:
self.logger.error('Error while loading the optimizer\'s weights: %s' % str(e))
else:
self.logger.warning("The optimizer's weights are not restored ! ")
else:
self.model.load_state_dict(checkpoint)
if use_multi_gpu and torch.cuda.device_count() > 1:
self.model = DataParallel(self.model)
self.model = self.model.to(self.device)
def training(self, manager, nb_epochs: int, checkpointdir=None,
fold_index=None, scheduler=None, with_validation=True,
nb_epochs_per_saving=1, exp_name=None, **kwargs_train):
""" Train the model.
Parameters
----------
manager: a dl_training DataManager
a manager containing the train and validation data.
nb_epochs: int, default 100
the number of epochs.
checkpointdir: str, default None
a destination folder where intermediate models/historues will be
saved.
fold_index: int or [int] default None
the index(es) of the fold(s) to use for the training, default use all the
available folds.
scheduler: torch.optim.lr_scheduler, default None
a scheduler used to reduce the learning rate.
with_validation: bool, default True
if set use the validation dataset.
nb_epochs_per_saving: int, default 1,
the number of epochs after which the model+optimizer's parameters are saved
exp_name: str, default None
the experience name that will be launched
Returns
-------
train_history, valid_history: History
the train/validation history.
"""
train_history = History(name="Train_%s"%(exp_name or ""))
if with_validation is not None:
valid_history = History(name="Validation_%s"%(exp_name or ""))
else:
valid_history = None
print(self.loss)
print(self.optimizer)
folds = range(manager.get_nb_folds())
if fold_index is not None:
if isinstance(fold_index, int):
folds = [fold_index]
elif isinstance(fold_index, list):
folds = fold_index
init_optim_state = deepcopy(self.optimizer.state_dict())
init_model_state = deepcopy(self.model.state_dict())
if scheduler is not None:
init_scheduler_state = deepcopy(scheduler.state_dict())
for fold in folds:
# Initialize everything before optimizing on a new fold
self.optimizer.load_state_dict(init_optim_state)
self.model.load_state_dict(init_model_state)
if scheduler is not None:
scheduler.load_state_dict(init_scheduler_state)
loader = manager.get_dataloader(
train=True,
validation=True,
fold_index=fold)
for epoch in range(nb_epochs):
loss, values = self.train(loader.train, fold, epoch, **kwargs_train)
train_history.log((fold, epoch), loss=loss, **values)
train_history.summary()
if scheduler is not None:
scheduler.step()
print('Scheduler lr: {}'.format(scheduler.get_lr()), flush=True)
print('Optimizer lr: %f'%self.optimizer.param_groups[0]['lr'], flush=True)
if checkpointdir is not None and (epoch % nb_epochs_per_saving == 0 or epoch == nb_epochs-1) \
and epoch > 0:
if not os.path.isdir(checkpointdir):
subprocess.check_call(['mkdir', '-p', checkpointdir])
self.logger.info("Directory %s created."%checkpointdir)
checkpoint(
model=self.model,
epoch=epoch,
fold=fold,
outdir=checkpointdir,
name=exp_name,
optimizer=self.optimizer)
train_history.save(
outdir=checkpointdir,
epoch=epoch,
fold=fold)
if with_validation:
_, _, _, loss, values = self.test(loader.validation, **kwargs_train)
valid_history.log((fold, epoch), validation_loss=loss, **values)
valid_history.summary()
if checkpointdir is not None and (epoch % nb_epochs_per_saving == 0 or epoch == nb_epochs-1) \
and epoch > 0:
valid_history.save(
outdir=checkpointdir,
epoch=epoch,
fold=fold)
return train_history, valid_history
def train(self, loader,fold=None, epoch=None, **kwargs):
""" Train the model on the trained data.
Parameters
----------
loader: a pytorch Dataloader
Returns
-------
loss: float
the value of the loss function.
values: dict
the values of the metrics.
"""
self.model.train()
nb_batch = len(loader)
pbar = tqdm(total=nb_batch, desc="Mini-Batch")
values = {}
iteration = 0
losses = []
y_pred = []
y_true = []
for dataitem in loader:
pbar.update()
inputs = dataitem.inputs
if isinstance(inputs, torch.Tensor):
inputs = inputs.to(self.device)
list_targets = []
_targets = []
for item in (dataitem.outputs, dataitem.labels):
if item is not None:
_targets.append(item.to(self.device))
if len(_targets) == 1:
_targets = _targets[0]
list_targets.append(_targets)
self.optimizer.zero_grad()
outputs = self.model(inputs)
batch_loss = self.loss(outputs, *list_targets)
batch_loss.backward()
self.optimizer.step()
losses.append(float(batch_loss))
y_pred.extend(outputs.detach().cpu().numpy())
y_true.extend(list_targets[0].detach().cpu().numpy())
aux_losses = (self.model.get_aux_losses() if hasattr(self.model, 'get_aux_losses') else dict())
aux_losses.update(self.loss.get_aux_losses() if hasattr(self.loss, 'get_aux_losses') else dict())
for name, aux_loss in aux_losses.items():
if name not in values:
values[name] = 0
values[name] += float(aux_loss) / nb_batch
iteration += 1
loss = np.mean(losses)
for name, metric in self.metrics.items():
if name not in values:
values[name] = 0
values[name] = float(metric(torch.tensor(y_pred), torch.tensor(y_true)))
pbar.close()
return loss, values
def testing(self, loader: DataLoader, saving_dir=None, exp_name=None, **kwargs):
""" Evaluate the model.
Parameters
----------
loader: a pytorch DataLoader
saving_dir: str path to the saving directory
exp_name: str, name of the experiments that is used to derive the output file name of testing results.
Returns
-------
y: array-like
the predicted data.
X: array-like
the input data.
y_true: array-like
the true data if available.
loss: float
the value of the loss function if true data availble.
values: dict
the values of the metrics if true data availble.
"""
y, y_true, X, loss, values = self.test(loader)
if saving_dir is not None:
if not os.path.isdir(saving_dir):
subprocess.check_call(['mkdir', '-p', saving_dir])
self.logger.info("Directory %s created."%saving_dir)
with open(os.path.join(saving_dir, (exp_name or 'test')+'.pkl'), 'wb') as f:
pickle.dump({'y_pred': y, 'y_true': y_true, 'loss': loss, 'metrics': values}, f)
return y, X, y_true, loss, values
def test(self, loader):
""" Evaluate the model on the tests or validation data.
Parameter
---------
loader: a pytorch Dataset
the data loader.
Returns
-------
y: array-like
the predicted data.
y_true: array-like
the true data
X: array_like
the input data
loss: float
the value of the loss function.
values: dict
the values of the metrics.
"""
self.model.eval()
nb_batch = len(loader)
pbar = tqdm(total=nb_batch, desc="Mini-Batch")
loss = 0
values = {}
visuals = []
with torch.no_grad():
y, y_true, X = [], [], []
for dataitem in loader:
pbar.update()
inputs = dataitem.inputs
if isinstance(inputs, torch.Tensor):
inputs = inputs.to(self.device)
list_targets = []
targets = []
for item in (dataitem.outputs, dataitem.labels):
if item is not None:
targets.append(item.to(self.device))
y_true.extend(item.cpu().detach().numpy())
if len(targets) == 1:
targets = targets[0]
elif len(targets) == 0:
targets = None
if targets is not None:
list_targets.append(targets)
outputs = self.model(inputs)
if len(list_targets) > 0:
batch_loss = self.loss(outputs, *list_targets)
loss += float(batch_loss) / nb_batch
y.extend(outputs.cpu().detach().numpy())
if isinstance(inputs, torch.Tensor):
X.extend(inputs.cpu().detach().numpy())
aux_losses = (self.model.get_aux_losses() if hasattr(self.model, 'get_aux_losses') else dict())
aux_losses.update(self.loss.get_aux_losses() if hasattr(self.loss, 'get_aux_losses') else dict())
for name, aux_loss in aux_losses.items():
name += " on validation set"
if name not in values:
values[name] = 0
values[name] += aux_loss / nb_batch
# Now computes the metrics with (y, y_true)
for name, metric in self.metrics.items():
name += " on validation set"
values[name] = metric(torch.tensor(y), torch.tensor(y_true))
pbar.close()
return y, y_true, X, loss, values
| 41.550122
| 114
| 0.530658
| 16,140
| 0.949747
| 0
| 0
| 0
| 0
| 0
| 0
| 5,352
| 0.314935
|
db36254aae8d66e15ff58a16dc04f7e0fdb0d51b
| 865
|
py
|
Python
|
python/two_pointers/1004_max_consecutive_ones_iii.py
|
linshaoyong/leetcode
|
ea052fad68a2fe0cbfa5469398508ec2b776654f
|
[
"MIT"
] | 6
|
2019-07-15T13:23:57.000Z
|
2020-01-22T03:12:01.000Z
|
python/two_pointers/1004_max_consecutive_ones_iii.py
|
linshaoyong/leetcode
|
ea052fad68a2fe0cbfa5469398508ec2b776654f
|
[
"MIT"
] | null | null | null |
python/two_pointers/1004_max_consecutive_ones_iii.py
|
linshaoyong/leetcode
|
ea052fad68a2fe0cbfa5469398508ec2b776654f
|
[
"MIT"
] | 1
|
2019-07-24T02:15:31.000Z
|
2019-07-24T02:15:31.000Z
|
from collections import deque
class Solution(object):
def longestOnes(self, A, K):
"""
:type A: List[int]
:type K: int
:rtype: int
"""
start, res = 0, 0
zeros = deque()
for i in range(len(A)):
if A[i] == 0:
zeros.append(i)
if K == 0:
res = max(res, i - start)
start = zeros.popleft() + 1
else:
K -= 1
res = max(res, len(A) - start)
return res
def test_long_ones():
s = Solution()
assert 6 == s.longestOnes([1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0], 2)
assert 10 == s.longestOnes(
[0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1], 3)
assert 3 == s.longestOnes([0, 0, 1, 1, 1, 0, 0], 0)
assert 4 == s.longestOnes([0, 0, 0, 1], 4)
| 27.03125
| 69
| 0.419653
| 516
| 0.596532
| 0
| 0
| 0
| 0
| 0
| 0
| 83
| 0.095954
|
db3658941378a7367cc8947a67be394b0c932596
| 3,000
|
py
|
Python
|
student_files/lap_times_db.py
|
jstucken/DET-Python-Anki-Overdrive-v1-1
|
74cfcd9ea533cc0127fa2b8bd4ed26400da8a21b
|
[
"MIT"
] | null | null | null |
student_files/lap_times_db.py
|
jstucken/DET-Python-Anki-Overdrive-v1-1
|
74cfcd9ea533cc0127fa2b8bd4ed26400da8a21b
|
[
"MIT"
] | null | null | null |
student_files/lap_times_db.py
|
jstucken/DET-Python-Anki-Overdrive-v1-1
|
74cfcd9ea533cc0127fa2b8bd4ed26400da8a21b
|
[
"MIT"
] | null | null | null |
#
# This script allows the user to control an Anki car using Python
# To control multiple cars at once, open a seperate Command Line Window for each car
# and call this script with the approriate car mac address.
# This script attempts to save lap times into local mysql db running on the pi
# Author: jstucken
# Created: 23-2-2021
#
SCRIPT_TITLE="Lap timer saving to Mysql"
# import required modules
import loader.bootstrapper
import time
from overdrive import Overdrive
from php_communicator import PhpCommunicator
from network import Network
# Setup our car
car = Overdrive(12) # init overdrive object
car.enableLocationData()
# get car mac address from our class object
car_mac = car.getMacAddress()
car_id = car.getCarId()
username = car.getUsername()
student_id = car.getStudentId()
# count number of laps completed
lap_count = 0
# start the car off
# usage: car.changeSpeed(speed, accel)
car.changeSpeed(400, 800)
last_lap_time = 0
last_lap_count = -1
# race 3 laps and time each one
while lap_count !=3:
time.sleep(0.1)
# lap count is incremented when cars pass over the finish line
lap_count = car.getLapCount()
# count laps done
if last_lap_count != lap_count:
last_lap_count = lap_count
print()
print("lap_count: "+str(lap_count))
# get lap time
prev_lap_time = car.getLapTime()
if last_lap_time != prev_lap_time:
print()
print("prev_lap_time: "+str(prev_lap_time))
# if car has completed at least 1 lap
if lap_count > 0:
# Save last_lap_time time to database now
# get cars current location and speed
location = car.getLocation()
speed = car.getSpeed()
# data to be sent to API
data = {
'student_id':student_id,
'car_id':car_id,
'lap_time':prev_lap_time,
'lap_count':lap_count,
'speed':speed
}
# get the local IP address of the server machine
local_ip_address = Network.getLocalIPAddress()
# build our PHP script URL where data will be sent to be saved
# eg "http://192.168.0.10/lap_times_save.php"
url = "http://"+local_ip_address+"/python_communicator/lap_times_save.php"
# Send data to PHP to save to database
php = PhpCommunicator()
return_text = php.getResponse(url, data) # get the response from PHP
# extracting response text
print("Response from PHP script: %s"%return_text)
# end if
print()
print("*****")
last_lap_time = prev_lap_time
# stop the car
car.stopCarFast()
print("Stopping as car has done the required number of laps")
car.disconnect()
quit()
| 28.037383
| 87
| 0.606333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,333
| 0.444333
|
db377a3b2e18c647ed0d195a162511f6c719f4a5
| 9,992
|
py
|
Python
|
flatsat/opensatkit/cfs/apps/adcs_io/adcs-drivers/cubewheel-driver/test/code.py
|
cromulencellc/hackasat-final-2021
|
d01a1b5d7947b3e41ae2da3ec63d5f43278a5eac
|
[
"MIT"
] | 4
|
2022-02-25T05:45:27.000Z
|
2022-03-10T01:05:27.000Z
|
flatsat/opensatkit/cfs/apps/adcs_io/adcs-drivers/cubewheel-driver/test/code.py
|
cromulencellc/hackasat-final-2021
|
d01a1b5d7947b3e41ae2da3ec63d5f43278a5eac
|
[
"MIT"
] | null | null | null |
flatsat/opensatkit/cfs/apps/adcs_io/adcs-drivers/cubewheel-driver/test/code.py
|
cromulencellc/hackasat-final-2021
|
d01a1b5d7947b3e41ae2da3ec63d5f43278a5eac
|
[
"MIT"
] | 2
|
2022-03-02T02:14:16.000Z
|
2022-03-05T07:36:18.000Z
|
import board
from i2cperipheral import I2CPeripheral
from analogio import AnalogOut
from digitalio import DigitalInOut, Direction, Pull
import struct
import math
import time
regs = [0] * 16
index = 0
i2c_addr = 0x68
frame_id = 0
motor_control_mode = 0
backup_mode = 0
motor_switch_state = 0
hall_switch_state = 0
encoder_switch_state = 0
error_flag = 0
unused = 0
invalidTelemetryFlag = 0
invalidTelecommandFlag = 0
encoderError = 0
uartError = 0
i2cError = 0
canError = 0
configurationError = 0
speedError = 0
reference_speed = 0
wheel_current = 290 # mA
wheel_speed = math.floor(100/2) #rpm
wheel_duty = 5
wheel_speed_backup = wheel_speed
def send_tlm_identification():
# print("Send TLM Identification")
output = []
output += bytearray([8, 0, 9, 8]) + struct.pack("H", 1111) + struct.pack("H", 8888)
return output
def send_tlm_identification_ext():
# print("Send TLM Identification Ext")
output = []
output += struct.pack("H", 1234) + bytearray([68, 0xFF])
return output
def send_tlm_status(motor_control_mode, backup_mode, motor_switch_state, hall_switch_state, encoder_switch_state, error_flag):
# print("Send TLM Status MCM:{0:d}, BM:{1:d}, MSS:{2:d} HSS:{3:d}, ESS:{4:d}, Error Flag: {5:d}".format(motor_control_mode, backup_mode, motor_switch_state, hall_switch_state, encoder_switch_state, error_flag))
status = 0
status |= (backup_mode & 0x1) << 7
status |= (motor_switch_state & 0x1) << 6
status |= (hall_switch_state & 0x1) << 5
status |= (encoder_switch_state & 0x1) << 4
status |= (error_flag & 0x1) << 3
status |= unused
# print("Status byte: {0:d}:{1:08b}".format(status,status))
output = []
output = struct.pack("H", 1111) + struct.pack("H", 8888) + bytearray([0, 0, motor_control_mode, status])
return output
def send_tlm_wheel_data_full(wheel_speed, wheel_reference_speed, wheel_current):
# print("Send TLM Wheel Data Full")
output = []
output += struct.pack("h", wheel_speed) + struct.pack("h", wheel_reference_speed) + struct.pack("h", wheel_current)
return output
def send_tlm_wheel_data_additional(wheel_duty, wheel_speed_backup):
# print("Send TLM Wheel Data Additional")
output = []
output += struct.pack("h", wheel_duty) + struct.pack("h", wheel_duty)
return output
def send_tlm_wheel_status_flags(invalidTelemetryFlag=0, invalidTelecommandFlag=0, encoderError=0, uartError=0, i2cError=0, canError=0, configurationError=0, speedError=0):
status = 0
status |= (invalidTelemetryFlag & 0x01)
status |= (invalidTelecommandFlag & 0x01) << 1
status |= (encoderError & 0x01) << 2
status |= (uartError & 0x01) << 3
status |= (i2cError & 0x01) << 4
status |= (canError & 0x01) << 5
status |= (configurationError & 0x01) << 6
status |= (speedError & 0x01) << 7
return bytearray([status])
def voltage_to_dac(voltage):
return math.floor((voltage*1024)/3.3 * 64)
vout = 0.95
dac_value = voltage_to_dac(vout)
print("Set analog output for testing: {0:f} ({1:d}) V".format(vout, dac_value))
analog_out = AnalogOut(board.A0)
analog_out.value = dac_value
enable_pin = DigitalInOut(board.D8)
enable_pin.direction = Direction.INPUT
# enable_pin.pull = Pull.DOWN
print("Waiting for wheel enable")
while enable_pin.value == False:
time.sleep(0.1)
print("Starting I2C response")
with I2CPeripheral(board.SCL, board.SDA, (i2c_addr,)) as device:
while True:
r = device.request()
if not r:
# Maybe do some housekeeping
continue
with r: # Closes the transfer if necessary by sending a NACK or feeding dummy bytes
# print("Process request")
# print("I2C Addr: 0x{0:02X}, Is Read {1:d}, Is Restart {2:d}".format(r.address, r.is_read, r.is_restart))
if r.address == i2c_addr:
if not r.is_read: # Main write which is Selected read
# print("Get Frame Id Byte")
b = r.read(1)
if b:
frame_id = struct.unpack("B", b)[0]
print("Recieved frame ID: " + str(frame_id))
if frame_id < 40:
# print("Telecommand Recieved")
if frame_id == 1:
reset_id = struct.unpack("B", r.read(1))[0]
# print("Reset telecommand recieved: {0:d}".format(reset_id))
elif frame_id == 2:
reference_speed = struct.unpack("h", r.read(2))[0]
reference_speed_rpm = float(reference_speed/2.0)
wheel_speed = reference_speed + 5
# print("Reference speed telecommand recieved. Speed: {0:d}:{1:f}".format(reference_speed, reference_speed_rpm))
elif frame_id == 3:
wheel_duty = struct.unpack("h", r.read(2))[0]
# print("Duty cycle command recieved. Duty Cycle: {0:d}".format(wheel_duty))
elif frame_id == 7:
motor_switch_state = r.read(1)
# print("Recieved motor power state command. State: {}".format(motor_switch_state))
elif frame_id == 8:
encoder_switch_state = r.read(1)
# print("Recieved encoder power state command. State: {}".format(encoder_switch_state))
elif frame_id == 8:
hall_switch_state = r.read(1)
# print("Recieved hall power state command. State: {}".format(encoder_switch_state))
elif frame_id == 10:
motor_control_mode = struct.unpack("B", r.read(1))[0]
# print("Control mode telecommand recieved. Mode: {0:d}".format(motor_control_mode))
elif frame_id == 12:
backup_mode = r.read(1)
# print("Recieved back-up mode state command. State: {}".format(backup_mode))
elif frame_id == 20:
clear_errors = r.read(1)
if clear_errors == 85:
invalidTelemetryFlag = 0
invalidTelecommandFlag = 0
encoderError = 0
uartError = 0
i2cError = 0
canError = 0
configurationError = 0
speedError = 0
elif frame_id == 31:
new_i2c_addr = r.read(1)
# print("Recieved set I2C addr command. I2C: {}".format(new_i2c_addr))
elif frame_id == 33:
new_can_mask = r.read(1)
# print("Recieved set CAN mask command. CAN Mask: {}".format(new_can_mask))
elif frame_id == 33:
b = r.read(3)
# print("Recieved PWM Gain Command: {0:s}".format(str(b)))
elif frame_id == 34:
b = r.read(6)
# print("Recieved Main Speed Controller Gain Command: {0:s}".format(str(b)))
elif frame_id == 35:
b = r.read(6)
# print("Recieved Backup Speed Controller Gain Command: {0:s}".format(str(b)))
else:
invalidTelecommandFlag = 1
else:
# print("No data to read")
continue
elif r.is_restart: # Combined transfer: This is the Main read message
# print("Recieved Telemetry Request")
n = 0
if frame_id == 128:
n = r.write(bytes(send_tlm_identification()))
elif frame_id == 129:
n = r.write(bytes(send_tlm_identification_ext()))
elif frame_id == 130:
n = r.write(bytes(send_tlm_status(motor_control_mode, backup_mode, motor_switch_state, hall_switch_state, encoder_switch_state, error_flag)))
elif frame_id == 133:
n = r.write(bytes(2))
elif frame_id == 134:
n = r.write(bytes(2))
elif frame_id == 135:
n = r.write(bytes(2))
elif frame_id == 137:
n = r.write(bytes(send_tlm_wheel_data_full(wheel_speed, reference_speed, wheel_current)))
elif frame_id == 138:
n = r.write(bytes(send_tlm_wheel_data_additional(wheel_duty, wheel_speed_backup)))
elif frame_id == 139:
n = r.write(bytearray([9,8,7]))
elif frame_id == 140:
n = r.write(bytearray([1,2,3,4,5,6]))
elif frame_id == 141:
n = r.write(bytearray([10, 11, 12, 13, 14, 15]))
elif frame_id == 145:
n = r.write(bytes(send_tlm_wheel_status_flags(invalidTelemetryFlag, invalidTelecommandFlag, encoderError, uartError, i2cError, canError, configurationError, speedError)))
else:
invalidTelemetryFlag = 1
# print("Wrote " + str(n) + " bytes to master")
| 46.910798
| 214
| 0.522218
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,127
| 0.21287
|
db37c14354deeb12104130ebc747684e2912a561
| 360
|
py
|
Python
|
constants.py
|
tooreht/airstripmap
|
7a65e67e417870c6853fd1adb848cf91d724f566
|
[
"MIT"
] | null | null | null |
constants.py
|
tooreht/airstripmap
|
7a65e67e417870c6853fd1adb848cf91d724f566
|
[
"MIT"
] | null | null | null |
constants.py
|
tooreht/airstripmap
|
7a65e67e417870c6853fd1adb848cf91d724f566
|
[
"MIT"
] | null | null | null |
GOV_AIRPORTS = {
"Antananarivo/Ivato": "big",
"Antsiranana/Diego": "small",
"Fianarantsoa": "small",
"Tolagnaro/Ft. Dauphin": "small",
"Mahajanga": "medium",
"Mananjary": "small",
"Nosy Be": "medium",
"Morondava": "small",
"Sainte Marie": "small",
"Sambava": "small",
"Toamasina": "small",
"Toliary": "small",
}
| 24
| 37
| 0.561111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 245
| 0.680556
|
db399ce2f0303a23e925d9d8085ddcee798d396a
| 608
|
py
|
Python
|
practical_0/fibonacci.py
|
BarracudaPff/code-golf-data-pythpn
|
42e8858c2ebc6a061012bcadb167d29cebb85c5e
|
[
"MIT"
] | null | null | null |
practical_0/fibonacci.py
|
BarracudaPff/code-golf-data-pythpn
|
42e8858c2ebc6a061012bcadb167d29cebb85c5e
|
[
"MIT"
] | null | null | null |
practical_0/fibonacci.py
|
BarracudaPff/code-golf-data-pythpn
|
42e8858c2ebc6a061012bcadb167d29cebb85c5e
|
[
"MIT"
] | null | null | null |
def fibonacci(n):
fibonacci = np.zeros(10, dtype=np.int32)
fibonacci_pow = np.zeros(10, dtype=np.int32)
fibonacci[0] = 0
fibonacci[1] = 1
for i in np.arange(2, 10):
fibonacci[i] = fibonacci[i - 1] + fibonacci[i - 2]
fibonacci[i] = int(fibonacci[i])
print(fibonacci)
for i in np.arange(10):
fibonacci_pow[i] = np.power(int(fibonacci[i]), int(n))
print(fibonacci_pow)
print(np.vstack((fibonacci, fibonacci_pow)))
np.savetxt("myfibonaccis.txt", np.hstack((fibonacci, fibonacci_pow)), fmt="%u")
def main(n):
fibonacci(n)
if __name__ == "__main__":
INPUT = sys.argv[1]
print(INPUT)
main(INPUT)
| 30.4
| 80
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 0.052632
|
db3a4d55930ad8686d2de82e1838a1ca79a144ec
| 24,800
|
py
|
Python
|
UW_System/UW_System/UW_System/spiders/uw_system.py
|
Nouldine/MyCrawlerSystem
|
7bba8ba3ec76e10f70a35700602812ee6f039b63
|
[
"MIT"
] | null | null | null |
UW_System/UW_System/UW_System/spiders/uw_system.py
|
Nouldine/MyCrawlerSystem
|
7bba8ba3ec76e10f70a35700602812ee6f039b63
|
[
"MIT"
] | null | null | null |
UW_System/UW_System/UW_System/spiders/uw_system.py
|
Nouldine/MyCrawlerSystem
|
7bba8ba3ec76e10f70a35700602812ee6f039b63
|
[
"MIT"
] | null | null | null |
from scrapy import Spider
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.linkextractors import LinkExtractor
import scrapy
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError, TCPTimedOutError
from w3lib.html import remove_tags
from UW_System.items import UwSystemItem
class uw_system( scrapy.Spider ):
name = 'uw_system'
allowed_domains = ['wisconsin.edu']
start_urls = [
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0701&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0502&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1001&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2211&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2212&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2202&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1003&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1002&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1911&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0517&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0401&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1905&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2213&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0863&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0829&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0877&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1220&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1506&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1008&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=4957&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0823&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2204&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0862&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0821&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0801&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0802&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1501&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=4955&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1010&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0504&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1512&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1102&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=4931&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2206&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1914&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1103&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0837&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2205&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=4913&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2210&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0838&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0601&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1801&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0855&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=4901&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0506&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0509&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1701&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0702&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1005&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0870&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0876&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1509&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1902&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2207&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2001&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2222&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2103&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1510&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2208&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=2104&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1105&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0808&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=0865&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=1007&submitButton=Match+All+Courses",
"https://www.wisconsin.edu/transfer/wizards/?tis=edu.uwsa.tis.Wizards.JSP%252FequivalencyReport.do%253Bjsessionid%253D310EE116C932F326F4DB0D473D13D51B&tispage=2&fromField=nothingChanged&fromInstitutionId=4684&reqType=C&toInstitutionId=4690&departmentId=4949&submitButton=Match+All+Courses",
]
def start_requests( self ):
for u in self.start_urls:
yield scrapy.Request( u, callback = self.parse_httpbin,
errback = self.errback_httpbin,
dont_filter = True )
def parse_httpbin( self, response ):
self.logger.info("Got successful response {}".format(response.url) )
#items = UwSystemItem()
#course = response.css('#reportTable > tbody > tr > td.::text').extract()
#course = response.css('tbody > tr > td::text').extract()
#course = response.css('.campus-one-list::text').extract()[0];
course_1 = response.xpath('////tr/td[1][@class="campus-one-list"]/text()').extract()
title_1 = response.xpath('////tr/td[2][@class="campus-one-list"]/text()').extract()
course_2 = response.xpath('////tr/td[3][@class="campus-two-list"]/text()').extract()
title_2 = response.xpath('////tr/td[4][@class="campus-two-list"]/text()').extract()
credits = response.xpath('////tr/td[5][@class="campus-two-list"]/text()').extract()
gen_ed = response.xpath('////tr/td[6][@class="campus-two-list"]').extract()
level = response.xpath('////tr/td[7][@class="campus-two-list"]').extract()
special = response.xpath('////tr/td[8][@class="special-list"]').extract()
final_course_1 = []
final_title_1 = []
final_course_2 = []
final_title_2 = []
final_credits = []
final_gen_ed = []
final_level = []
final_special = []
for course_set1 in course_1:
if course_set1 == '\n' or course_set1 == ' ':
continue
final_course_1.append(remove_tags(course_set1))
for title1 in title_1:
if title1 == '\n' or title1 == ' ':
continue
final_title_1.append(remove_tags(title1))
for course_set2 in course_2:
if course_set2 == '\n' or course_set2 == ' ':
continue
final_course_2.append(remove_tags(course_set2))
for title2 in title_2:
if title2 == '\n' or title2 == ' ':
continue
final_title_2.append(remove_tags(title2))
for creditset in credits:
if creditset == '\n' or creditset == ' ':
continue
final_credits.append(remove_tags(creditset))
for gen in gen_ed:
if gen == '\n':
continue
final_gen_ed.append(remove_tags(gen))
for lev in level:
if lev == '\n' or lev == ' ':
continue
final_level.append(remove_tags(lev))
for specia in special:
if specia == '\n\n ':
continue
final_special.append(remove_tags(specia))
item = []
track_index = 0
course_size = len(final_course_1)
while track_index < course_size:
items = UwSystemItem()
items['course_1'] = final_course_1[ track_index ]
items['title_1'] = final_title_1[ track_index ]
items['course_2'] = final_course_2[ track_index ]
items['title_2'] = final_title_2[ track_index ]
items['credits'] = final_credits[ track_index ]
try:
items['gen_ed'] = final_gen_ed[ track_index ]
except IndexError:
items['gen_ed'] = 'None'
try:
items['level'] = final_level[ track_index ]
except IndexError:
items['level'] = 'None'
try:
items['special'] = final_special[ track_index ]
except IndexError:
items['special'] = 'None'
item.append(items)
track_index += 1
return item
def errback_httpbin( self, failure):
# log all failures
self.logger.error(repr(failure))
# in case you want to do something special for some errors,
# you may need the failure's type:
if failure.check(HttpError):
# These exception come from HttpError spider middleware
# you can get the non-200 response
response = failure.value.response
self.logger.error("HttpError on %s", response.url )
elif failure.check(DNSLookupError):
# This is the original request
request = failure.request
self.logger.error('DNSLookupError on %s', request.url )
elif failure.check(TimeoutError, TCPTimeOutError ):
request = failure.request
self.logger.error('TimeoutError on %s', request.url)
| 90.510949
| 303
| 0.798185
| 24,240
| 0.977419
| 225
| 0.009073
| 0
| 0
| 0
| 0
| 20,166
| 0.813145
|
db3b169862361f20c4e85e1f3babf59d22b794c5
| 10,622
|
py
|
Python
|
src/lib/GL/glutbindings/glutbind.py
|
kokizzu/v8cgi
|
eafd3bd7a5dd1d60e2f1483701a52e7ac0ae0eba
|
[
"BSD-3-Clause"
] | 4
|
2016-01-31T08:49:35.000Z
|
2021-07-12T17:31:42.000Z
|
src/lib/GL/glutbindings/glutbind.py
|
kokizzu/v8cgi
|
eafd3bd7a5dd1d60e2f1483701a52e7ac0ae0eba
|
[
"BSD-3-Clause"
] | null | null | null |
src/lib/GL/glutbindings/glutbind.py
|
kokizzu/v8cgi
|
eafd3bd7a5dd1d60e2f1483701a52e7ac0ae0eba
|
[
"BSD-3-Clause"
] | 1
|
2021-06-03T22:51:17.000Z
|
2021-06-03T22:51:17.000Z
|
import sys
import re
PATH_GLUT = 'glut.h'
FILE_GLUT = 'glutbind.cpp'
TEMPLATES = ['glutInit', 'glutTimerFunc']
def main():
"""
Still some things have to be hand-made, like
changing argv pargc values in the glutInit method definition
Also change the TimerFunc method with some magic.
"""
make_glut()
def make_glut():
constants = []
functions = []
void_stars = []
constant = re.compile(".+define[\s]+GLUT_([^\s]+).*")
function = re.compile("[\s]*extern[\s]+([^\s]+)[\s]+APIENTRY[\s]+glut([A-Za-z0-9]+)\((.*)\);")
text_out = []
fin = open(PATH_GLUT, 'r')
for l in fin:
mat = re.match(constant, l)
if mat and not mat.group(1) in constants:
name = mat.group(1)
constants.append(name)
text_out.append(make_constant("GLUT", name))
if name.find("STROKE") != -1 or name.find("BITMAP") != -1:
void_stars.append(name)
#print "GLUT_" + mat.group(1) + "\n"
else:
mat = re.match(function, l)
if mat:
prefix = "glut"
return_val = mat.group(1)
name = mat.group(2)
params = mat.group(3)
functions.append(name)
#if has template then take the template code
if (prefix + name) in TEMPLATES:
t = open(prefix + name + '.template', 'r')
text_out.append(t.read())
t.close()
else:
has_lambda, count, params_list = get_param_list(params)
if has_lambda is True and count == 1:
text_out.append(make_function_with_callback(prefix, name, params_list, return_val))
else:
text_out.append(make_function(prefix, name, params_list, count, return_val))
#print return_val + " " + name + " " + params
fin.close()
fout = open(FILE_GLUT, 'w')
fout.write("""
#include "glutbind.h"
int* pargc_;
char** argv_;
map<const char*, void*> font_;
Persistent<Context> GlutFactory::glut_persistent_context;
""" + '\n'.join(text_out) + make_main_glut_function(constants, functions, void_stars))
fout.close()
def make_main_glut_function(constants, functions, void_stars):
text_out_begin = """
Handle<ObjectTemplate> GlutFactory::createGlut(int* pargc, char** argv) {
pargc_ = pargc;
argv_ = argv;
HandleScope handle_scope;
Handle<ObjectTemplate> Glut = ObjectTemplate::New();
Glut->SetInternalFieldCount(1);
"""
text_out_end = """
// Again, return the result through the current handle scope.
return handle_scope.Close(Glut);
}
"""
fnt = [bind_font(name) for name in void_stars]
cts = [bind_accessor("Glut", name) for name in constants]
fts = [bind_function("Glut", name) for name in functions]
return text_out_begin + '\n'.join(fnt) + '\n'.join(cts) + '\n'.join(fts) + text_out_end
def make_constant(prefix, name):
if name.find("BITMAP") != -1 or name.find("STROKE") != -1:
return_val = "return String::New(\""+ name +"\");\n"
else:
return_val = "return Uint32::New(GLUT_"+ name +");"
text_out = """
Handle<Value> GetGLUT_%%(Local<String> property,
const AccessorInfo &info) {
##
}
"""
return multiple_replace({
'%%': name,
'##': return_val
}, text_out)
def make_function(prefix, name, params_list, count, return_val):
text_out = """
Handle<Value> GLUT<name>Callback(const Arguments& args) {
//if less that nbr of formal parameters then do nothing
if (args.Length() < <len_params>) return v8::Undefined();
//define handle scope
HandleScope scope;
//get arguments
<args>
//make call
<call>
return v8::Undefined();
}
"""
return multiple_replace({
'<name>': name,
'<len_params>': str(count),
'<args>': make_args(params_list, count),
'<call>': make_call(prefix + name, params_list, count)
}, text_out)
def make_function_with_callback(prefix, name, params_list, return_val):
text_out = """
Persistent<Function> persistent<name>;
<prototype> {
//define handle scope
HandleScope scope;
Handle<Value> valueArr[<nformalparams>];
<formalparamassignment>
TryCatch try_catch;
Handle<Value> result = persistent<name>->Call(GlutFactory::glut_persistent_context->Global(), <nformalparams>, valueArr);
if (result.IsEmpty()) {
String::Utf8Value error(try_catch.Exception());
fprintf(stderr, "Exception in <name>: %s\\n", *error);
}
}
Handle<Value> GLUT<name>Callback(const Arguments& args) {
//if less that nbr of formal parameters then do nothing
if (args.Length() < 1 || !args[0]->IsFunction()) return v8::Undefined();
//get arguments
//delete previous assigned function
persistent<name>.Dispose();
Handle<Function> value0 = Handle<Function>::Cast(args[0]);
persistent<name> = Persistent<Function>::New(value0);
//make call
glut<name>((<signature>) func<name>);
return v8::Undefined();
}
"""
nformalparams, prototype = make_prototype(name, params_list[0])
signature = params_list[0].replace('func', '')
formalparamassignment = formal_param_assignment(signature)
return multiple_replace({
'<name>': name,
'<nformalparams>': str(nformalparams),
'<prototype>': prototype,
'<formalparamassignment>': formalparamassignment,
'<signature>': signature
}, text_out)
def make_prototype(name, signature):
print 'prev ' + signature
signature = signature.replace('(*func)', 'func' + name)
ht = signature.split('(')
hd, tail = ht[0], ht[1].replace(')', '')
ans = [get_type(''.join(val), False) + ' arg' + str(i) for i, val in enumerate(tail.split(',')) if val.find('void') == -1]
#.strip().split(' ')[:-1]
print 'end ' + hd + ' ( ' + ','.join(ans) + ')'
return len(ans), hd + ' ( ' + ','.join(ans) + ')'
def formal_param_assignment(signature):
print "signature"
print signature
pat = re.compile('[\s]*[a-zA-Z0-9\*]+[\s]*\(\*[\s]*\)\((.*)\)')
pars = re.match(pat, signature)
if pars:
pars = pars.group(1).split(',')
ans = []
for i, val in enumerate(pars):
if val.find('int') != -1 or val.find('unsigned char') != -1:
ans.append(" valueArr[" + str(i) + "] = Integer::New(arg" + str(i) + ");")
elif val.find('float') != -1 or val.find('double') != -1:
ans.append(" valueArr[" + str(i) + "] = Number::New(arg" + str(i) + ");")
elif val.find('char*') != -1:
ans.append(" valueArr[" + str(i) + "] = String::New(arg" + str(i) + ");")
return '\n'.join(ans)
else:
return ''
def get_param_list(params):
params_list = []
params_aux = params.split(',')
passed = False
for par in params_aux:
if passed and params_list[-1].count('(') != params_list[-1].count(')'):
params_list[-1] += ',' + par
else:
params_list.append(par)
passed = True
aux = len(params_list)
if aux == 1 and params_list[0].find('func') == -1 and len(params_list[0].strip().split(' ')) == 1:
nb = 0
else:
nb = aux
return ' '.join(params_list).find('func') != -1, nb, params_list
def make_args(params_list, count):
ans = []
for i in range(count):
el = params_list[i]
type = get_type(el)
#is function
if type.find('(*') != -1:
ans.append(" Handle<Function> value" + str(i) + " = Handle<Function>::Cast(args[" + str(i) + "]);\n void* arg" + str(i) + " = *value" + str(i) + ";\n")
#print "function " + type
#is string
elif type.find('char*') != -1:
ans.append(" String::Utf8Value value"+ str(i) +"(args["+ str(i) +"]);\n char* arg" + str(i) + " = *value"+ str(i) +";\n")
#print "string " + type
#is void*
elif type.find('void*') != -1:
ans.append(" String::Utf8Value value"+ str(i) +"(args["+ str(i) +"]);\n char* key" + str(i) + " = *value"+ str(i) +";\n void* arg" + str(i) + " = font_[key"+ str(i) +"];\n")
#print "void " + type
#is array
elif type.find('*') != -1:
ans.append(" Handle<Array> arg" + str(i) + " = Array::Cast(args[" + str(i) + "]);\n")
#print "array " + type
#is unsigned integer
elif type.find('unsigned int') != -1:
ans.append(" unsigned int arg" + str(i) + " = args["+ str(i) +"]->Uint32Value();\n")
#print "unsigned int " + type
#is integer
elif type.find('int') != -1 or type.find('enum') != -1:
ans.append(" int arg" + str(i) + " = args["+ str(i) +"]->IntegerValue();\n")
#print "integer " + type
#is double, float
elif type.find('double') != -1 or type.find('float') != -1:
ans.append(" double arg" + str(i) + " = args["+ str(i) +"]->NumberValue();\n")
#print "double " + type
else:
print "don't know what this is "
print type
return ''.join(ans)
def make_call(name, params_list, nb):
return name + "(" + ", ".join([get_type(params_list[i]) + "arg" + str(i) for i in range(nb)]) + ");"
def bind_accessor(prefix, name):
return " " + prefix + "->SetAccessor(String::NewSymbol(\"" + name + "\"), GetGLUT_" + name + ");\n"
def bind_function(prefix, name):
return " " + prefix + "->Set(String::NewSymbol(\"" + name + "\"), FunctionTemplate::New(GLUT" + name + "Callback));\n"
def bind_font(name):
return " font_[\""+ name +"\"] = GLUT_" + name + ";\n"
def get_type(t, parens=True):
if t.find('(*') != -1 or t.find('func') != -1:
ans = t.replace('func', '')
else:
ans = ' '.join(t.strip().split(' ')[:-1]) + '*' * (t.strip().split(' ')[-1].count('*'))
return '(' + ans + ')' if parens else ans
def multiple_replace(dict, text):
""" Replace in 'text' all occurences of any key in the given
dictionary by its corresponding value. Returns the new tring."""
# Create a regular expression from the dictionary keys
regex = re.compile("(%s)" % "|".join(map(re.escape, dict.keys())))
# For each match, look-up corresponding value in dictionary
return regex.sub(lambda mo: dict[mo.string[mo.start():mo.end()]], text)
main()
| 33.828025
| 188
| 0.553568
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,278
| 0.402749
|
e1d196f613c2a1139ba07be80fda44073fa5c141
| 1,602
|
py
|
Python
|
clase_caballo.py
|
DorianAlbertoIbanezNanguelu/concurrencia-caballos
|
91b7f4818505183bd38923bc5b744fc04e83c2f3
|
[
"MIT"
] | null | null | null |
clase_caballo.py
|
DorianAlbertoIbanezNanguelu/concurrencia-caballos
|
91b7f4818505183bd38923bc5b744fc04e83c2f3
|
[
"MIT"
] | null | null | null |
clase_caballo.py
|
DorianAlbertoIbanezNanguelu/concurrencia-caballos
|
91b7f4818505183bd38923bc5b744fc04e83c2f3
|
[
"MIT"
] | null | null | null |
import threading
import time
import random
from multiprocessing.pool import ThreadPool
from PyQt5 import QtCore, QtGui, QtWidgets
bandera = False
val1 = ""
msg = 'Caballo ganador es: {}'
# Clase Caballo
class caballo(threading.Thread):
def __init__(self, num, b1,resultado):
global val1,bandera
threading.Thread.__init__(self)
bandera = False
self.resultado = 20.0
self.tiempo_inicio = time.time()
self.tiempo_final = ""
self.tiempo_total = ""
self.num = num
self.valor = 0
self.boton = b1
self.eleccion= ""
# Selecciona un valor aleatorio, 10 20 o 30
def aleatorio(self):
mylist = ["10","20","30","40"]
self.eleccion = random.choice(mylist)
# Movimiento de los caballos
def movimiento(self):
self.p = self.boton.pos()
self.p += QtCore.QPoint(int(self.eleccion), 0)
self.valor += int(self.eleccion)
self.boton.move(self.p)
time.sleep(0.75)
def retorno(self):
self.resultado
# Hilos
def run(self):
global bandera
while(True):
if bandera == True:
break
else:
self.aleatorio()
self.movimiento()
if self.valor >= 600:
self.tiempo_final = time.time()
self.resultado = self.tiempo_final-self.tiempo_inicio
print("\nEl caballo: " + str(self.num)+" cruzó la meta!!, Tiempo: "+str(self.resultado))
bandera=True
break
| 22.25
| 101
| 0.558052
| 1,379
| 0.860262
| 0
| 0
| 0
| 0
| 0
| 0
| 190
| 0.118528
|
e1d33fe58f921e97b404a9c643f4793d56cc9818
| 10,353
|
py
|
Python
|
vwo/api/track.py
|
wingify/vwo-python-sdk
|
8b8e798a16c43012ca2c6c6c85dde66f4f3cb6a5
|
[
"Apache-2.0"
] | 14
|
2019-08-06T06:57:46.000Z
|
2022-01-05T13:27:50.000Z
|
vwo/api/track.py
|
wingify/vwo-python-sdk
|
8b8e798a16c43012ca2c6c6c85dde66f4f3cb6a5
|
[
"Apache-2.0"
] | 3
|
2019-08-19T10:29:17.000Z
|
2021-09-16T15:59:38.000Z
|
vwo/api/track.py
|
wingify/vwo-python-sdk
|
8b8e798a16c43012ca2c6c6c85dde66f4f3cb6a5
|
[
"Apache-2.0"
] | 10
|
2019-08-08T12:38:50.000Z
|
2021-09-14T11:35:00.000Z
|
# Copyright 2019-2021 Wingify Software Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..helpers import impression_util
from ..constants import constants
from ..constants.constants import API_METHODS
from ..helpers import campaign_util, validate_util
from ..enums.log_message_enum import LogMessageEnum
from ..enums.file_name_enum import FileNameEnum
from ..enums.log_level_enum import LogLevelEnum
FILE = FileNameEnum.Api.Track
def _track(vwo_instance, campaign_specifier, user_id, goal_identifier, **kwargs):
"""
This API method: Marks the conversion of the campaign(s) for a particular goal
1. validates the arguments being passed
2. retrieves the campaigns having the same global goal
3. calls track_campaign_goal for all the goals
Args:
campaign_specifier (None, list, string): Campaign key(s), it can be None in case
of all campaigns, list in case of given campaigns and string in case of particular
campaign should to be tracked.
user_id (string): ID assigned to a user
goal_identifier (string): campaign(s)'s unique goal identifier
Keyword Args:
revenue_value (int|float|string): Provide it through **kwargs.
It is the revenue generated on triggering the goal
custom_variables (dict): Custom variables required for segmentation
variation_targeting_variables (dict): Whitelisting variables to target users
Returns:
dict|None: None if called for single campaign and no goal tracked or
called for all campaigns and no goal tracked.
Dict otherwise of campaign_key with True/False showing whether the goal
has been tracked for the campaign or not
"""
vwo_instance.logger.set_api(API_METHODS.TRACK)
# Retrive revenue value and custom_variables
revenue_value = kwargs.get("revenue_value")
custom_variables = kwargs.get("custom_variables")
variation_targeting_variables = kwargs.get("variation_targeting_variables")
valid_params = True
# Check for valid args
if (
not validate_util.is_valid_string(user_id)
or not validate_util.is_valid_string(goal_identifier)
or (custom_variables is not None and not validate_util.is_valid_dict(custom_variables))
or (
variation_targeting_variables is not None and not validate_util.is_valid_dict(variation_targeting_variables)
)
or (revenue_value is not None and not validate_util.is_valid_basic_data_type(revenue_value))
):
valid_params = False
goal_type_to_track = kwargs.get("goal_type_to_track")
if goal_type_to_track is None:
goal_type_to_track = vwo_instance.goal_type_to_track
elif not validate_util.is_valid_goal_type(goal_type_to_track):
valid_params = False
if not valid_params:
vwo_instance.logger.log(
LogLevelEnum.ERROR, LogMessageEnum.ERROR_MESSAGES.TRACK_API_INVALID_PARAMS.format(file=FILE)
)
return None
campaigns_without_goal = []
no_campaign_found = False
if type(campaign_specifier) is str:
campaign = campaign_util.get_campaign(vwo_instance.settings_file, campaign_specifier)
goal = campaign_util.get_campaign_goal(campaign, goal_identifier)
if not goal:
no_campaign_found = True
else:
campaign_goal_list = [(campaign, goal)]
elif type(campaign_specifier) is list:
campaigns = campaign_util.get_campaigns(vwo_instance.settings_file, campaign_specifier).values()
(campaign_goal_list, campaigns_without_goal) = campaign_util.get_campaigns_with_goal_id(
campaigns, goal_identifier
)
for campaign in campaigns_without_goal:
vwo_instance.logger.log(
LogLevelEnum.ERROR,
LogMessageEnum.ERROR_MESSAGES.TRACK_API_GOAL_NOT_FOUND.format(
file=FILE, goal_identifier=goal_identifier, user_id=user_id, campaign_key=campaign.get("key")
),
)
elif campaign_specifier is None:
campaigns = vwo_instance.settings_file.get("campaigns")
campaign_goal_list = campaign_util.get_campaigns_with_goal_id(campaigns, goal_identifier)[0]
if not campaign_goal_list:
no_campaign_found = True
else:
vwo_instance.logger.log(
# Specific log for campaign_specifier type
LogLevelEnum.ERROR,
LogMessageEnum.ERROR_MESSAGES.TRACK_API_INVALID_PARAMS.format(file=FILE),
)
return None
if no_campaign_found:
vwo_instance.logger.log(
LogLevelEnum.ERROR,
LogMessageEnum.ERROR_MESSAGES.NO_CAMPAIGN_FOUND.format(file=FILE, goal_identifier=goal_identifier),
)
return None
ret_value = {}
campaign_goal_revenue_prop_list = []
for campaign, goal in campaign_goal_list:
result = track_campaign_goal(
vwo_instance,
campaign,
user_id,
goal,
revenue_value,
custom_variables,
variation_targeting_variables,
goal_type_to_track,
campaign_goal_revenue_prop_list,
)
ret_value[campaign.get("key")] = result
for campaign in campaigns_without_goal:
ret_value[campaign.get("key")] = False
if len(campaign_goal_revenue_prop_list) != 0 and (
not vwo_instance.is_event_batching_enabled and vwo_instance.is_event_arch_enabled is True
):
params = impression_util.get_events_params(vwo_instance.settings_file, goal_identifier)
impression = impression_util.create_track_goal_events_impression(
vwo_instance.settings_file, user_id, goal_identifier, campaign_goal_revenue_prop_list, revenue=revenue_value
)
vwo_instance.event_dispatcher.dispatch_events(params=params, impression=impression)
return ret_value
def track_campaign_goal(
vwo_instance,
campaign,
user_id,
goal,
revenue_value,
custom_variables,
variation_targeting_variables,
goal_type_to_track,
campaign_goal_revenue_prop_list,
):
"""
It marks the conversion of given goal for the given campaign
1. Checks if user is eligible to get bucketed into the campaign,
2. Gets the assigned determinitic variation to the
user(based on userId), if user becomes part of campaign
3. Sends an impression call to VWO server to track goal data if event arch
is not enabled
Args:
campaign (dict): Campaign object
user_id (string): ID assigned to a user
goal (dict): Goal object
revenue_value (int|float|string): It is the revenue generated on triggering the goal
custom_variables (dict): Custom variables required for segmentation
variation_targeting_variables (dict): Whitelisting variables to target users
goal_type_to_track (vwo.GOAL_TYPES): Goal type that should be tracked in case of mixed
global goal identifier
campaign_goal_revenue_prop_list (list): list of campaign_id, goal_id & goal's revenueProp
(if revenue goal else None) to build event arch impression
Returns:
bool: True if goal successfully tracked else False
"""
campaign_type = campaign.get("type")
if campaign_type == constants.CAMPAIGN_TYPES.FEATURE_ROLLOUT:
vwo_instance.logger.log(
LogLevelEnum.ERROR,
LogMessageEnum.ERROR_MESSAGES.INVALID_API.format(
file=FILE, user_id=user_id, campaign_key=campaign.get("key"), campaign_type=campaign_type
),
)
return False
goal_type = goal.get("type")
if (goal_type_to_track == constants.GOAL_TYPES.CUSTOM and goal_type == constants.GOAL_TYPES.REVENUE) or (
goal_type_to_track == constants.GOAL_TYPES.REVENUE and goal_type == constants.GOAL_TYPES.CUSTOM
):
# We can log goal type didn't match in debug mode
return False
if goal_type == constants.GOAL_TYPES.REVENUE and not validate_util.is_valid_value(revenue_value):
vwo_instance.logger.log(
LogLevelEnum.ERROR,
LogMessageEnum.ERROR_MESSAGES.TRACK_API_REVENUE_NOT_PASSED_FOR_REVENUE_GOAL.format(
file=FILE, user_id=user_id, goal_identifier=goal.get("identifier"), campaign_key=campaign.get("key")
),
)
return False
if goal_type == constants.GOAL_TYPES.CUSTOM:
revenue_value = None
variation, _ = vwo_instance.variation_decider.get_variation(
user_id,
campaign,
custom_variables=custom_variables,
variation_targeting_variables=variation_targeting_variables,
goal_data={"identifier": goal.get("identifier")},
api_method=constants.API_METHODS.TRACK,
)
if variation:
if not vwo_instance.is_event_arch_enabled or vwo_instance.is_event_batching_enabled is True:
impression = impression_util.create_impression(
vwo_instance.settings_file,
campaign.get("id"),
variation.get("id"),
user_id,
goal.get("id"),
revenue_value,
)
vwo_instance.event_dispatcher.dispatch(impression)
vwo_instance.logger.log(
LogLevelEnum.INFO,
LogMessageEnum.INFO_MESSAGES.MAIN_KEYS_FOR_IMPRESSION.format(
file=FILE,
campaign_id=impression.get("experiment_id"),
account_id=impression.get("account_id"),
variation_id=impression.get("combination"),
),
)
else:
campaign_goal_revenue_prop_list.append((campaign.get("id"), goal.get("id"), goal.get("revenueProp")))
return True
return False
| 40.127907
| 120
| 0.691297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,283
| 0.317106
|
e1d379ffe45c72193de30757e4bad02874d4385a
| 2,687
|
py
|
Python
|
iMessSpam.py
|
fabiopigi/iMessageSpam
|
4d1984f5286f5cf0229d414470a4dc60e5ba12d2
|
[
"MIT"
] | null | null | null |
iMessSpam.py
|
fabiopigi/iMessageSpam
|
4d1984f5286f5cf0229d414470a4dc60e5ba12d2
|
[
"MIT"
] | null | null | null |
iMessSpam.py
|
fabiopigi/iMessageSpam
|
4d1984f5286f5cf0229d414470a4dc60e5ba12d2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#import some dope
import sys
import os
import re
import time
from random import randrange
from itertools import repeat
numbers = {
'adam' :"+41111111111",
'bob' :"+41222222222",
'chris' :"+41333333333",
'dave' :"+41444444444",
}
print "Gespeicherte Empfänger: "
for name in numbers:
print "%10s - %s"%(name,numbers[name])
number = ""
while number == "":
numberID = raw_input("\nEmpfänger eingeben: ")
if numberID in numbers:
number = numbers[numberID]
pause = int(raw_input("\nIntervall in Sekunden: "))
print """
Verfügbare Optionen:
[1] Zeitansagen im Format 'Es ist 17:34:22'
[2] Zufällige 'Chuck Norris' Jokes
[3] Satz für Satz aus einem Buch (Twilight)
[4] Fifty Shades of HEX
[5] Fröhliches Flaggen raten
"""
option = int(raw_input("Option auswählen: "))
if option == 1:
anzahl = int(raw_input("\nAnzahl Nachrichten: "))
start = 0
elif option == 2:
anzahl = int(raw_input("\nAnzahl Nachrichten: "))
start = 0
replaceName = raw_input("\n'Chuck Norris' durch Namen ersetzen: ")
if replaceName == "":
replaceName = "Chuck Norris"
elif option == 3:
p = open('content/twilight.txt')
book = p.read()
pat = re.compile(r'([A-Z][^\.!?]*[\.!?])', re.M)
sentences = pat.findall(book)
anzahl = int(raw_input("\nAnzahl Nachrichten: "))
start = int(raw_input("\nBei n. Satz anfangen: "))-1
anzahl = anzahl + (start)
elif option == 4:
anzahl = 50
start = 0
elif option == 5:
anzahl = 50
start = 0
import Countries
else:
anzahl = 0
start = 0
print "\n\nSenden beginnt...\n\n"
#tunay bei 207
for i in range(start,anzahl,1):
if option == 1:
cmdCode = "date +'%H:%M:%S'"
message = "Es ist jetzt " + os.popen(cmdCode).read()
elif option == 2:
curlCode = "curl 'http://api.icndb.com/jokes/random' -s | sed -e 's/.*joke\\\": \\\"//' -e 's/\\\", \\\".*//' -e 's/Chuck Norris/" + replaceName + "/g' -e 's/"/\"/g'"
message = os.popen(curlCode).read()
elif option == 3:
message = sentences[i]
elif option == 4:
message = "#%s" % "".join(list(repeat(hex(randrange(16, 255))[2:],3))).upper()
elif option == 5:
flags = os.listdir("content/flags")
country = Countries.iso[flags[randrange(1,len(flags))][:2]]
message = "Dies ist die Flagge von '%s'."%(country["Name"])
filePath = os.path.abspath("content/flags/%s.png"%country["ISO"])
osaCode = "osascript sendImage.scpt \"%s\" \"%s\""%(number,filePath)
osaReturn = os.popen(osaCode).read()
print message
message = message.replace('"', r'\"')
osaCode = "osascript sendText.scpt \"%s\" \"%s\""%(number,message)
print "%3d > %s"%((i+1),message)
osaReturn = os.popen(osaCode).read()
time.sleep(pause)
| 23.163793
| 175
| 0.628582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,056
| 0.391982
|
e1d4132df41823b278230500d5a9366ca4662b08
| 2,582
|
py
|
Python
|
mesh_to_tet.py
|
NVlabs/deformable_object_grasping
|
c39147c6ce525e90512f54c3c5386903a0e7f401
|
[
"MIT"
] | 30
|
2020-12-18T22:05:10.000Z
|
2021-09-27T23:45:18.000Z
|
mesh_to_tet.py
|
NVlabs/DefGraspSim
|
e6c1a9760ded188e6986cc49d0298a2c8803830d
|
[
"MIT"
] | 2
|
2021-12-09T18:05:22.000Z
|
2022-03-20T08:26:04.000Z
|
mesh_to_tet.py
|
NVlabs/deformable_object_grasping
|
c39147c6ce525e90512f54c3c5386903a0e7f401
|
[
"MIT"
] | 7
|
2021-01-16T06:23:02.000Z
|
2021-09-02T16:32:19.000Z
|
# Copyright (c) 2020 NVIDIA Corporation
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""Convert a .mesh file (fTetWild format) to .tet (IsaacGym format)."""
def convert_mesh_to_tet(mesh_file_path, tet_output_path):
"""Convert a .mesh file to a .tet file."""
mesh_file = open(mesh_file_path, "r")
tet_output = open(tet_output_path, "w")
mesh_lines = list(mesh_file)
mesh_lines = [line.strip('\n') for line in mesh_lines]
vertices_start = mesh_lines.index('Vertices')
num_vertices = mesh_lines[vertices_start + 1]
vertices = mesh_lines[vertices_start + 2:vertices_start + 2
+ int(num_vertices)]
tetrahedra_start = mesh_lines.index('Tetrahedra')
num_tetrahedra = mesh_lines[tetrahedra_start + 1]
tetrahedra = mesh_lines[tetrahedra_start + 2:tetrahedra_start + 2
+ int(num_tetrahedra)]
print("# Vertices, # Tetrahedra:", num_vertices, num_tetrahedra)
# Write to tet output
tet_output.write("# Tetrahedral mesh generated using\n\n")
tet_output.write("# " + num_vertices + " vertices\n")
for v in vertices:
tet_output.write("v " + v + "\n")
tet_output.write("\n")
tet_output.write("# " + num_tetrahedra + " tetrahedra\n")
for t in tetrahedra:
line = t.split(' 0')[0]
line = line.split(" ")
line = [str(int(k) - 1) for k in line]
l_text = ' '.join(line)
tet_output.write("t " + l_text + "\n")
if __name__ == "__main__":
convert_mesh_to_tet(
"path/to/mesh",
"path/to/tet")
| 40.984127
| 78
| 0.690937
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,411
| 0.546476
|
e1d45c9d42dd76322a265a56bb903e40fa748ffe
| 3,601
|
py
|
Python
|
tests/policies_tests/test_deterministic_policy.py
|
xinyuewang1/chainerrl
|
49425d09cb0749968f4e364e281670e752a46791
|
[
"MIT"
] | 2
|
2020-05-20T06:15:20.000Z
|
2020-05-20T06:15:27.000Z
|
tests/policies_tests/test_deterministic_policy.py
|
WhenTheyCry96/chainerrl
|
0f32aae2855dbb6288ae628be6271739ced6c42c
|
[
"MIT"
] | null | null | null |
tests/policies_tests/test_deterministic_policy.py
|
WhenTheyCry96/chainerrl
|
0f32aae2855dbb6288ae628be6271739ced6c42c
|
[
"MIT"
] | 1
|
2019-08-08T19:13:53.000Z
|
2019-08-08T19:13:53.000Z
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import unittest
import chainer
import chainer.functions as F
from chainer import testing
from chainer.testing import attr
import numpy as np
import chainerrl
@testing.parameterize(*(
testing.product({
'n_input_channels': [1, 5],
'action_size': [1, 2],
'bound_action': [True, False],
'nonlinearity': ['relu', 'elu'],
'model_class': [chainerrl.policies.FCDeterministicPolicy],
'model_kwargs': testing.product({
'n_hidden_layers': [0, 1, 2],
'n_hidden_channels': [1, 2],
'last_wscale': [1, 1e-3],
}),
}) +
testing.product({
'n_input_channels': [1, 5],
'action_size': [1, 2],
'bound_action': [True, False],
'nonlinearity': ['relu', 'elu'],
'model_class': [chainerrl.policies.FCBNDeterministicPolicy],
'model_kwargs': testing.product({
'n_hidden_layers': [0, 1, 2],
'n_hidden_channels': [1, 2],
'normalize_input': [True, False],
'last_wscale': [1, 1e-3],
}),
}) +
testing.product({
'n_input_channels': [1, 5],
'action_size': [1, 2],
'bound_action': [True, False],
'nonlinearity': ['relu', 'elu'],
'model_class': [chainerrl.policies.FCLSTMDeterministicPolicy],
'model_kwargs': testing.product({
'n_hidden_layers': [0, 1, 2],
'n_hidden_channels': [1, 2],
'last_wscale': [1, 1e-3],
}),
})
))
class TestDeterministicPolicy(unittest.TestCase):
def _make_model(self, **kwargs):
kwargs.update(self.model_kwargs)
return self.model_class(**kwargs)
def _test_call(self, gpu):
# This method only check if a given model can receive random input
# data and return output data with the correct interface.
nonlinearity = getattr(F, self.nonlinearity)
min_action = np.full((self.action_size,), -0.01, dtype=np.float32)
max_action = np.full((self.action_size,), 0.01, dtype=np.float32)
model = self._make_model(
n_input_channels=self.n_input_channels,
action_size=self.action_size,
bound_action=self.bound_action,
min_action=min_action,
max_action=max_action,
nonlinearity=nonlinearity,
)
batch_size = 7
x = np.random.rand(
batch_size, self.n_input_channels).astype(np.float32)
if gpu >= 0:
model.to_gpu(gpu)
x = chainer.cuda.to_gpu(x)
min_action = chainer.cuda.to_gpu(min_action)
max_action = chainer.cuda.to_gpu(max_action)
y = model(x)
self.assertTrue(isinstance(
y, chainerrl.distribution.ContinuousDeterministicDistribution))
a = y.sample()
self.assertTrue(isinstance(a, chainer.Variable))
self.assertEqual(a.shape, (batch_size, self.action_size))
self.assertEqual(chainer.cuda.get_array_module(a),
chainer.cuda.get_array_module(x))
if self.bound_action:
self.assertTrue((a.array <= max_action).all())
self.assertTrue((a.array >= min_action).all())
def test_call_cpu(self):
self._test_call(gpu=-1)
@attr.gpu
def test_call_gpu(self):
self._test_call(gpu=0)
| 33.971698
| 75
| 0.608442
| 1,840
| 0.510969
| 0
| 0
| 3,178
| 0.882533
| 0
| 0
| 590
| 0.163843
|
e1d5174f8289f91757ffb47b8ef0788990d1f6b1
| 33,946
|
py
|
Python
|
freshmaker/handlers/botas/botas_shipped_advisory.py
|
mulaievaRH/freshmaker
|
809b435d7cab1907eb74ecd898693835a92db9d8
|
[
"MIT"
] | 5
|
2020-06-17T11:29:16.000Z
|
2022-03-24T07:20:16.000Z
|
freshmaker/handlers/botas/botas_shipped_advisory.py
|
mulaievaRH/freshmaker
|
809b435d7cab1907eb74ecd898693835a92db9d8
|
[
"MIT"
] | 96
|
2020-06-29T15:01:23.000Z
|
2022-03-30T08:07:06.000Z
|
freshmaker/handlers/botas/botas_shipped_advisory.py
|
mulaievaRH/freshmaker
|
809b435d7cab1907eb74ecd898693835a92db9d8
|
[
"MIT"
] | 20
|
2020-06-16T01:30:08.000Z
|
2022-02-19T15:34:55.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import json
from datetime import datetime
import re
import koji
from kobo.rpmlib import parse_nvr
import semver
from freshmaker import db, conf, log
from freshmaker.handlers import ContainerBuildHandler
from freshmaker.events import BotasErrataShippedEvent, ManualBundleRebuild
from freshmaker.lightblue import ContainerImage
from freshmaker.models import ArtifactBuild, ArtifactType, Event
from freshmaker.types import EventState, ArtifactBuildState, RebuildReason
from freshmaker.pyxis import Pyxis
from freshmaker.kojiservice import KojiService
from freshmaker.errata import Errata
class HandleBotasAdvisory(ContainerBuildHandler):
"""
Handles event that was created by transition of an advisory filed by
BOTAS to SHIPPED_LIVE state
"""
name = "HandleBotasAdvisory"
# This prefix should be added to event reason, when skipping the event.
# Because Release Driver checks event's reason for certain prefixes,
# to determine if there is an error in bundles processing.
_no_bundle_prefix = "No bundles to rebuild: "
def __init__(self, pyxis=None):
super().__init__()
if pyxis:
self._pyxis = pyxis
else:
if not conf.pyxis_server_url:
raise ValueError("'PYXIS_SERVER_URL' parameter should be set")
self._pyxis = Pyxis(conf.pyxis_server_url)
if not conf.freshmaker_root_url or "://" not in conf.freshmaker_root_url:
raise ValueError("'FRESHMAKER_ROOT_URL' parameter should be set to "
"a valid URL")
# Currently processed event
self.event = None
def can_handle(self, event):
if (isinstance(event, BotasErrataShippedEvent) and
'docker' in event.advisory.content_types):
return True
# This handler can handle manual bundle rebuilds too
if isinstance(event, ManualBundleRebuild):
return True
return False
def handle(self, event):
if event.dry_run:
self.force_dry_run()
self.event = event
db_event = Event.get_or_create_from_event(db.session, event)
self.set_context(db_event)
# Check if event is allowed by internal policies
if not self.event.is_allowed(self):
msg = ("This image rebuild is not allowed by internal policy. "
f"message_id: {event.msg_id}")
db_event.transition(EventState.SKIPPED, msg)
self.log_info(msg)
return []
if isinstance(event, ManualBundleRebuild) and \
hasattr(event, 'bundle_images'):
bundles_to_rebuild = self._handle_release_driver_rebuild(db_event)
# automatic rebuild and manual bundle rebuild(triggered by post request)
else:
bundles_to_rebuild = self._handle_bundle_rebuild(db_event)
if not bundles_to_rebuild:
return []
builds = self._prepare_builds(db_event, bundles_to_rebuild)
# Reset context to db_event.
self.set_context(db_event)
self.start_to_build_images(builds)
if all([b.state == ArtifactBuildState.FAILED.value for b in builds]):
db_event.transition(EventState.FAILED, "All bundle rebuilds failed")
else:
msg = f"Advisory {db_event.search_key}: Rebuilding " \
f"{len(db_event.builds.all())} bundle images."
db_event.transition(EventState.BUILDING, msg)
return []
def _handle_bundle_rebuild(self, db_event):
"""
Handle auto rebuild for an advisory created by Botas
OR manually triggered rebuild
:param db_event: database event that represent rebuild event
:rtype: list
:return: list of advisories that should be rebuilt
"""
# Mapping of operators' original build nvrs to rebuilt nvrs in advisory
nvrs_mapping = self._create_original_to_rebuilt_nvrs_map()
original_nvrs = nvrs_mapping.keys()
self.log_info(
"Orignial nvrs of build in the advisory #{0} are: {1}".format(
self.event.advisory.errata_id, " ".join(original_nvrs)))
# Get image manifest_list_digest for all original images, manifest_list_digest is used
# in pullspecs in bundle's related images
original_digests_by_nvr = {}
original_nvrs_by_digest = {}
for nvr in original_nvrs:
digest = self._pyxis.get_manifest_list_digest_by_nvr(nvr)
if digest:
original_digests_by_nvr[nvr] = digest
original_nvrs_by_digest[digest] = nvr
else:
log.warning(
f"Image manifest_list_digest not found for original image {nvr} in Pyxis, "
"skip this image"
)
if not original_digests_by_nvr:
msg = f"None of the original images have digests in Pyxis: {','.join(original_nvrs)}"
log.warning(msg)
db_event.transition(EventState.SKIPPED, msg)
return []
# Get image manifest_list_digest for all rebuilt images, manifest_list_digest is used
# in pullspecs of bundle's related images
rebuilt_digests_by_nvr = {}
rebuilt_nvrs = nvrs_mapping.values()
for nvr in rebuilt_nvrs:
# Don't require that the manifest list digest be published in this case because
# there's a delay from after an advisory is shipped and when the published repositories
# entry is populated
digest = self._pyxis.get_manifest_list_digest_by_nvr(nvr, must_be_published=False)
if digest:
rebuilt_digests_by_nvr[nvr] = digest
else:
log.warning(
f"Image manifest_list_digest not found for rebuilt image {nvr} in Pyxis, "
"skip this image"
)
if not rebuilt_digests_by_nvr:
msg = f"None of the rebuilt images have digests in Pyxis: {','.join(rebuilt_nvrs)}"
log.warning(msg)
db_event.transition(EventState.SKIPPED, msg)
return []
index_images = self._pyxis.get_operator_indices()
# get latest bundle images per channel per index image filtered
# by the highest semantic version
all_bundles = self._pyxis.get_latest_bundles(index_images)
self.log_debug(
"There are %d bundles that are latest in a channel in the found index images",
len(all_bundles),
)
# A mapping of digests to bundle metadata. This metadata is used to
# for the CSV metadata updates.
bundle_mds_by_digest = {}
# get bundle digests for original images
bundle_digests_by_related_nvr = {}
for image_nvr, image_digest in original_digests_by_nvr.items():
bundles = self._pyxis.get_bundles_by_related_image_digest(
image_digest, all_bundles
)
if not bundles:
log.info(f"No latest bundle image with the related image of {image_nvr}")
continue
for bundle in bundles:
bundle_digest = bundle['bundle_path_digest']
bundle_mds_by_digest[bundle_digest] = bundle
bundle_digests_by_related_nvr.setdefault(image_nvr, []).append(bundle_digest)
if not bundle_digests_by_related_nvr:
msg = "None of the original images have related bundles, skip."
log.warning(msg)
db_event.transition(EventState.SKIPPED, msg)
return []
self.log_info(
"Found %d bundles with relevant related images", len(bundle_digests_by_related_nvr)
)
# Mapping of bundle digest to bundle data
# {
# digest: {
# "images": [image_amd64, image_aarch64],
# "nvr": NVR,
# "auto_rebuild": True/False,
# "osbs_pinning": True/False,
# "pullspecs": [...],
# }
# }
bundles_by_digest = {}
default_bundle_data = {
'images': [],
'nvr': None,
'auto_rebuild': False,
'osbs_pinning': False,
# CSV modifications for the rebuilt bundle image
'pullspec_replacements': [],
'update': {},
}
# Get images for each bundle digest, a bundle digest can have multiple images
# with different arches.
for digest in bundle_mds_by_digest:
bundles = self._pyxis.get_images_by_digest(digest)
# If no bundle image found, just skip this bundle digest
if not bundles:
self.log_warn('The bundle digest %r was not found in Pyxis. Skipping.', digest)
continue
bundle_nvr = bundles[0]['brew']['build']
# If specific container images where requested to rebuild, process only them
if (isinstance(self.event, ManualBundleRebuild)
and self.event.container_images # noqa: W503
and bundle_nvr not in self.event.container_images): # noqa: W503
self.log_debug("Ignoring '%s', because it's not in requested rebuilds"
" (container_images in request)", bundle_nvr)
continue
# Filter out builds from dependent event that were rebuilt recently
done_build = db_event.get_artifact_build_from_event_dependencies(
bundle_nvr)
if done_build:
self.log_debug("Ignoring '%s' bundle, because it was already rebuilt"
" in dependent event", bundle_nvr)
continue
bundles_by_digest.setdefault(digest, copy.deepcopy(default_bundle_data))
bundles_by_digest[digest]['nvr'] = bundle_nvr
bundles_by_digest[digest]['images'] = bundles
# Unauthenticated koji session to fetch build info of bundles
koji_api = KojiService(conf.koji_profile)
# For each bundle, check whether it should be rebuilt by comparing the
# auto_rebuild_tags of repository and bundle's tags
for digest, bundle_data in bundles_by_digest.items():
bundle_nvr = bundle_data['nvr']
# Images are for different arches, just check against the first image
image = bundle_data['images'][0]
if self.image_has_auto_rebuild_tag(image):
bundle_data['auto_rebuild'] = True
# Fetch buildinfo
buildinfo = koji_api.get_build(bundle_nvr)
related_images = (
buildinfo.get('extra', {})
.get('image', {})
.get('operator_manifests', {})
.get('related_images', {})
)
bundle_data['osbs_pinning'] = related_images.get('created_by_osbs', False)
# Save the original pullspecs
bundle_data['pullspec_replacements'] = related_images.get('pullspecs', [])
# Digests of bundles to be rebuilt
to_rebuild_digests = set()
# Now for each bundle, replace the original digest with rebuilt
# digest (override pullspecs)
for digest, bundle_data in bundles_by_digest.items():
# Override pullspecs only when auto_rebuild is enabled and OSBS-pinning
# mechanism is used.
if not (bundle_data['auto_rebuild'] and bundle_data['osbs_pinning']):
self.log_info(
'The bundle %r does not have auto-rebuild tags (%r) and/or OSBS pinning (%r)',
bundle_data['nvr'],
bundle_data['auto_rebuild'],
bundle_data['osbs_pinning'],
)
continue
csv_name = bundle_mds_by_digest[digest]['csv_name']
version = bundle_mds_by_digest[digest]['version_original']
bundle_data.update(self._get_csv_updates(csv_name, version))
for pullspec in bundle_data['pullspec_replacements']:
# A pullspec item example:
# {
# 'new': 'registry.exampe.io/repo/example-operator@sha256:<sha256-value>',
# 'original': 'registry.example.io/repo/example-operator:v2.2.0',
# 'pinned': True,
# # value used for internal purpose during manual rebuilds, it's an old pullspec that was replaced
# '_old': 'registry.exampe.io/repo/example-operator@sha256:<previous-sha256-value>,
# }
# A pullspec path is in format of "registry/repository@digest"
pullspec_elems = pullspec.get('new').split('@')
old_digest = pullspec_elems[1]
if old_digest not in original_nvrs_by_digest:
# This related image is not one of the original images
continue
# This related image is one of our original images
old_nvr = original_nvrs_by_digest[old_digest]
new_nvr = nvrs_mapping[old_nvr]
new_digest = rebuilt_digests_by_nvr[new_nvr]
# save pullspec that image had before rebuild
pullspec['_old'] = pullspec.get('new')
# Replace the old digest with new digest
pullspec_elems[1] = new_digest
new_pullspec = '@'.join(pullspec_elems)
pullspec['new'] = new_pullspec
# Always set pinned to True when it was replaced by Freshmaker
# since it indicates that the pullspec was modified from the
# original pullspec
pullspec['pinned'] = True
# Once a pullspec in this bundle has been overrided, add this bundle
# to rebuild list
self.log_info(
'Changing pullspec %r to %r in the bundle %r',
pullspec['_old'],
pullspec['new'],
bundle_data['nvr'],
)
to_rebuild_digests.add(digest)
if not to_rebuild_digests:
msg = self._no_bundle_prefix + "No bundle images to rebuild for " \
f"advisory {self.event.advisory.name}"
self.log_info(msg)
db_event.transition(EventState.SKIPPED, msg)
db.session.commit()
return []
bundles_to_rebuild = list(map(lambda x: bundles_by_digest[x],
to_rebuild_digests))
return bundles_to_rebuild
def _handle_release_driver_rebuild(self, db_event):
"""
Handle manual rebuild submitted by Release Driver for an advisory created by Botas
:param db_event: database event that represents a rebuild event
:rtype: list
:return: list of advisories that should be rebuilt
"""
old_to_new_pullspec_map = self._get_pullspecs_mapping()
if not old_to_new_pullspec_map:
msg = self._no_bundle_prefix + 'None of the bundle images have ' \
'applicable pullspecs to replace'
log.warning(msg)
db_event.transition(EventState.SKIPPED, msg)
return []
# Unauthenticated koji session to fetch build info of bundles
koji_api = KojiService(conf.koji_profile)
rebuild_nvr_to_pullspecs_map = dict()
# compare replaced pullspecs with pullspecs in 'container_images' and
# create map for bundles that should be rebuilt with their nvrs
for container_image_nvr in self.event.container_images:
artifact_build = db.session.query(ArtifactBuild).filter(
ArtifactBuild.rebuilt_nvr == container_image_nvr,
ArtifactBuild.type == ArtifactType.IMAGE.value,
).one_or_none()
pullspecs = []
# Try to find build in FM database, if it's not there check in Brew
if artifact_build:
self.log_info(
"%s in the container_images list was found in the database", container_image_nvr
)
pullspecs = artifact_build.bundle_pullspec_overrides["pullspec_replacements"]
else:
self.log_info(
"%s in the container_images list is not in the database. Searching in Brew "
"instead.",
container_image_nvr,
)
# Fetch buildinfo from Koji
buildinfo = koji_api.get_build(container_image_nvr)
# Get the original pullspecs
pullspecs = (
buildinfo.get('extra', {})
.get('image', {})
.get('operator_manifests', {})
.get('related_images', {})
.get('pullspecs', [])
)
for pullspec in pullspecs:
if pullspec.get('new') not in old_to_new_pullspec_map:
self.log_debug("The pullspec %s is not getting replaced", pullspec.get('new'))
continue
# use newer pullspecs in the image
self.log_info(
"Replacing the pullspec %s with %s on %s",
pullspec['new'],
old_to_new_pullspec_map[pullspec['new']],
container_image_nvr,
)
pullspec['new'] = old_to_new_pullspec_map[pullspec['new']]
rebuild_nvr_to_pullspecs_map[container_image_nvr] = pullspecs
if not rebuild_nvr_to_pullspecs_map:
msg = self._no_bundle_prefix + 'None of the container images have ' \
'applicable pullspecs from the input bundle images'
log.info(msg)
db_event.transition(EventState.SKIPPED, msg)
return []
# list with metadata about every bundle to do rebuild
to_rebuild_bundles = []
# fill 'append' and 'update' fields for bundles to rebuild
for nvr, pullspecs in rebuild_nvr_to_pullspecs_map.items():
self.log_debug("Getting the manifest list digest for %s", nvr)
bundle_digest = self._pyxis.get_manifest_list_digest_by_nvr(nvr)
if bundle_digest is not None:
self.log_debug("The manifest list digest for %s is %s", nvr, bundle_digest)
bundles = self._pyxis.get_bundles_by_digest(bundle_digest)
if not bundles:
self.log_error(
"The manifest_list_digest %s is not available on the bundles API endpoint",
bundle_digest,
)
continue
temp_bundle = bundles[0]
csv_updates = (self._get_csv_updates(temp_bundle['csv_name'],
temp_bundle['version_original']))
to_rebuild_bundles.append({
'nvr': nvr,
'update': csv_updates['update'],
'pullspec_replacements': pullspecs,
})
else:
log.warning('Can\'t find manifest_list_digest for bundle '
f'"{nvr}" in Pyxis')
if not to_rebuild_bundles:
msg = 'Can\'t find digests for any of the bundles to rebuild'
log.warning(msg)
db_event.transition(EventState.FAILED, msg)
return []
return to_rebuild_bundles
def _get_pullspecs_mapping(self):
"""
Get map of all replaced pullspecs from 'bundle_images' provided in an event.
:rtype: dict
:return: map of all '_old' pullspecs that was replaced by 'new'
pullspecs in previous Freshmaker rebuilds
"""
old_to_new_pullspec_map = dict()
for bundle_nvr in self.event.bundle_images:
artifact_build = db.session.query(ArtifactBuild).filter(
ArtifactBuild.rebuilt_nvr == bundle_nvr,
ArtifactBuild.type == ArtifactType.IMAGE.value,
).one_or_none()
if artifact_build is None:
log.warning(
f'Can\'t find build for a bundle image "{bundle_nvr}"')
continue
pullspec_overrides = artifact_build.bundle_pullspec_overrides
for pullspec in pullspec_overrides['pullspec_replacements']:
old_pullspec = pullspec.get('_old', None)
if old_pullspec is None:
continue
old_to_new_pullspec_map[old_pullspec] = pullspec['new']
return old_to_new_pullspec_map
@classmethod
def _get_csv_updates(cls, csv_name, version):
"""
Determine the CSV updates required for the bundle image.
:param str csv_name: the name field in the bundle's ClusterServiceVersion file
:param str version: the version of the bundle image being rebuilt
:return: a dictionary of the CSV updates needed
:rtype: dict
"""
csv_modifications = {}
new_version, fm_suffix = cls._get_rebuild_bundle_version(version)
new_csv_name = cls._get_csv_name(csv_name, version, new_version, fm_suffix)
csv_modifications['update'] = {
'metadata': {
# Update the name of the CSV to something uniquely identify the rebuild
'name': new_csv_name,
# Declare that this rebuild is a substitute of the bundle being rebuilt
'annotations': {'olm.substitutesFor': csv_name}
},
'spec': {
# Update the version of the rebuild to be unique and a newer version than the
# the version of the bundle being rebuilt
'version': new_version,
}
}
return csv_modifications
@classmethod
def _get_rebuild_bundle_version(cls, version):
"""
Get a bundle version for the Freshmaker rebuild of the bundle image.
Examples:
1.2.3 => 1.2.3+0.$timestamp.p (no build ID and not a rebuild)
1.2.3+48273 => 1.2.3+48273.0.$timestamp.p (build ID and not a rebuild)
1.2.3+48273.0.1616457250.p => 1.2.3+48273.0.$timestamp.p (build ID and a rebuild)
:param str version: the version of the bundle image being rebuilt
:return: a tuple of the bundle version of the Freshmaker rebuild of the bundle image and
the suffix that was added by Freshmaker
:rtype: tuple(str, str)
"""
parsed_version = semver.VersionInfo.parse(version)
# Strip off the microseconds of the timestamp
timestamp = int(datetime.utcnow().timestamp())
new_fm_suffix = f'0.{timestamp}.p'
if parsed_version.build:
# Check if the bundle was a Freshmaker rebuild. Include .patched
# for backwards compatibility with the old suffix.
fm_suffix_search = re.search(
r'(?P<fm_suffix>0\.\d+\.(?:p|patched))$', parsed_version.build
)
if fm_suffix_search:
fm_suffix = fm_suffix_search.groupdict()['fm_suffix']
# Get the build without the Freshmaker suffix. This may include a build ID
# from the original build before Freshmaker rebuilt it or be empty.
build_wo_fm_suffix = parsed_version.build[:- len(fm_suffix)]
new_build = f"{build_wo_fm_suffix}{new_fm_suffix}"
else:
# This was not previously rebuilt by Freshmaker so just append the suffix
# to the existing build ID with '.' separating it.
new_build = f"{parsed_version.build}.{new_fm_suffix}"
else:
# If there is existing build ID, then make the Freshmaker suffix the build ID
new_build = new_fm_suffix
# Don't use the replace method in order to support semver 2.8.1
new_version_dict = parsed_version._asdict()
new_version_dict["build"] = new_build
new_version = str(semver.VersionInfo(**new_version_dict))
return new_version, new_fm_suffix
@staticmethod
def _get_csv_name(csv_name, version, rebuild_version, fm_suffix):
"""
Get a bundle CSV name for the Freshmaker rebuild of the bundle image.
:param str csv_name: the name of the ClusterServiceVersion (CSV) file of the bundle image
:param str version: the version of the bundle image being rebuilt
:param str rebuild_version: the new version being assigned by Freshmaker for the rebuild
:param str fm_suffix: the portion of rebuild_version that was generated by Freshmaker
:return: the bundle ClusterServiceVersion (CSV) name of the Freshmaker rebuild of the bundle
image
:rtype: str
"""
# The CSV name must be in the format of a valid DNS name, which means the + from the
# build ID must be replaced. In the event this was a previous Freshmaker rebuild, version
# may have a build ID that would be the DNS safe version in the CSV name.
dns_safe_version = version.replace('+', '-')
if dns_safe_version in csv_name:
dns_safe_rebuild_version = rebuild_version.replace('+', '-')
return csv_name.replace(dns_safe_version, dns_safe_rebuild_version)
else:
return f'{csv_name}.{fm_suffix}'
def get_published_original_nvr(self, rebuilt_nvr):
"""
Search for an original build, that has been built and published to a
repository, and get original_nvr from it
:param str rebuilt_nvr: rebuilt NVR to look build by
:rtype: str or None
:return: original NVR from the first published FM build for given NVR
"""
original_nvr = None
# artifact build should be only one in database, or raise an error
artifact_build = db.session.query(ArtifactBuild).filter(
ArtifactBuild.rebuilt_nvr == rebuilt_nvr,
ArtifactBuild.type == ArtifactType.IMAGE.value,
).one_or_none()
# recursively search for original artifact build
if artifact_build is not None:
original_nvr = artifact_build.original_nvr
# check if image is published
request_params = {'include': 'data.repositories',
'page_size': 1}
images = self._pyxis._pagination(f'images/nvr/{original_nvr}',
request_params)
if not images:
return None
# stop recursion if the image is published in some repo
if any(repo['published'] for repo in images[0].get('repositories')):
return original_nvr
next_nvr = self.get_published_original_nvr(original_nvr)
if next_nvr is not None:
original_nvr = next_nvr
return original_nvr
def image_has_auto_rebuild_tag(self, image):
""" Check if image has a tag enabled for auto rebuild.
:param dict image: Dict representation of an image entity in Pyxis.
:rtype: bool
:return: True if image has a tag enabled for auto rebuild in repository, otherwise False.
"""
for repo in image['repositories']:
# Skip unpublished repository
if not repo['published']:
continue
auto_rebuild_tags = self._pyxis.get_auto_rebuild_tags(
repo['registry'], repo['repository']
)
tags = [t['name'] for t in repo.get('tags', [])]
if set(auto_rebuild_tags) & set(tags):
return True
# It'd be more efficient to do this check first, but the exceptions are edge cases
# (e.g. testing) and it's best to not use it unless absolutely necessary
nvr = image['brew']['build']
parsed_nvr = parse_nvr(nvr)
nv = f'{parsed_nvr["name"]}-{parsed_nvr["version"]}'
if nv in conf.bundle_autorebuild_tag_exceptions:
self.log_info(
'The bundle %r has an exception for being tagged with an auto-rebuild tag', nvr
)
return True
return False
def _create_original_to_rebuilt_nvrs_map(self):
"""
Creates mapping of original operator build NVRs to rebuilt NVRs in advisory.
Including NVRs of the builds from the blocking advisories
:rtype: dict
:return: map of the original NVRs as keys and rebuilt NVRs as values
"""
nvrs_mapping = {}
# Get builds from all blocking advisories
blocking_advisories_builds = \
Errata().get_blocking_advisories_builds(self.event.advisory.errata_id)
# Get builds NVRs from the advisory attached to the message/event and
# then get original NVR for every build
for product_info in self.event.advisory.builds.values():
for build in product_info['builds']:
# Each build is a one key/value pair, and key is the build NVR
build_nvr = next(iter(build))
# Search for the first build that triggered the chain of rebuilds
# for every shipped NVR to get original NVR from it
original_nvr = self.get_published_original_nvr(build_nvr)
if original_nvr is None:
continue
nvrs_mapping[original_nvr] = build_nvr
parsed_build_nvr = parse_nvr(build_nvr)
# Check builds from blocking advisories and add to the mapping
# all of them, that have overlapping package names
for block_build in blocking_advisories_builds:
block_build_nvr = parse_nvr(block_build)
if (block_build_nvr['name'] == parsed_build_nvr['name']
and block_build_nvr['version'] == parsed_build_nvr['version']): # noqa: W503
nvrs_mapping[block_build] = build_nvr
return nvrs_mapping
def _prepare_builds(self, db_event, to_rebuild_bundles):
"""
Prepare models.ArtifactBuild instance for every bundle that will be
rebuilt
:param models.Event db_event: database event that will contain builds
:param list to_rebuild_bundles: bundles to rebuild
:return: builds that already in database and ready to be submitted to brew
:rtype: list
"""
builds = []
csv_mod_url = conf.freshmaker_root_url + "/api/2/pullspec_overrides/{}"
for bundle in to_rebuild_bundles:
# Reset context to db_event for each iteration before
# the ArtifactBuild is created.
self.set_context(db_event)
rebuild_reason = RebuildReason.DIRECTLY_AFFECTED.value
bundle_name = koji.parse_NVR(bundle["nvr"])["name"]
build = self.record_build(
db_event, bundle_name, ArtifactType.IMAGE,
state=ArtifactBuildState.PLANNED.value,
original_nvr=bundle["nvr"],
rebuild_reason=rebuild_reason)
# Set context to particular build so logging shows this build
# in case of error.
self.set_context(build)
build.transition(ArtifactBuildState.PLANNED.value, "")
additional_data = ContainerImage.get_additional_data_from_koji(bundle["nvr"])
build.build_args = json.dumps({
"repository": additional_data["repository"],
"commit": additional_data["commit"],
"target": additional_data["target"],
"branch": additional_data["git_branch"],
"arches": additional_data["arches"],
# The build system always enforces that bundle images build from
# "scratch", so there is no parent image. See:
# https://osbs.readthedocs.io/en/latest/users.html?#operator-manifest-bundle-builds
"original_parent": None,
"operator_csv_modifications_url": csv_mod_url.format(build.id),
})
build.bundle_pullspec_overrides = {
"pullspec_replacements": bundle["pullspec_replacements"],
"update": bundle["update"],
}
db.session.commit()
builds.append(build)
return builds
| 44.607096
| 116
| 0.604018
| 32,227
| 0.949361
| 0
| 0
| 4,775
| 0.140665
| 0
| 0
| 14,209
| 0.418577
|
e1d6a7a8f00c138e84b26623fa12570b059d6d57
| 244
|
py
|
Python
|
src/masonite/contracts/AuthContract.py
|
holic-cl/masonite
|
c5eab7db5f87e389fe83a1f0f20a005035ada9d9
|
[
"MIT"
] | 95
|
2018-02-22T23:54:00.000Z
|
2021-04-17T03:39:21.000Z
|
src/masonite/contracts/AuthContract.py
|
holic-cl/masonite
|
c5eab7db5f87e389fe83a1f0f20a005035ada9d9
|
[
"MIT"
] | 840
|
2018-01-27T04:26:20.000Z
|
2021-01-24T12:28:58.000Z
|
src/masonite/contracts/AuthContract.py
|
holic-cl/masonite
|
c5eab7db5f87e389fe83a1f0f20a005035ada9d9
|
[
"MIT"
] | 100
|
2018-02-23T00:19:55.000Z
|
2020-08-28T07:59:31.000Z
|
from abc import ABC as Contract, abstractmethod
class AuthContract(Contract):
@abstractmethod
def user(self):
pass
@abstractmethod
def save(self):
pass
@abstractmethod
def delete(self):
pass
| 14.352941
| 47
| 0.631148
| 193
| 0.790984
| 0
| 0
| 146
| 0.598361
| 0
| 0
| 0
| 0
|
e1d7080d35e6bb09847310ecab242b0c030ed469
| 2,202
|
py
|
Python
|
netblow/bin/netblow_cli.py
|
viniciusarcanjo/netblow
|
01a2c3a60c5f9eb7e6c199612dedcd01c5dc23ba
|
[
"Apache-2.0"
] | 8
|
2018-10-07T17:44:46.000Z
|
2022-03-24T21:40:57.000Z
|
netblow/bin/netblow_cli.py
|
viniciusarcanjo/netblow
|
01a2c3a60c5f9eb7e6c199612dedcd01c5dc23ba
|
[
"Apache-2.0"
] | 8
|
2018-04-29T20:47:28.000Z
|
2018-05-01T18:51:58.000Z
|
netblow/bin/netblow_cli.py
|
viniciusarcanjo/netblow
|
01a2c3a60c5f9eb7e6c199612dedcd01c5dc23ba
|
[
"Apache-2.0"
] | 1
|
2019-04-27T08:48:50.000Z
|
2019-04-27T08:48:50.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""netblow_cli module."""
import argparse
from netblow.netblow import NetBlow
from netblow.version import __version__
def main():
"""Entry function."""
parser = argparse.ArgumentParser(
description="netblow. Vendor agnostic network testing framework to stress network failures." # noqa
)
# to add required args.
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
m_group = optional.add_mutually_exclusive_group()
m_group.add_argument(
'-d',
'--dryrun',
default=False,
action='store_true',
help="show tests calls, won't connect to any devices")
m_group.add_argument(
'-c',
'--concheck',
default=False,
action='store_true',
help='check connectivity with all devices in the topology')
m_group.add_argument(
'-1',
'--once',
default=False,
action='store_true',
help="iterates only once and perfom napalm diffs")
parser.add_argument(
'-l',
'--level',
choices=['info', 'debug'],
default='info',
help='logging verbosity level (default: info)')
parser.add_argument(
'-v',
'--version',
action='version',
version='{}'.format(__version__),
help='show version')
required.add_argument(
'-f', '--topology', help='topology yml file')
required.add_argument(
'-t', '--tests', help='tests yml file')
parser._action_groups.append(optional)
args = parser.parse_args()
if not args.topology:
parser.error('You have to specify the topology yml file with -f')
if not args.tests:
if args.once or not args.dryrun and not args.concheck:
parser.error('You have to specify the tests yml file with -t')
NetBlow(
topo_file=args.topology,
test_file=args.tests,
dry_run=args.dryrun,
enable_salt=False,
iter_once=args.once,
auto_open=True,
auto_test=True,
con_check=args.concheck,
level=args.level)
if __name__ == "__main__":
main()
| 29.36
| 108
| 0.609446
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 730
| 0.331517
|
e1d83fca2e1bb93962f5e57c6f7075495edf9d91
| 8,688
|
py
|
Python
|
src/06_tool/regular_expression.py
|
edgardeng/python-advance-interview
|
59fd7bee8e871acdc7fdfecf2a110db840c47ebb
|
[
"Apache-2.0"
] | 1
|
2022-03-06T13:03:56.000Z
|
2022-03-06T13:03:56.000Z
|
src/06_tool/regular_expression.py
|
edgardeng/python-advance-interview
|
59fd7bee8e871acdc7fdfecf2a110db840c47ebb
|
[
"Apache-2.0"
] | null | null | null |
src/06_tool/regular_expression.py
|
edgardeng/python-advance-interview
|
59fd7bee8e871acdc7fdfecf2a110db840c47ebb
|
[
"Apache-2.0"
] | null | null | null |
'''
' Python Regular Expression 正则表达式
'
'''
import re
def test_match():
s = 'hello python Hello'
p = 'hello'
o = re.match(p, s)
print(o)
print(dir(o))
print(o.group()) # 返回匹配的字符串
print(o.span()) # 范围
print(o.start()) # 开始处
print('*' * 30, 'flags参数的使用')
o2 = re.match(p, s, re.L)
print(o2.group()) # 返回匹配的字符串
# 常用字符的使用
def test_match_character():
print('-' * 30, ' . 匹配任意一个字符')
print(re.match('.', 'abv'))
print(re.match('.', '12'))
print(re.match('.', '\n'))
print('-' * 30, ' \d 匹配数字 0-9')
print(re.match('\d', 'abc456'))
print(re.match('\d', '234svd'))
print('-' * 30, ' \D 匹配非数字 0-9')
print(re.match('\D', 'abc456'))
print(re.match('\D', '234svd'))
print('-' * 30, ' \s 匹配空白字符')
print(re.match('\s', '\n12\t'))
print(re.match('\s', '\t'))
print(re.match('\s', 'addd'))
print('-' * 30, ' \S 匹配非空白字符')
print(re.match('\S', '\n12\t'))
print(re.match('\S', '\t'))
print(re.match('\S', 'addd'))
print('-' * 30, ' \w 匹配字母、数字')
print(re.match('\w', 'AB'))
print(re.match('\w', 'ab'))
print(re.match('\w', '12'))
print(re.match('\w', '__'))
print(re.match('\w', '##'))
print('-' * 30, ' \W 匹配非 字母、数字')
print(re.match('\W', 'AB'))
print(re.match('\W', 'ab'))
print(re.match('\W', '12'))
print(re.match('\W', '__'))
print(re.match('\W', '##'))
print('-' * 30, ' \[] 匹配列表中的字符')
print(re.match('[2468]', '22'))
print(re.match('[2468]', '33'))
print(re.match('[2468]', '83'))
print(re.match('[2468]', '38'))
def test_match_phone():
print('-' * 30, ' 匹配手机号')
patten = '\d\d\d\d\d\d\d\d\d\d\d'
print(re.match(patten, '13466669999'))
print(re.match('1[345789]\d\d\d\d\d\d\d\d\d', '13466669999'))
# 限定符
def test_match_qualifier():
print('-' * 30, ' * 匹配零次或多次')
print(re.match('\d*', '123abc')) # 匹配开头的数字
print(re.match('\d*', 'abc'))
print('-' * 30, ' + |匹配一次或多次')
print(re.match('\d+', '123abc')) # 匹配开头的数字
print(re.match('\d+', 'abc'))
print('-' * 30, ' ? |匹配一次或零次')
print(re.match('\d?', '1abc'))
print(re.match('\d?', '123abc')) # 匹配开头的数字
print(re.match('\d?', 'abc'))
print('-' * 30, ' {m} |重复m次')
print(re.match('\d{2}', '123abc')) # 匹配开头2个数字
print(re.match('\d{2}', '12abc'))
print(re.match('\d{2}', '1abc'))
print(re.match('\d{2}', 'abc'))
print('-' * 30, '{m,n}|重复m到n次')
print(re.match('\d{1,3}', '1234abc')) # 匹配开头2个数字
print(re.match('\d{1,3}', '123abc'))
print(re.match('\d{1,3}', '12abc'))
print(re.match('\d{1,3}', '1abc'))
print(re.match('\d{1,3}', 'abc'))
print('-' * 30, '{m,}|至少m次')
print(re.match('\d{2,}', '1234abc')) # 匹配开头2个数字
print(re.match('\d{2,}', '123abc'))
print(re.match('\d{2,}', '12abc'))
print(re.match('\d{2,}', '1abc'))
print(re.match('\d{2,}', 'abc'))
print('-' * 30, '案例1 首字母为大写字符,其他小写字符')
print(re.match('[A-Z][a-z]*', 'abc'))
print(re.match('[A-Z][a-z]*', 'ABC'))
print(re.match('[A-Z][a-z]*', 'Abc'))
print(re.match('[A-Z][a-z]*', 'AbC'))
print('-' * 30, '案例2 有效变量名 字母数字下划线,数字不开头')
print(re.match('[a-zA-Z_][a-zA-Z0-9_]*', 'abc'))
print(re.match('[a-zA-Z_]\w*', 'abc'))
print(re.match('[a-zA-Z_][a-zA-Z0-9_]*', 'abc123'))
print(re.match('[a-zA-Z_]\w*', '123abc'))
print(re.match('[a-zA-Z_]\w*', '_123abc'))
print('-' * 30, '案例2 1-99的数字')
print(re.match('[1-9]\d?', '23abc'))
print(re.match('[1-9]\d?', '100'))
print(re.match('[1-9]\d?', '11'))
print(re.match('[1-9]\d?', '1'))
print(re.match('[1-9]\d?', '0'))
print(re.match('[1-9]\d?', '09'))
print('-' * 30, '案例2 8-20随机密码 大写,小写,下划线,数字')
print(re.match('\w{8,20}', '1234567'))
print(re.match('\w{8,20}', '1234567$$'))
print(re.match('\w{8,20}', '1234567abc_'))
print(re.match('\w{8,20}', '1234567abc#'))
print(re.match('\w{8,20}', '12345678901234567890zx'))
# 转义字符 原生字符
def escape_character():
print('C:\t\d\e')
print('C:\\t\\d\\e')
print(r'C:\t\d\e')
# 边界字符
def boundary():
print('-' * 30, '$ 匹配字符串结尾')
print(re.match('[1-9]\d{4,9}@qq.com', '1234567@qq.com'))
print(re.match('[1-9]\d{4,9}@qq.com', '1234567@qq.com.126.cn'))
print(re.match(r'[1-9]\d{4,9}@qq.com$', '1234567@qq.com'))
print(re.match(r'[1-9]\d{4,9}@qq.com$', '1234567@qq.com.126.cn'))
print('-' * 30, ' ^ 匹配字符串开头')
print(re.match(r'^hello.*', 'hello abc'))
print(re.match(r'^hello.*', 'abc hello abc'))
print('-' * 30, ' \b 匹配单词的边界')
print(re.match(r'.*\bab', '123 aabc')) # 单词 ab 开始
print(re.match(r'.*\bab', '123 abcd'))
print(re.match(r'.*\bab', '123 aaa'))
print(re.match(r'.*\bab', '123 abcd cdab'))
print(re.match(r'.*ab\b', '123 abc')) # 单词 ab 结尾
print(re.match(r'.*ab\b', '123 aaa'))
print(re.match(r'.*ab\b', '123 ab'))
print(re.match(r'.*ab\b', '123 cdab'))
print(re.match(r'.*ab\b', '123 abcd cdab'))
def test_search():
print(re.match(r'hello', 'hello python'))
print(re.search(r'hello', 'hello python'))
print(re.match(r'hello', 'python hello'))
print(re.search(r'hello', 'python hello '))
print(re.match('aa|bb|cc', 'aa'))
print(re.match('aa|bb|cc', 'bbb'))
print(re.match('aa|bb|cc', 'ccc'))
print(re.match('aa|bb|cc', 'a bb ccc'))
print(re.search('aa|bb|cc', 'a bb ccc'))
# 多个字符
def test_multi_character():
print('-' * 30, '案例 0-100之间的数字: 0-99 | 100')
print(re.match('[1-9]?\d|100', '1'))
print(re.match('[1-9]?\d|100', '11'))
print(re.match('[1-9]?\d|100', '100'))
print(re.match('[1-9]?\d$|100$', '100'))
print(re.match('[1-9]?\d$|100$', '1000'))
print('-' * 30, '案例 ')
print(re.match('[ab][cd]', 'ab'))
print(re.match('[ab][cd]', 'ac'))
print(re.match('[ab][cd]', 'ad'))
print(re.match('ab|cd', 'abc'))
print(re.match('ab|cd', 'ac'))
# 匹配分组
def test_group():
print('-' * 30, '座机号码 区号{3,4} 号码{5,8} 010-0000 0791-222222')
print(re.match(r'\d{3,4}-[1-9]\d{4,7}', '010-10086'))
print(re.match(r'\d{3,4}-[1-9]\d{4,7}', '010-88888888'))
print(re.match(r'\d{3,4}-[1-9]\d{4,7}', '1111-10086'))
print(re.match(r'\d{3,4}-[1-9]\d{4,7}', '1111-88888888'))
print('-' * 30, ' 匹配分组')
o = re.match(r'(\d{3,4})-([1-9]\d{4,7})', '1111-88888888')
print(o)
print(o.group(0), o.group(1), o.group(2))
print(o.groups(), o.groups()[0], o.groups()[1])
print('-' * 30, 'html 标签')
print(re.match(r'<.+><.+>.+</.+></.+>', '<html><a>abc</a></html>'))
print(re.match(r'<.+><.+>.+</.+></.+>', '<html><a>abc</b></html>'))
print(re.match(r'<(.*)><(.*)>.*</\2></\1>', '<html><a>abc</b></html>'))
print(re.match(r'<(.*)><(.*)>.*</\2></\1>', '<html><d>abc</d></html>'))
print('-' * 30, 'html 标签 - 别名')
print(re.match(r'<(?P<k_html>.+)><(?P<k_head>.+)>.*</(?P=k_head)></(?P=k_html)>', '<html><d>abc</d></html>'))
## 搜索与替换
def test_sub():
print('-' * 30, ' 替换')
print(re.sub(r'#.*$', '', '2004-222-23322 # 这是个什么')) # 替换#开头的部分
print(re.sub(r'#\D*', '', '2004-222-23322 # 这是个什么'))
print('-' * 30, ' 替换 subn')
print(re.subn(r'#\D*', '', '2004-222-23322 # 这是个什么'))
print(re.subn(r'#.*$', '', '2004-222-23322 # 这是个什么'))
def test_compile():
print('-' * 30, ' compile的使用')
regex = re.compile(r'\w+') # 匹配字母或数字
print(regex.match('1223dfdf'))
print(regex.match('##1223dfdf'))
def test_findall():
print('-' * 30, ' findall 返回数组')
print(re.findall(r'\w', '##1223dfdf')) # 匹配字母或数字 f
print(re.findall(r'\w+', '## 1223 df df 1'))
print('-' * 30, ' finditer 返回迭代器')
print(re.finditer(r'\w+', '## 1223 df df 1'))
for i in re.finditer(r'\w+', '## 1223 df df 1'):
print(i, i.group())
def test_split():
print('-' * 30, ' split 返回数组')
print(re.split(r'\d+', '123abc123abc'))
print(re.split(r'\d+', '123 abc 123 abc'))
print(re.split(r'\d+', 'abc123 abc 123 abc'))
print(re.split(r'\d+', 'abc 123 abc 123 abc',1))
def greedy_mode():
print('-' * 30, ' 贪婪模式')
result = re.match(r'(.+)(\d+-\d+-\d+)', 'this is my tel: 122-1244-1242')
print(result.group(1))
print(result.group(2))
print('-' * 30, ' 非贪婪模式 尽可能少的匹配')
result = re.match(r'(.+?)(\d+-\d+-\d+)', 'this is my tel: 122-1244-1242')
print(result.group(1))
print(result.group(2))
print('-' * 30, ' 贪婪模式')
print(re.match(r'abc(\d+)', 'abc123456'))
print(re.match(r'abc(\d+?)', 'abc123456'))
if __name__ == '__main__':
# test_match()
# test_match_character()
# test_match_phone()
# test_match_qualifier()
# escape_character()
# boundary()
# test_search()
# test_multi_character()
# test_group()
# test_sub()
# test_compile()
# test_findall()
# test_split()
# greedy_mode()
# <.+><.+>.+</.+></.+>
s = '<link href="../assets/css/app.css?t=20112455" type="text/css" rel="stylesheet">'
mathched = re.findall(r'\S+assets/css/\S+.css\S+"', s)
for m in mathched:
print(m, m.index('.css'))
s = s.replace(m, m[:m.index('.css')] + '.css?t=00000"')
print(s)
| 30.484211
| 111
| 0.536027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,861
| 0.512872
|
e1da747be2e0ff514420a41a6547dfb4970c7ba6
| 166
|
py
|
Python
|
dot_dotfiles/mail/dot_offlineimap.py
|
TheRealOne78/dots
|
52c59dae1fccb7392ceeb16ac564f6a18ee4a159
|
[
"MIT"
] | 758
|
2016-11-19T22:52:34.000Z
|
2022-03-29T00:43:57.000Z
|
dot_dotfiles/mail/dot_offlineimap.py
|
TheRealOne78/dots
|
52c59dae1fccb7392ceeb16ac564f6a18ee4a159
|
[
"MIT"
] | 27
|
2017-02-09T23:28:58.000Z
|
2022-03-22T21:35:24.000Z
|
dot_dotfiles/mail/dot_offlineimap.py
|
TheRealOne78/dots
|
52c59dae1fccb7392ceeb16ac564f6a18ee4a159
|
[
"MIT"
] | 82
|
2016-12-23T04:42:00.000Z
|
2022-03-29T19:25:16.000Z
|
#! /usr/bin/env python2
# -*- coding: utf8 -*-
from subprocess import check_output
def get_pass():
return check_output("pass gmail/me", shell=True).strip("\n")
| 20.75
| 64
| 0.680723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 64
| 0.385542
|
e1dd3f3740e16e48cf7fbe8dce94d776bef908fd
| 1,139
|
py
|
Python
|
tests/encoding-utils/test_big_endian_integer.py
|
carver/ethereum-utils
|
7ec2495b25107776cb4e0e4a79af8a8c64f622c4
|
[
"MIT"
] | null | null | null |
tests/encoding-utils/test_big_endian_integer.py
|
carver/ethereum-utils
|
7ec2495b25107776cb4e0e4a79af8a8c64f622c4
|
[
"MIT"
] | null | null | null |
tests/encoding-utils/test_big_endian_integer.py
|
carver/ethereum-utils
|
7ec2495b25107776cb4e0e4a79af8a8c64f622c4
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import pytest
from hypothesis import (
strategies as st,
given,
)
from eth_utils.encoding import (
int_to_big_endian,
big_endian_to_int,
)
@pytest.mark.parametrize(
'as_int,as_big_endian',
(
(0, b'\x00'),
(1, b'\x01'),
(7, b'\x07'),
(8, b'\x08'),
(9, b'\x09'),
(256, b'\x01\x00'),
(2**256 - 1, b'\xff' * 32),
),
)
def test_big_endian_conversions(as_int, as_big_endian):
as_int_result = big_endian_to_int(as_big_endian)
assert as_int_result == as_int
as_big_endian_result = int_to_big_endian(as_int)
assert as_big_endian_result == as_big_endian
@given(value=st.integers(min_value=0, max_value=2**256 - 1))
def test_big_endian_round_trip_from_int(value):
result = big_endian_to_int(int_to_big_endian(value))
assert result == value
@given(
value=st.binary(min_size=1, max_size=32).map(
lambda v: v.lstrip(b'\x00') or b'\x00'
)
)
def test_big_endian_round_trip_from_big_endian(value):
result = int_to_big_endian(big_endian_to_int(value))
assert result == value
| 22.78
| 60
| 0.665496
| 0
| 0
| 0
| 0
| 932
| 0.818262
| 0
| 0
| 89
| 0.078139
|
e1dd62f3dbffbbc08c5996a09c39db0640f82f31
| 1,086
|
py
|
Python
|
src/data/normalization.py
|
poly-ai/fluid-surface-estimation
|
b2e310f38c3cce3c13fbf0b8277ee4eb00755d36
|
[
"MIT"
] | 2
|
2022-02-15T21:41:06.000Z
|
2022-02-16T04:54:51.000Z
|
src/data/normalization.py
|
poly-ai/fluid-surface-estimation
|
b2e310f38c3cce3c13fbf0b8277ee4eb00755d36
|
[
"MIT"
] | null | null | null |
src/data/normalization.py
|
poly-ai/fluid-surface-estimation
|
b2e310f38c3cce3c13fbf0b8277ee4eb00755d36
|
[
"MIT"
] | null | null | null |
import numpy as np
# Normalize dataset such that all sequences have min value 0.0, max value 1.0
def normalize(dataset, lower_lim=0.0, upper_lim=1.0):
seq_mins = dataset.min(axis=(1, 2, 3))
seq_maxes = dataset.max(axis=(1, 2, 3))
dataset -= seq_mins.reshape((-1, 1, 1, 1))
dataset /= (seq_maxes - seq_mins).reshape((-1, 1, 1, 1))
return dataset
# Normalize only the sequences in the data that have value outside range [0, 1)
# Normalizes these sequences to have min value 0.0, max value 1.0
def normalize_only_outliers(dataset, lower_lim=0.0, upper_lim=1.0):
# Scale and offset each sequence so that all values are within [0,1)
seq_mins = dataset.min(axis=(1, 2, 3))
seq_maxes = dataset.max(axis=(1, 2, 3))
# Limit normalization only to waves that are out of the range [0,1)
active = np.logical_or(
np.less(seq_mins, lower_lim), np.greater(seq_maxes, upper_lim)
)
dataset[active] -= seq_mins[active].reshape((-1, 1, 1, 1))
dataset[active] /= (seq_maxes - seq_mins)[active].reshape((-1, 1, 1, 1))
return dataset
| 32.909091
| 79
| 0.669429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 356
| 0.327808
|
e1de48b63ed82ddff16804877e556e037ff413c0
| 1,487
|
py
|
Python
|
setup.py
|
fwitte/PyPSA
|
fa2ca201a4f0b3b5f8705a5927475ebb021dbee5
|
[
"MIT"
] | null | null | null |
setup.py
|
fwitte/PyPSA
|
fa2ca201a4f0b3b5f8705a5927475ebb021dbee5
|
[
"MIT"
] | null | null | null |
setup.py
|
fwitte/PyPSA
|
fa2ca201a4f0b3b5f8705a5927475ebb021dbee5
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from setuptools import setup, find_packages
from codecs import open
with open('README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(
name='pypsa',
version='0.19.1',
author='PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html',
author_email='t.brown@tu-berlin.de',
description='Python for Power Systems Analysis',
long_description=long_description,
long_description_content_type='text/x-rst',
url='https://github.com/PyPSA/PyPSA',
license='MIT',
packages=find_packages(exclude=['doc', 'test']),
include_package_data=True,
python_requires='>=3.6',
install_requires=[
'numpy',
'scipy',
'pandas>=0.24.0',
'xarray',
'netcdf4',
'tables',
'pyomo>=5.7',
'matplotlib',
'networkx>=1.10',
'deprecation'
],
extras_require = {
"dev": ["pytest", "pypower", "pandapower", "scikit-learn"],
"cartopy": ['cartopy>=0.16'],
"docs": ["numpydoc", "sphinx", "sphinx_rtd_theme", "nbsphinx", "nbsphinx-link", "black"],
'gurobipy':['gurobipy']
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
])
| 29.156863
| 97
| 0.604573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 723
| 0.486214
|
e1de6c9ea1e78727fc2d5bc8690e68e41338f516
| 556
|
py
|
Python
|
quiz/urls.py
|
Hysham/Quiz-Hoster
|
19067e3d584cb97562e73d332fdfe74eb49524cc
|
[
"MIT"
] | 1
|
2020-03-22T13:36:27.000Z
|
2020-03-22T13:36:27.000Z
|
quiz/urls.py
|
Hysham/Quiz-Hoster
|
19067e3d584cb97562e73d332fdfe74eb49524cc
|
[
"MIT"
] | null | null | null |
quiz/urls.py
|
Hysham/Quiz-Hoster
|
19067e3d584cb97562e73d332fdfe74eb49524cc
|
[
"MIT"
] | 1
|
2020-04-02T15:32:12.000Z
|
2020-04-02T15:32:12.000Z
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.quiz_home, name='quiz-home'),
path('page/<int:page_no>/', views.quiz_page, name='quiz-page' ),
path('about/', views.quiz_about, name='quiz-about'),
path('submit/', views.quiz_submit, name='quiz-submit'),
## after quiz end
path('view_result/<int:page_no>/', views.quiz_view_result, name='quiz-view_result'),
path('leaderboard/', views.quiz_leaderboard, name='quiz-leaderboard'),
path('feedback/', views.quiz_feedback, name='quiz-feedback'),
]
| 37.066667
| 88
| 0.681655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 208
| 0.374101
|
e1dfa37abe08ed294d2a701673731176a4e461e5
| 3,500
|
py
|
Python
|
jamf/setconfig.py
|
pythoninthegrass/python-jamf
|
f71a44f4565fc2824ce6daf536359d563ab75ea3
|
[
"MIT"
] | 25
|
2020-11-02T18:16:22.000Z
|
2022-03-07T04:36:14.000Z
|
jamf/setconfig.py
|
pythoninthegrass/python-jamf
|
f71a44f4565fc2824ce6daf536359d563ab75ea3
|
[
"MIT"
] | 17
|
2020-12-22T19:24:05.000Z
|
2022-03-02T22:39:04.000Z
|
jamf/setconfig.py
|
pythoninthegrass/python-jamf
|
f71a44f4565fc2824ce6daf536359d563ab75ea3
|
[
"MIT"
] | 12
|
2020-10-28T19:03:29.000Z
|
2022-03-01T08:29:52.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Jamf Config
"""
__author__ = "Sam Forester"
__email__ = "sam.forester@utah.edu"
__copyright__ = "Copyright (c) 2020 University of Utah, Marriott Library"
__license__ = "MIT"
__version__ = "1.0.4"
import argparse
import getpass
import jamf
import logging
import platform
import pprint
import sys
from os import path
class Parser:
def __init__(self):
myplatform = platform.system()
if myplatform == "Darwin":
default_pref = jamf.config.MACOS_PREFS_TILDA
elif myplatform == "Linux":
default_pref = jamf.config.LINUX_PREFS_TILDA
self.parser = argparse.ArgumentParser()
self.parser.add_argument(
"-H", "--hostname", help="specify hostname (default: prompt)"
)
self.parser.add_argument(
"-u", "--user", help="specify username (default: prompt)"
)
self.parser.add_argument(
"-p", "--passwd", help="specify password (default: prompt)"
)
self.parser.add_argument(
"-C",
"--config",
dest="path",
metavar="PATH",
default=default_pref,
help=f"specify config file (default {default_pref})",
)
self.parser.add_argument(
"-P",
"--print",
action="store_true",
help="print existing config profile (except password!)",
)
self.parser.add_argument(
"-t",
"--test",
action="store_true",
help="Connect to the Jamf server using the config file",
)
def parse(self, argv):
"""
:param argv: list of arguments to parse
:returns: argparse.NameSpace object
"""
return self.parser.parse_args(argv)
def setconfig(argv):
logger = logging.getLogger(__name__)
args = Parser().parse(argv)
logger.debug(f"args: {args!r}")
if args.path:
config_path = args.path
else:
myplatform = platform.system()
if myplatform == "Darwin":
default_pref = jamf.config.MACOS_PREFS_TILDA
elif myplatform == "Linux":
default_pref = jamf.config.LINUX_PREFS_TILDA
config_path = default_pref
if config_path[0] == "~":
config_path = path.expanduser(config_path)
if args.test:
api = jamf.API(config_path=config_path)
pprint.pprint(api.get("accounts"))
elif args.print:
conf = jamf.config.Config(prompt=False, explain=True, config_path=config_path)
print(conf.hostname)
print(conf.username)
if conf.password:
print("Password is set")
else:
print("Password is not set")
else:
if args.hostname:
hostname = args.hostname
else:
hostname = jamf.config.prompt_hostname()
if args.user:
user = args.user
else:
user = input("username: ")
if args.passwd:
passwd = args.passwd
else:
passwd = getpass.getpass()
conf = jamf.config.Config(
hostname=hostname, username=user, password=passwd, prompt=False
)
conf.save(config_path=config_path)
def main():
fmt = "%(asctime)s: %(levelname)8s: %(name)s - %(funcName)s(): %(message)s"
logging.basicConfig(level=logging.INFO, format=fmt)
setconfig(sys.argv[1:])
if __name__ == "__main__":
main()
| 28.225806
| 86
| 0.575429
| 1,466
| 0.418857
| 0
| 0
| 0
| 0
| 0
| 0
| 847
| 0.242
|
e1e00ce354ffc24242ad31b4a0c1c5120baf617a
| 979
|
py
|
Python
|
src/menuResponse/migrations/0001_initial.py
|
miguelaav/dev
|
5ade9d0b393f48c9cc3b160b6ede4a03c29addea
|
[
"bzip2-1.0.6"
] | null | null | null |
src/menuResponse/migrations/0001_initial.py
|
miguelaav/dev
|
5ade9d0b393f48c9cc3b160b6ede4a03c29addea
|
[
"bzip2-1.0.6"
] | 6
|
2020-06-05T20:02:33.000Z
|
2022-03-11T23:43:11.000Z
|
src/menuResponse/migrations/0001_initial.py
|
miguelaav/dev
|
5ade9d0b393f48c9cc3b160b6ede4a03c29addea
|
[
"bzip2-1.0.6"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-12 17:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('menuCreate', '0001_initial'),
('menu', '0002_remove_menu_slug'),
]
operations = [
migrations.CreateModel(
name='MenuResponseModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comments', models.CharField(max_length=200)),
('date', models.DateField(auto_now_add=True)),
('MenuID', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='menuCreate.MenuCreateModel')),
('option', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='menu.Menu')),
],
),
]
| 32.633333
| 124
| 0.622063
| 787
| 0.803882
| 0
| 0
| 0
| 0
| 0
| 0
| 225
| 0.229826
|
e1e2d0a67c83cc0cf6dbbc60b3dc2efff897636e
| 10,889
|
py
|
Python
|
datacube/drivers/s3/storage/s3aio/s3aio.py
|
Zac-HD/datacube-core
|
ebc2025b6fb9d22fb406cdf5f79eba6d144c57e3
|
[
"Apache-2.0"
] | 2
|
2018-12-02T11:33:50.000Z
|
2021-04-24T11:42:42.000Z
|
datacube/drivers/s3/storage/s3aio/s3aio.py
|
Zac-HD/datacube-core
|
ebc2025b6fb9d22fb406cdf5f79eba6d144c57e3
|
[
"Apache-2.0"
] | 103
|
2018-03-21T15:00:05.000Z
|
2020-06-04T05:40:25.000Z
|
datacube/drivers/s3/storage/s3aio/s3aio.py
|
roarmstrong/datacube-core
|
5e38638dabd9e5112e92b503fae6a83c8dcc4902
|
[
"Apache-2.0"
] | null | null | null |
"""
S3AIO Class
Array access to a single S3 object
"""
from __future__ import absolute_import
import SharedArray as sa
import zstd
from itertools import repeat, product
import numpy as np
from pathos.multiprocessing import ProcessingPool
from six.moves import zip
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from .s3io import S3IO, generate_array_name
class S3AIO(object):
def __init__(self, enable_compression=True, enable_s3=True, file_path=None, num_workers=30):
"""Initialise the S3 array IO interface.
:param bool enable_s3: Flag to store objects in s3 or disk.
True: store in S3
False: store on disk (for testing purposes)
:param str file_path: The root directory for the emulated s3 buckets when enable_se is set to False.
:param int num_workers: The number of workers for parallel IO.
"""
self.s3io = S3IO(enable_s3, file_path, num_workers)
self.pool = ProcessingPool(num_workers)
self.enable_compression = enable_compression
def to_1d(self, index, shape):
"""Converts nD index to 1D index.
:param tuple index: N-D Index to be converted.
:param tuple shape: Shape to be used for conversion.
:return: Returns the 1D index.
"""
return np.ravel_multi_index(index, shape)
def to_nd(self, index, shape):
"""Converts 1D index to nD index.
:param tuple index: 1D Index to be converted.
:param tuple shape: Shape to be used for conversion.
:return: Returns the ND index.
"""
return np.unravel_index(index, shape)
def get_point(self, index_point, shape, dtype, s3_bucket, s3_key):
"""Gets a point in the nd array stored in S3.
Only works if compression is off.
:param tuple index_point: Index of the point to be retrieved.
:param tuple shape: Shape of the stored data.
:param numpy.dtype: dtype of the stored data.
:param str s3_bucket: S3 bucket name
:param str s3_key: S3 key name
:return: Returns the point data.
"""
item_size = np.dtype(dtype).itemsize
idx = self.to_1d(index_point, shape) * item_size
if self.enable_compression:
b = self.s3io.get_bytes(s3_bucket, s3_key)
cctx = zstd.ZstdDecompressor()
b = cctx.decompress(b)[idx:idx + item_size]
else:
b = self.s3io.get_byte_range(s3_bucket, s3_key, idx, idx + item_size)
a = np.frombuffer(b, dtype=dtype, count=-1, offset=0)
return a
def cdims(self, slices, shape):
return [sl.start == 0 and sl.stop == sh and (sl.step is None or sl.step == 1)
for sl, sh in zip(slices, shape)]
def get_slice(self, array_slice, shape, dtype, s3_bucket, s3_key): # pylint: disable=too-many-locals
"""Gets a slice of the nd array stored in S3.
Only works if compression is off.
:param tuple array_slice: tuple of slices to retrieve.
:param tuple shape: Shape of the stored data.
:param numpy.dtype: dtype of the stored data.
:param str s3_bucket: S3 bucket name
:param str s3_key: S3 key name
:return: Returns the data slice.
"""
# convert array_slice into into sub-slices of maximum contiguous blocks
# Todo:
# - parallelise reads and writes
# - option 1. get memory rows in parallel and merge
# - option 2. smarter byte range subsets depending on:
# - data size
# - data contiguity
if self.enable_compression:
return self.get_slice_by_bbox(array_slice, shape, dtype, s3_bucket, s3_key)
# truncate array_slice to shape
# array_slice = [slice(max(0, s.start) - min(sh, s.stop)) for s, sh in zip(array_sliced, shape)]
array_slice = [slice(max(0, s.start), min(sh, s.stop)) for s, sh in zip(array_slice, shape)]
cdim = self.cdims(array_slice, shape)
try:
end = cdim[::-1].index(False) + 1
except ValueError:
end = len(shape)
start = len(shape) - end
outer = array_slice[:-end]
outer_ranges = [range(s.start, s.stop) for s in outer]
outer_cells = list(product(*outer_ranges))
blocks = list(zip(outer_cells, repeat(array_slice[start:])))
item_size = np.dtype(dtype).itemsize
results = []
for cell, sub_range in blocks:
# print(cell, sub_range)
s3_start = (np.ravel_multi_index(cell + tuple([s.start for s in sub_range]), shape)) * item_size
s3_end = (np.ravel_multi_index(cell + tuple([s.stop - 1 for s in sub_range]), shape) + 1) * item_size
# print(s3_start, s3_end)
data = self.s3io.get_byte_range(s3_bucket, s3_key, s3_start, s3_end)
results.append((cell, sub_range, data))
result = np.empty([s.stop - s.start for s in array_slice], dtype=dtype)
offset = [s.start for s in array_slice]
for cell, sub_range, data in results:
t = [slice(x.start - o, x.stop - o) if isinstance(x, slice) else x - o for x, o in
zip(cell + tuple(sub_range), offset)]
if data.dtype != dtype:
data = np.frombuffer(data, dtype=dtype, count=-1, offset=0)
result[t] = data.reshape([s.stop - s.start for s in sub_range])
return result
def get_slice_mp(self, array_slice, shape, dtype, s3_bucket, s3_key): # pylint: disable=too-many-locals
"""Gets a slice of the nd array stored in S3 in parallel.
Only works if compression is off.
:param tuple array_slice: tuple of slices to retrieve.
:param tuple shape: Shape of the stored data.
:param numpy.dtype: dtype of the stored data.
:param str s3_bucket: S3 bucket name
:param str s3_key: S3 key name
:return: Returns the data slice.
"""
# pylint: disable=too-many-locals
def work_get_slice(block, array_name, offset, s3_bucket, s3_key, shape, dtype):
result = sa.attach(array_name)
cell, sub_range = block
item_size = np.dtype(dtype).itemsize
s3_start = (np.ravel_multi_index(cell + tuple([s.start for s in sub_range]), shape)) * item_size
s3_end = (np.ravel_multi_index(cell + tuple([s.stop - 1 for s in sub_range]), shape) + 1) * item_size
data = self.s3io.get_byte_range(s3_bucket, s3_key, s3_start, s3_end)
t = [slice(x.start - o, x.stop - o) if isinstance(x, slice) else x - o for x, o in
zip(cell + tuple(sub_range), offset)]
if data.dtype != dtype:
data = np.frombuffer(data, dtype=dtype, count=-1, offset=0)
# data = data.reshape([s.stop - s.start for s in sub_range])
result[t] = data.reshape([s.stop - s.start for s in sub_range])
if self.enable_compression:
return self.get_slice_by_bbox(array_slice, shape, dtype, s3_bucket, s3_key)
cdim = self.cdims(array_slice, shape)
try:
end = cdim[::-1].index(False) + 1
except ValueError:
end = len(shape)
start = len(shape) - end
outer = array_slice[:-end]
outer_ranges = [range(s.start, s.stop) for s in outer]
outer_cells = list(product(*outer_ranges))
blocks = list(zip(outer_cells, repeat(array_slice[start:])))
offset = [s.start for s in array_slice]
array_name = generate_array_name('S3AIO')
sa.create(array_name, shape=[s.stop - s.start for s in array_slice], dtype=dtype)
shared_array = sa.attach(array_name)
self.pool.map(work_get_slice, blocks, repeat(array_name), repeat(offset), repeat(s3_bucket),
repeat(s3_key), repeat(shape), repeat(dtype))
sa.delete(array_name)
return shared_array
def get_slice_by_bbox(self, array_slice, shape, dtype, s3_bucket, s3_key): # pylint: disable=too-many-locals
"""Gets a slice of the nd array stored in S3 by bounding box.
:param tuple array_slice: tuple of slices to retrieve.
:param tuple shape: Shape of the stored data.
:param numpy.dtype: dtype of the stored data.
:param str s3_bucket: S3 bucket name
:param str s3_key: S3 key name
:return: Returns the data slice.
"""
# Todo:
# - parallelise reads and writes
# - option 1. use get_byte_range_mp
# - option 2. smarter byte range subsets depending on:
# - data size
# - data contiguity
item_size = np.dtype(dtype).itemsize
s3_begin = (np.ravel_multi_index(tuple([s.start for s in array_slice]), shape)) * item_size
s3_end = (np.ravel_multi_index(tuple([s.stop - 1 for s in array_slice]), shape) + 1) * item_size
# if s3_end-s3_begin <= 5*1024*1024:
# d = self.s3io.get_byte_range(s3_bucket, s3_key, s3_begin, s3_end)
# else:
# d = self.s3io.get_byte_range_mp(s3_bucket, s3_key, s3_begin, s3_end, 5*1024*1024)
d = self.s3io.get_bytes(s3_bucket, s3_key)
if self.enable_compression:
cctx = zstd.ZstdDecompressor()
d = cctx.decompress(d)
d = np.frombuffer(d, dtype=np.uint8, count=-1, offset=0)
d = d[s3_begin:s3_end]
cdim = self.cdims(array_slice, shape)
try:
end = cdim[::-1].index(False) + 1
except ValueError:
end = len(shape)
start = len(shape) - end
outer = array_slice[:-end]
outer_ranges = [range(s.start, s.stop) for s in outer]
outer_cells = list(product(*outer_ranges))
blocks = list(zip(outer_cells, repeat(array_slice[start:])))
item_size = np.dtype(dtype).itemsize
results = []
for cell, sub_range in blocks:
s3_start = (np.ravel_multi_index(cell + tuple([s.start for s in sub_range]), shape)) * item_size
s3_end = (np.ravel_multi_index(cell + tuple([s.stop - 1 for s in sub_range]), shape) + 1) * item_size
data = d[s3_start - s3_begin:s3_end - s3_begin]
results.append((cell, sub_range, data))
result = np.empty([s.stop - s.start for s in array_slice], dtype=dtype)
offset = [s.start for s in array_slice]
for cell, sub_range, data in results:
t = [slice(x.start - o, x.stop - o) if isinstance(x, slice) else x - o for x, o in
zip(cell + tuple(sub_range), offset)]
if data.dtype != dtype:
data = np.frombuffer(data, dtype=dtype, count=-1, offset=0)
result[t] = data.reshape([s.stop - s.start for s in sub_range])
return result
| 39.452899
| 113
| 0.612545
| 10,486
| 0.96299
| 0
| 0
| 0
| 0
| 0
| 0
| 3,451
| 0.316925
|
e1e4a6c549324fabd37261ecd95b7fc5b7e7bd39
| 5,447
|
py
|
Python
|
make_snapshot.py
|
trquinn/ICgen
|
0d7f05187a955be7fa3dee2f638cfcb074ebadcd
|
[
"MIT"
] | 1
|
2021-09-14T12:03:03.000Z
|
2021-09-14T12:03:03.000Z
|
make_snapshot.py
|
trquinn/ICgen
|
0d7f05187a955be7fa3dee2f638cfcb074ebadcd
|
[
"MIT"
] | null | null | null |
make_snapshot.py
|
trquinn/ICgen
|
0d7f05187a955be7fa3dee2f638cfcb074ebadcd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 21 15:11:31 2014
@author: ibackus
"""
__version__ = "$Revision: 1 $"
# $Source$
import pynbody
SimArray = pynbody.array.SimArray
import numpy as np
import gc
import os
import isaac
import calc_velocity
import ICgen_utils
import ICglobal_settings
global_settings = ICglobal_settings.global_settings
def snapshot_gen(ICobj):
"""
Generates a tipsy snapshot from the initial conditions object ICobj.
Returns snapshot, param
snapshot: tipsy snapshot
param: dictionary containing info for a .param file
"""
print 'Generating snapshot...'
# Constants
G = SimArray(1.0,'G')
# ------------------------------------
# Load in things from ICobj
# ------------------------------------
print 'Accessing data from ICs'
settings = ICobj.settings
# filenames
snapshotName = settings.filenames.snapshotName
paramName = settings.filenames.paramName
# particle positions
r = ICobj.pos.r
xyz = ICobj.pos.xyz
# Number of particles
nParticles = ICobj.pos.nParticles
# molecular mass
m = settings.physical.m
# star mass
m_star = settings.physical.M.copy()
# disk mass
m_disk = ICobj.sigma.m_disk.copy()
m_disk = isaac.match_units(m_disk, m_star)[0]
# mass of the gas particles
m_particles = m_disk / float(nParticles)
# re-scale the particles (allows making of lo-mass disk)
m_particles *= settings.snapshot.mScale
# -------------------------------------------------
# Assign output
# -------------------------------------------------
print 'Assigning data to snapshot'
# Get units all set up
m_unit = m_star.units
pos_unit = r.units
if xyz.units != r.units:
xyz.convert_units(pos_unit)
# time units are sqrt(L^3/GM)
t_unit = np.sqrt((pos_unit**3)*np.power((G*m_unit), -1)).units
# velocity units are L/t
v_unit = (pos_unit/t_unit).ratio('km s**-1')
# Make it a unit
v_unit = pynbody.units.Unit('{0} km s**-1'.format(v_unit))
# Other settings
metals = settings.snapshot.metals
star_metals = metals
# -------------------------------------------------
# Initialize snapshot
# -------------------------------------------------
# Note that empty pos, vel, and mass arrays are created in the snapshot
snapshot = pynbody.new(star=1,gas=nParticles)
snapshot['vel'].units = v_unit
snapshot['eps'] = 0.01*SimArray(np.ones(nParticles+1, dtype=np.float32), pos_unit)
snapshot['metals'] = SimArray(np.zeros(nParticles+1, dtype=np.float32))
snapshot['rho'] = SimArray(np.zeros(nParticles+1, dtype=np.float32))
snapshot.gas['pos'] = xyz
snapshot.gas['temp'] = ICobj.T(r)
snapshot.gas['mass'] = m_particles
snapshot.gas['metals'] = metals
snapshot.star['pos'] = SimArray([[ 0., 0., 0.]],pos_unit)
snapshot.star['vel'] = SimArray([[ 0., 0., 0.]], v_unit)
snapshot.star['mass'] = m_star
snapshot.star['metals'] = SimArray(star_metals)
# Estimate the star's softening length as the closest particle distance
snapshot.star['eps'] = r.min()
# Make param file
param = isaac.make_param(snapshot, snapshotName)
param['dMeanMolWeight'] = m
gc.collect()
# -------------------------------------------------
# CALCULATE VELOCITY USING calc_velocity.py. This also estimates the
# gravitational softening length eps
# -------------------------------------------------
print 'Calculating circular velocity'
preset = settings.changa_run.preset
max_particles = global_settings['misc']['max_particles']
calc_velocity.v_xy(snapshot, param, changa_preset=preset, max_particles=max_particles)
gc.collect()
# -------------------------------------------------
# Estimate time step for changa to use
# -------------------------------------------------
# Save param file
isaac.configsave(param, paramName, 'param')
# Save snapshot
snapshot.write(filename=snapshotName, fmt=pynbody.tipsy.TipsySnap)
# est dDelta
dDelta = ICgen_utils.est_time_step(paramName, preset)
param['dDelta'] = dDelta
# -------------------------------------------------
# Create director file
# -------------------------------------------------
# largest radius to plot
r_director = float(0.9 * r.max())
# Maximum surface density
sigma_min = float(ICobj.sigma(r_director))
# surface density at largest radius
sigma_max = float(ICobj.sigma.input_dict['sigma'].max())
# Create director dict
director = isaac.make_director(sigma_min, sigma_max, r_director, filename=param['achOutName'])
## Save .director file
#isaac.configsave(director, directorName, 'director')
# -------------------------------------------------
# Wrap up
# -------------------------------------------------
print 'Wrapping up'
# Now set the star particle's tform to a negative number. This allows
# UW ChaNGa treat it as a sink particle.
snapshot.star['tform'] = -1.0
# Update params
r_sink = isaac.strip_units(r.min())
param['dSinkBoundOrbitRadius'] = r_sink
param['dSinkRadius'] = r_sink
param['dSinkMassMin'] = 0.9 * isaac.strip_units(m_star)
param['bDoSinks'] = 1
return snapshot, param, director
| 33.213415
| 98
| 0.572242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,414
| 0.44318
|
e1e5f2d6ad3305b63d32e9bc867c960c34b149c1
| 8,243
|
py
|
Python
|
diag_rank_update.py
|
IPA-HD/ldaf_classification
|
e7cd08c59d3be2cf961cf6f546ef9b375c9d96c5
|
[
"MIT"
] | null | null | null |
diag_rank_update.py
|
IPA-HD/ldaf_classification
|
e7cd08c59d3be2cf961cf6f546ef9b375c9d96c5
|
[
"MIT"
] | null | null | null |
diag_rank_update.py
|
IPA-HD/ldaf_classification
|
e7cd08c59d3be2cf961cf6f546ef9b375c9d96c5
|
[
"MIT"
] | 1
|
2022-02-23T16:13:04.000Z
|
2022-02-23T16:13:04.000Z
|
"""
Diagonal Matrix with rank-1 updates.
"""
import itertools
import torch
from torch.functional import Tensor
class DiagRankUpdate(object):
"""Diagonal Matrix with rank-1 updates"""
def __init__(self, diag, rankUpdates):
super(DiagRankUpdate, self).__init__()
self.diag = diag
self.rankUpdates = rankUpdates
assert rankUpdates.ndim == 3
assert rankUpdates.shape[1] == 2
assert rankUpdates.shape[2] == diag.shape[0]
assert rankUpdates.device == diag.device
assert rankUpdates.dtype == diag.dtype
@property
def dtype(self):
return self.diag.dtype
@property
def ndim(self):
return 2
def __repr__(self) -> str:
return "{0}×{0} DiagonalMatrix with {1} Rank-1 Update".format(
self.diag.size()[0],
len(self.rankUpdates)
) + ("s" if len(self.rankUpdates)!=1 else "")
def tensor(self):
return torch.diag(self.diag) + torch.matmul(self.rankUpdates[:,0,:].t(), self.rankUpdates[:,1,:])
def device(self):
return self.diag.device
def dim(self):
return 2
def size(self):
return torch.Size([
self.diag.size()[0],
self.diag.size()[0]
])
def t(self):
return DiagRankUpdate(self.diag.clone(), torch.flip(self.rankUpdates, (1,)))
def add(self, other):
if type(other) != DiagRankUpdate:
return torch.add(self.tensor(), other)
return DiagRankUpdate(
self.diag + other.diag,
torch.cat((self.rankUpdates, other.rankUpdates))
)
def __add__(self, other):
return self.add(other)
def __radd__(self, other):
return other.add(self)
def negative(self):
return DiagRankUpdate(
-self.diag,
self.rankUpdates * torch.tensor([[-1, 1]])
)
def __sub__(self, other):
return self.add(other.negative())
def __rsub__(self, other):
return other.add(self.negative())
def matmul(self, other):
if type(other) != DiagRankUpdate:
return torch.mul(self.tensor(), other)
return DiagRankUpdate(
self.diag * other.diag,
torch.cat((
torch.cat(
(
self.diag[None, None, :] * other.rankUpdates[:, (0,), :],
other.rankUpdates[:, (1,), :]
),
dim = 1
),
torch.cat(
(
self.rankUpdates[:, (0,), :],
other.diag[None, None, :] * self.rankUpdates[:, (1,), :]
),
dim=1
),
torch.stack([
torch.stack((s[1].dot(o[0]) * s[0], o[1])) for s, o in itertools.product(
self.rankUpdates,
other.rankUpdates
)]
)
))
)
def batchDot(self, v):
"""
Batched multiplication self @ v
with batch of matrices v (batch_size, n, k)
"""
assert v.ndim == 3
assert v.shape[1] == self.rankUpdates.shape[2]
n = v.shape[1]
diag_bmm = self.diag.reshape((1, n, 1))*v
inner_prod = torch.matmul(self.rankUpdates[:,1,:].unsqueeze(0), v)
# inner_prod now has shape (batch_size, n_updates, k)
outer_prod = torch.matmul(
self.rankUpdates[:,0,:].t().unsqueeze(0),
inner_prod
)
# outer_prod now has shape (batch_size, n, k)
return diag_bmm + outer_prod
def batchDotTransposed(self, v):
"""
Batched multiplication self.t() @ v
with batch of matrices v (batch_size, n, k)
"""
assert v.ndim == 3
assert v.shape[1] == self.rankUpdates.shape[2]
n = v.shape[1]
diag_bmm = self.diag.reshape((1, n, 1))*v
inner_prod = torch.matmul(self.rankUpdates[:,0,:].unsqueeze(0), v)
# inner_prod now has shape (batch_size, n_updates, k)
outer_prod = torch.matmul(
self.rankUpdates[:,1,:].t().unsqueeze(0),
inner_prod
)
# outer_prod now has shape (batch_size, n, k)
return diag_bmm + outer_prod
def dotRight(self, other):
"""
Multiply self @ other
"""
return self.diag * other + torch.matmul(
torch.matmul( self.rankUpdates[:,1,:] , other ),
self.rankUpdates[:,0,:]
)
def dotLeft(self, other):
"""
Multiply other @ self
"""
return self.diag * other + torch.matmul(
torch.matmul( self.rankUpdates[:,0,:] , other ),
self.rankUpdates[:,1,:]
)
def dotBoth(self, v, w):
"""
Let A be self and v, w ∈ ℝⁿ. Then `dotBoth` computes
vᵀ A w
"""
return (self.diag * v * w).sum() + torch.dot(
torch.matmul(self.rankUpdates[:, 0, :], v),
torch.matmul(self.rankUpdates[:, 1, :], w)
)
def trace(self):
return self.diag.sum() + sum([torch.dot(r[0], r[1]) for r in self.rankUpdates])
def appendUpdate(self, other):
return DiagRankUpdate(
self.diag.clone(),
torch.cat((self.rankUpdates, other[None, :, :]))
)
def inverse(self):
if self.rankUpdates.shape[0] == 0:
return DiagRankUpdate(
1 / self.diag,
torch.empty((0,2,self.size()[0]), device=self.device())
)
else:
inv = DiagRankUpdate(self.diag, self.rankUpdates[0:-1, :, :]).inverse()
v = self.rankUpdates[-1,0,:]
w = self.rankUpdates[-1,1,:]
return inv.appendUpdate(
torch.stack((
inv.dotRight(v).negative(),
inv.dotLeft(w) / (
1 + inv.dotBoth(w, v)
)
))
)
def det(self):
if self.rankUpdates.shape[0] == 0:
return self.diag.prod()
else:
reduced = DiagRankUpdate(
self.diag,
self.rankUpdates[0:-1, :, :]
)
v = self.rankUpdates[-1, 0, :]
w = self.rankUpdates[-1, 1, :]
return (1 + reduced.inverse().dotBoth(w, v)) * reduced.det()
def log_det(self):
if self.rankUpdates.shape[0] == 0:
return self.diag.log().sum()
else:
reduced = DiagRankUpdate(
self.diag,
self.rankUpdates[0:-1, :, :]
)
v = self.rankUpdates[-1, 0, :]
w = self.rankUpdates[-1, 1, :]
return torch.log(1 + reduced.inverse().dotBoth(w, v)) + reduced.log_det()
def kl_divergence(self, other, mu0=None, mu1=None):
inv = other.inverse()
if not mu0 is None:
mu1mu0 = mu1 - mu0
return (
inv.matmul(self).trace()
+
inv.dotBoth(mu1mu0, mu1mu0)
-
self.size()[0]
+
other.log_det() - self.log_det()
) / 2
kl = (
inv.matmul(self).trace()
-
self.size()[0]
+
other.log_det() - self.log_det()
) / 2
if kl < 0:
print("Warning, KL was < 0.", kl)
return kl
def projectionBoth(self):
n = self.size()[0]
ones = -torch.ones(n) / n
a = self.rankUpdates[:,0,:]
b = self.rankUpdates[:,1,:]
a_sum = a.sum(dim=1)
b_sum = b.sum(dim=1)
return self.appendUpdate(
torch.stack((
ones,
self.diag + a_sum @ b
))
).appendUpdate(
torch.stack((
self.diag + b_sum @ a,
ones
))
).appendUpdate(
torch.stack((
(self.diag.sum() + (a_sum * b_sum).sum()) * ones,
ones,
))
)
| 28.922807
| 105
| 0.474463
| 8,140
| 0.986428
| 0
| 0
| 109
| 0.013209
| 0
| 0
| 767
| 0.092947
|
e1e66cd3308883f2371baad138a10eed2eac4eff
| 4,074
|
py
|
Python
|
tests/garage/tf/policies/test_gaussian_mlp_policy_with_model.py
|
XavierJingfeng/starter
|
274566e491d5c7157f3c8deff136c56838022349
|
[
"MIT"
] | null | null | null |
tests/garage/tf/policies/test_gaussian_mlp_policy_with_model.py
|
XavierJingfeng/starter
|
274566e491d5c7157f3c8deff136c56838022349
|
[
"MIT"
] | null | null | null |
tests/garage/tf/policies/test_gaussian_mlp_policy_with_model.py
|
XavierJingfeng/starter
|
274566e491d5c7157f3c8deff136c56838022349
|
[
"MIT"
] | null | null | null |
import pickle
from unittest import mock
from nose2.tools.params import params
import numpy as np
import tensorflow as tf
from garage.tf.envs import TfEnv
from garage.tf.policies import GaussianMLPPolicyWithModel
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
from tests.fixtures.models import SimpleGaussianMLPModel
class TestGaussianMLPPolicyWithModel(TfGraphTestCase):
@params(
((1, ), (1, )),
((1, ), (2, )),
((2, ), (2, )),
((1, 1), (1, 1)),
((1, 1), (2, 2)),
((2, 2), (2, 2)),
)
def test_get_action(self, obs_dim, action_dim):
env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
with mock.patch(('garage.tf.policies.'
'gaussian_mlp_policy_with_model.GaussianMLPModel'),
new=SimpleGaussianMLPModel):
policy = GaussianMLPPolicyWithModel(env_spec=env.spec)
env.reset()
obs, _, _, _ = env.step(1)
action, prob = policy.get_action(obs)
expected_action = np.full(action_dim, 0.75)
expected_mean = np.full(action_dim, 0.5)
expected_log_std = np.full(action_dim, 0.5)
assert env.action_space.contains(action)
assert np.array_equal(action, expected_action)
assert np.array_equal(prob['mean'], expected_mean)
assert np.array_equal(prob['log_std'], expected_log_std)
actions, probs = policy.get_actions([obs, obs, obs])
for action, mean, log_std in zip(actions, probs['mean'],
probs['log_std']):
assert env.action_space.contains(action)
assert np.array_equal(action, expected_action)
assert np.array_equal(prob['mean'], expected_mean)
assert np.array_equal(prob['log_std'], expected_log_std)
@params(
((1, ), (1, )),
((1, ), (2, )),
((2, ), (2, )),
((1, 1), (1, 1)),
((1, 1), (2, 2)),
((2, 2), (2, 2)),
)
def test_dist_info_sym(self, obs_dim, action_dim):
env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
with mock.patch(('garage.tf.policies.'
'gaussian_mlp_policy_with_model.GaussianMLPModel'),
new=SimpleGaussianMLPModel):
policy = GaussianMLPPolicyWithModel(env_spec=env.spec)
env.reset()
obs, _, _, _ = env.step(1)
obs_dim = env.spec.observation_space.flat_dim
obs_ph = tf.placeholder(tf.float32, shape=(None, obs_dim))
dist1_sym = policy.dist_info_sym(obs_ph, name='p1_sym')
expected_mean = np.full(action_dim, 0.5)
expected_log_std = np.full(action_dim, 0.5)
prob = self.sess.run(dist1_sym, feed_dict={obs_ph: [obs.flatten()]})
assert np.array_equal(prob['mean'], expected_mean)
assert np.array_equal(prob['log_std'], expected_log_std)
@params(
((1, ), (1, )),
((1, ), (2, )),
((2, ), (2, )),
((1, 1), (1, 1)),
((1, 1), (2, 2)),
((2, 2), (2, 2)),
)
def test_is_pickleable(self, obs_dim, action_dim):
env = TfEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
with mock.patch(('garage.tf.policies.'
'gaussian_mlp_policy_with_model.GaussianMLPModel'),
new=SimpleGaussianMLPModel):
policy = GaussianMLPPolicyWithModel(env_spec=env.spec)
env.reset()
obs, _, _, _ = env.step(1)
obs_dim = env.spec.observation_space.flat_dim
action1, prob1 = policy.get_action(obs)
p = pickle.dumps(policy)
with tf.Session(graph=tf.Graph()):
policy_pickled = pickle.loads(p)
action2, prob2 = policy_pickled.get_action(obs)
assert env.action_space.contains(action1)
assert np.array_equal(action1, action2)
assert np.array_equal(prob1['mean'], prob2['mean'])
assert np.array_equal(prob1['log_std'], prob2['log_std'])
| 35.736842
| 76
| 0.59352
| 3,707
| 0.909917
| 0
| 0
| 3,636
| 0.892489
| 0
| 0
| 308
| 0.075601
|
e1e7670f03c464a40b12de227929a84b71ca6496
| 3,015
|
py
|
Python
|
cloudify_gcp/monitoring/stackdriver_uptimecheck.py
|
cloudify-cosmo/cloudify-gcp-plugin
|
c70faee0555070f7fc67f0001395eaafb681b23c
|
[
"Apache-2.0"
] | 4
|
2016-10-24T17:42:07.000Z
|
2020-05-31T00:34:07.000Z
|
cloudify_gcp/monitoring/stackdriver_uptimecheck.py
|
cloudify-cosmo/cloudify-gcp-plugin
|
c70faee0555070f7fc67f0001395eaafb681b23c
|
[
"Apache-2.0"
] | 35
|
2015-04-30T20:14:01.000Z
|
2022-02-03T21:35:54.000Z
|
cloudify_gcp/monitoring/stackdriver_uptimecheck.py
|
cloudify-cosmo/cloudify-gcp-plugin
|
c70faee0555070f7fc67f0001395eaafb681b23c
|
[
"Apache-2.0"
] | 13
|
2015-04-17T16:42:03.000Z
|
2021-06-24T04:12:14.000Z
|
# #######
# Copyright (c) 2018-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify import ctx
from cloudify.decorators import operation
from cloudify_gcp.gcp import check_response
from .. import utils
from .. import constants
from ..monitoring import MonitoringBase
class StackDriverUpTimeCheckConfig(MonitoringBase):
def __init__(self, config, logger,
project_id=None, uptime_check_config=None, name=None):
super(StackDriverUpTimeCheckConfig, self).__init__(
config,
logger,
project_id,
None)
self.project_id = project_id
self.uptime_check_config = uptime_check_config
self.name = name
@check_response
def create(self):
return self.discovery_uptime_check.create(
parent='projects/{}'.format(self.project_id),
body=self.uptime_check_config).execute()
@check_response
def delete(self):
return self.discovery_uptime_check.delete(name=self.name).execute()
@check_response
def update(self):
return self.discovery_uptime_check.update(
name=self.name,
body=self.uptime_check_config).execute()
@operation(resumable=True)
@utils.throw_cloudify_exceptions
def create(project_id, uptime_check_config, **kwargs):
if utils.resource_created(ctx, constants.NAME):
return
gcp_config = utils.get_gcp_config()
group = StackDriverUpTimeCheckConfig(
gcp_config, ctx.logger,
project_id=project_id, uptime_check_config=uptime_check_config)
resource = utils.create(group)
ctx.instance.runtime_properties[constants.NAME] = resource[constants.NAME]
@operation(resumable=True)
@utils.retry_on_failure('Retrying deleting stackdriver group')
@utils.throw_cloudify_exceptions
def delete(**kwargs):
gcp_config = utils.get_gcp_config()
props = ctx.instance.runtime_properties
if props.get(constants.NAME):
group = StackDriverUpTimeCheckConfig(
gcp_config, ctx.logger, name=props[constants.NAME])
utils.delete_if_not_external(group)
@operation(resumable=True)
@utils.throw_cloudify_exceptions
def update(project_id, uptime_check_config, **kwargs):
gcp_config = utils.get_gcp_config()
uptime_check = StackDriverUpTimeCheckConfig(
gcp_config, ctx.logger, project_id, uptime_check_config,
name=ctx.instance.runtime_properties[constants.NAME])
uptime_check.update()
| 33.5
| 78
| 0.725373
| 920
| 0.305141
| 0
| 0
| 1,737
| 0.576119
| 0
| 0
| 664
| 0.220232
|
e1e7fd1d9bbf595b4d131e3b6ac6e686c46e866f
| 2,041
|
py
|
Python
|
tests/test_database.py
|
penggan666/index_selection_evaluation
|
b6daf1f30c24a0675f4e3acfbd17304e5d91cfd6
|
[
"MIT"
] | 37
|
2020-03-03T10:59:06.000Z
|
2022-03-29T11:51:37.000Z
|
tests/test_database.py
|
Jiachen-Shi/index_selection_evaluation
|
fb22b929cbab22377e90a12ae23ea4002d8eab7b
|
[
"MIT"
] | 19
|
2020-03-10T14:55:56.000Z
|
2021-05-20T09:54:32.000Z
|
tests/test_database.py
|
Jiachen-Shi/index_selection_evaluation
|
fb22b929cbab22377e90a12ae23ea4002d8eab7b
|
[
"MIT"
] | 14
|
2020-08-10T03:12:40.000Z
|
2022-02-28T06:08:16.000Z
|
import unittest
from selection.dbms.postgres_dbms import PostgresDatabaseConnector
from selection.index import Index
from selection.table_generator import TableGenerator
from selection.workload import Column, Query, Table
class TestDatabase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.db_name = "tpch_test_db_database"
db = PostgresDatabaseConnector(None, autocommit=True)
TableGenerator("tpch", 0.001, db, explicit_database_name=cls.db_name)
db.close()
@classmethod
def tearDownClass(cls):
connector = PostgresDatabaseConnector(None, autocommit=True)
if connector.database_exists(cls.db_name):
connector.drop_database(cls.db_name)
def test_postgres_index_simulation(self):
db = PostgresDatabaseConnector(self.db_name, "postgres")
self.assertTrue(db.supports_index_simulation())
db.close()
def test_simple_statement(self):
db = PostgresDatabaseConnector(self.db_name, "postgres")
statement = "select count(*) from nation"
result = db.exec_fetch(statement)
self.assertEqual(result[0], 25)
db.close()
def test_runtime_data_logging(self):
db = PostgresDatabaseConnector(self.db_name, "postgres")
query = Query(17, "SELECT count(*) FROM nation;")
db.get_cost(query)
self.assertEqual(db.cost_estimations, 1)
self.assertGreater(db.cost_estimation_duration, 0)
column_n_name = Column("n_name")
nation_table = Table("nation")
nation_table.add_column(column_n_name)
index = Index([column_n_name])
index_oid = db.simulate_index(index)[0]
self.assertGreater(db.index_simulation_duration, 0)
self.assertEqual(db.simulated_indexes, 1)
previou_simulation_duration = db.index_simulation_duration
db.drop_simulated_index(index_oid)
self.assertGreater(db.index_simulation_duration, previou_simulation_duration)
if __name__ == "__main__":
unittest.main()
| 34.016667
| 85
| 0.707496
| 1,766
| 0.865262
| 0
| 0
| 451
| 0.22097
| 0
| 0
| 144
| 0.070554
|
e1e8c509d815e0208974db78a033ef909fdca7d8
| 2,012
|
py
|
Python
|
aljson/__init__.py
|
hrzp/aljson
|
83cab23f9466c8ca5803dba7d5ec998646ff0436
|
[
"MIT"
] | 1
|
2020-02-02T11:33:29.000Z
|
2020-02-02T11:33:29.000Z
|
aljson/__init__.py
|
hrzp/aljson
|
83cab23f9466c8ca5803dba7d5ec998646ff0436
|
[
"MIT"
] | null | null | null |
aljson/__init__.py
|
hrzp/aljson
|
83cab23f9466c8ca5803dba7d5ec998646ff0436
|
[
"MIT"
] | null | null | null |
from sqlalchemy.orm.collections import InstrumentedList
class BaseMixin:
caller_stack = list()
def extract_relations(self):
return self.__mapper__.relationships.keys()
def extract_columns(self):
return self.__mapper__.columns.keys()
def get_columns(self):
result = dict()
result['relationships'] = self.extract_relations()
result['columns'] = self.extract_columns()
return result
def convert_columns_to_dict(self, columns):
result = dict()
for item in columns:
result[item] = getattr(self, item)
return result
def convert_instrumented_list(self, items):
result = list()
for item in items:
result.append(item.to_json(self.caller_stack))
return result
def detect_class_name(self, item):
if item.__class__.__name__ == 'InstrumentedList':
return item[0].__class__.__name__.lower()
return item.__class__.__name__.lower()
def convert_relations_to_dict(self, relations):
result = dict()
me = self.__class__.__name__.lower()
self.caller_stack.append(me)
for relation in relations:
obj = getattr(self, relation)
if self.detect_class_name(obj) in self.caller_stack:
continue
if type(obj) == InstrumentedList:
result[relation] = self.convert_instrumented_list(obj)
continue
result[relation] = obj.to_json(self.caller_stack)
return result
def to_json(self, caller_stack=None):
'''
Convert a SqlAlchemy query object to a dict(json)
'''
self.caller_stack = [] if not caller_stack else caller_stack
final_obj = dict()
columns = self.get_columns()
final_obj.update(self.convert_columns_to_dict(columns['columns']))
final_obj.update(self.convert_relations_to_dict(
columns['relationships']))
return final_obj
| 31.936508
| 74
| 0.630716
| 1,953
| 0.970676
| 0
| 0
| 0
| 0
| 0
| 0
| 143
| 0.071074
|
e1e8ce55d278ecec5ff0a778a7af4a2bbb524f3a
| 1,274
|
py
|
Python
|
src/robotide/context/coreplugins.py
|
veryl-technologies/t24-tests-ide
|
16cd803895916a785c0e1fec3f71f9388c21edc9
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-06-27T08:48:24.000Z
|
2019-06-27T08:48:24.000Z
|
src/robotide/context/coreplugins.py
|
veryl-technologies/t24-tests-ide
|
16cd803895916a785c0e1fec3f71f9388c21edc9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/robotide/context/coreplugins.py
|
veryl-technologies/t24-tests-ide
|
16cd803895916a785c0e1fec3f71f9388c21edc9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_core_plugins():
from robotide.run import RunAnything
from robotide.recentfiles import RecentFilesPlugin
from robotide.ui.preview import PreviewPlugin
from robotide.ui.keywordsearch import KeywordSearch
from robotide.editor import EditorPlugin
from robotide.editor.texteditor import TextEditorPlugin
from robotide.log import LogPlugin
from robotide.searchtests.searchtests import TestSearchPlugin
from robotide.spec.specimporter import SpecImporterPlugin
return [RunAnything, RecentFilesPlugin, PreviewPlugin, SpecImporterPlugin,
EditorPlugin, TextEditorPlugin, KeywordSearch, LogPlugin, TestSearchPlugin]
| 42.466667
| 87
| 0.78022
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 592
| 0.464678
|
e1e993089a256f12c7dadf856a619e12a83973e8
| 918
|
py
|
Python
|
backend/apps/api/system/v1/serializers/groups.py
|
offurface/smsta
|
b8a1f2e6efe6c71703c8d57e8aae255ad213863c
|
[
"MIT"
] | null | null | null |
backend/apps/api/system/v1/serializers/groups.py
|
offurface/smsta
|
b8a1f2e6efe6c71703c8d57e8aae255ad213863c
|
[
"MIT"
] | null | null | null |
backend/apps/api/system/v1/serializers/groups.py
|
offurface/smsta
|
b8a1f2e6efe6c71703c8d57e8aae255ad213863c
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from ... import models
class DepartmentSerializers(serializers.ModelSerializer):
"""
Сериализатор кафедр
"""
class Meta:
model = models.Department
fields = ["short_name", "full_name"]
class StudentSerializers(serializers.ModelSerializer):
"""
Сериализатор студентов
"""
class Meta:
model = models.Student
fields = ["pk", "name", "surname", "patronymic", "gender"]
class AcademicGroupsDetailSerializers(serializers.ModelSerializer):
"""
Сериализатор Академических Групп
"""
department = DepartmentSerializers()
students = StudentSerializers(many=True, read_only=True)
class Meta:
model = models.AcademicGroup
fields = [
"pk",
"start_date",
"department",
"name",
"students",
"course",
]
| 21.348837
| 67
| 0.606754
| 916
| 0.928065
| 0
| 0
| 0
| 0
| 0
| 0
| 304
| 0.308004
|
e1ea50469b885baae0f3ea29526541040d09f40f
| 6,629
|
py
|
Python
|
cinebot_mini/web_utils/blender_client.py
|
cheng-chi/cinebot_mini
|
708a7c80d2f203dfe3b52bf84d9cbafac7673d27
|
[
"MIT"
] | null | null | null |
cinebot_mini/web_utils/blender_client.py
|
cheng-chi/cinebot_mini
|
708a7c80d2f203dfe3b52bf84d9cbafac7673d27
|
[
"MIT"
] | null | null | null |
cinebot_mini/web_utils/blender_client.py
|
cheng-chi/cinebot_mini
|
708a7c80d2f203dfe3b52bf84d9cbafac7673d27
|
[
"MIT"
] | null | null | null |
from cinebot_mini import SERVERS
import requests
import numpy as np
import json
def base_url():
blender_dict = SERVERS["blender"]
url = "http://{}:{}".format(
blender_dict["host"], blender_dict["port"])
return url
def handshake():
url = base_url() + "/api/ping"
for i in range(5):
try:
r = requests.get(url, timeout=1.0)
r_data = r.json()
assert(r_data["url"] == "/api/ping")
return True
except Exception as e:
continue
return False
def create_object(name, type="CAMERA"):
url = base_url() + "/api/create"
data = {
"type": type,
"name": name
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
obj_dict = r_data['result']
if "name" in obj_dict:
return obj_dict["name"]
else:
print("Creating {} failed!", obj_name)
def create_objects(type="CAMERA", num=4, base_name="screen_camera_"):
url = base_url() + "/api/create"
obj_names = []
for i in range(num):
obj_name = base_name + str(i)
data = {
"type": type,
"name": obj_name
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
obj_dict = r_data['result']
if "name" in obj_dict:
obj_names.append(obj_dict["name"])
else:
print("Creating {} failed!", obj_name)
return obj_names
def set_transform_euler(obj_name, loc, rot, degree=True):
url = base_url() + "/api/object/" + obj_name + "/property"
rot_data = list(rot)
if degree:
rot_data = (np.array(rot) / 180.0 * np.pi).tolist()
data = {
"properties": {
"location": list(loc),
"rotation_euler": list(rot_data)
}
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"]
def set_transform_matrix(obj_name, matrix):
url = base_url() + "/api/object/" + obj_name + "/property"
data = {
"properties": {
"matrix_world": matrix.tolist()
}
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"]
def set_transform_matrix(obj_name, matrix):
url = base_url() + "/api/object/" + obj_name + "/property"
data = {
"properties": {
"matrix_world": matrix.tolist()
}
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"]
def set_property(obj_name, key, val, prop_type="properties"):
url = base_url() + "/api/object/" + obj_name + "/property"
data = {
prop_type: {
key: val
}
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"]
def get_property(obj_name):
url = base_url() + "/api/object/" + obj_name + "/property"
r = requests.get(url)
r_data = r.json()
return r_data["result"]
def test_object_exist(obj_name):
url = base_url() + "/api/object/" + obj_name + "/property"
data = dict()
r = requests.get(url, data=json.dumps(data))
return r.status_code != 404
def set_animation_euler(obj_name, locs, rots, degree=True):
url = base_url() + "/api/object/" + obj_name + "/animation"
rot_data = rots
if degree:
rot_data = rots / 180.0 * np.pi
transforms = []
for t in range(len(locs)):
tf_data = dict()
tf_data["frame_number"] = t
tf_data["location"] = locs[t].tolist()
tf_data["rotation_euler"] = rot_data[t].tolist()
transforms.append(tf_data)
data = {
"transforms": transforms
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"]
def set_animation_matrix(obj_name, matrices):
url = base_url() + "/api/object/" + obj_name + "/animation"
transforms = []
for t in range(len(matrices)):
tf_data = dict()
tf_data["frame_number"] = t
tf_data["matrix_world"] = matrices[t].tolist()
transforms.append(tf_data)
data = {
"transforms": transforms
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"]
def get_animation_dict(obj_name):
url = base_url() + "/api/object/" + obj_name + "/animation"
r = requests.get(url)
r_data = r.json()
animation = r_data["result"]
result = dict()
for frame in animation:
t = frame["frame_number"]
arr = np.array(frame["matrix_world"])
result[t] = arr
return result
def get_animation(obj_name):
url = base_url() + "/api/object/" + obj_name + "/animation"
r = requests.get(url)
r_data = r.json()
animation = r_data["result"]
result = []
for frame in animation:
arr = np.array(frame["matrix_world"])
result.append(arr)
return result
def delete_animation(obj_name):
url = base_url() + "/api/object/" + obj_name + "/animation"
r = requests.delete(url)
r_data = r.json()
return r_data["result"]
def delete_object(obj_name):
url = base_url() + "/api/object/" + obj_name
r = requests.delete(url)
r_data = r.json()
return r_data["result"]
def render_animation(file_name, frame_start, frame_end):
url = base_url() + "/api/render/animation"
data = {
"output_file_path": file_name,
"frame_start": frame_start,
"frame_end": frame_end
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"]
def set_render_resolution(pixel_dim):
url = base_url() + "/api/render/property"
x, y = pixel_dim
data = {
"properties": {
"resolution_x": x,
"resolution_y": y
}
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"] == "SUCCESS"
def set_camera_properties(cam_name, focal_length_m, sensor_dims_m):
url = base_url() + "/api/object/" + cam_name + "/property"
lens = focal_length_m * 1000
w, h = np.array(sensor_dims_m) * 1000
data = {
"data_properties": {
"lens": lens,
"sensor_width": w,
"sensor_height": h
}
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"] == "SUCCESS"
def set_active_camera(cam_name):
url = base_url() + "/api/render/active_camera"
data = {
"name": cam_name
}
r = requests.put(url, data=json.dumps(data))
r_data = r.json()
return r_data["result"] == "SUCCESS"
| 25.996078
| 69
| 0.577312
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,140
| 0.171972
|
e1eb5b5cf0257ffeb6de52c29326fb2195c7a273
| 6,733
|
py
|
Python
|
gem5-configs/configs-microbench-tests/run_controlbenchmarks.py
|
TCHERNET/parsec-tests2
|
775b299a890d0d552877ed510240aa59c630eaa3
|
[
"BSD-3-Clause"
] | 5
|
2020-05-20T12:24:29.000Z
|
2021-07-20T01:49:30.000Z
|
gem5-configs/configs-microbench-tests/run_controlbenchmarks.py
|
TCHERNET/parsec-tests2
|
775b299a890d0d552877ed510240aa59c630eaa3
|
[
"BSD-3-Clause"
] | 26
|
2020-04-03T15:01:48.000Z
|
2021-06-09T19:08:31.000Z
|
gem5-configs/configs-microbench-tests/run_controlbenchmarks.py
|
TCHERNET/parsec-tests2
|
775b299a890d0d552877ed510240aa59c630eaa3
|
[
"BSD-3-Clause"
] | 3
|
2020-07-04T14:51:29.000Z
|
2021-09-16T04:33:45.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018 The Regents of the University of California
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Jason Lowe-Power
from __future__ import print_function
import argparse
import m5
from m5.objects import TimingSimpleCPU, DerivO3CPU
from m5.objects import SimpleIndirectPredictor, LocalBP, BiModeBP, TournamentBP, LTAGE, SimpleMemory
from m5.objects import Root
from m5.objects import *
from system import BaseTestSystem
from system import InfMemory, SingleCycleMemory, SlowMemory
# Branch predictor params
# If indirect Predictor is disabled use BTB with these params
btbEntries = 512
btbTagSize = 19
class IndirectPred(SimpleIndirectPredictor):
indirectSets = 256 # Cache sets for indirect predictor
indirectWays = 2 # Ways for indirect predictor
indirectTagSize = 16 # Indirect target cache tag bits
indirectPathLength = 3 # Previous indirect targets to use for path history
indirectGHRBits = 13 # Indirect GHR number of bits
ipred = SimpleIndirectPredictor()
#CPU Configs
class Simple_LocalBP(TimingSimpleCPU):
branchPred = LocalBP()
branchPred.BTBEntries = btbEntries
branchPred.BTBTagSize = btbTagSize
branchPred.indirectBranchPred = ipred # set this to null to disable indirect predictior
branchPred.localPredictorSize = 2048
branchPred.localCtrBits = 2
class DefaultO3_LocalBP(DerivO3CPU):
branchPred = LocalBP()
branchPred.BTBEntries = btbEntries
branchPred.BTBTagSize = btbTagSize
branchPred.indirectBranchPred = ipred # set this to null to disable indirect predictior
branchPred.localPredictorSize = 2048
branchPred.localCtrBits = 2
class Simple_BiModeBP(TimingSimpleCPU):
branchPred = BiModeBP()
branchPred.BTBEntries = btbEntries
branchPred.BTBTagSize = btbTagSize
branchPred.indirectBranchPred = ipred # set this to null to disable indirect predictior
branchPred.globalPredictorSize = 8192
branchPred.globalCtrBits = 2
branchPred.choicePredictorSize = 8192
branchPred.choiceCtrBits = 2
class DefaultO3_BiModeBP(DerivO3CPU):
branchPred = BiModeBP()
branchPred.BTBEntries = btbEntries
branchPred.BTBTagSize = btbTagSize
branchPred.indirectBranchPred = ipred # set this to null to disable indirect predictior
branchPred.globalPredictorSize = 8192
branchPred.globalCtrBits = 2
branchPred.choicePredictorSize = 8192
branchPred.choiceCtrBits = 2
class Simple_TournamentBP(TimingSimpleCPU):
branchPred = TournamentBP()
branchPred.BTBEntries = btbEntries
branchPred.BTBTagSize = btbTagSize
branchPred.indirectBranchPred = ipred # set this to null to disable indirect predictior
branchPred.localPredictorSize = 2048
branchPred.localCtrBits = 2
branchPred.localHistoryTableSize = 2048
branchPred.globalPredictorSize = 8192
branchPred.globalCtrBits = 2
branchPred.choicePredictorSize = 8192
branchPred.choiceCtrBits = 2
class DefaultO3_TournamentBP(DerivO3CPU):
branchPred = TournamentBP()
branchPred.BTBEntries = btbEntries
branchPred.BTBTagSize = btbTagSize
branchPred.indirectBranchPred = ipred # set this to null to disable indirect predictior
branchPred.localPredictorSize = 2048
branchPred.localCtrBits = 2
branchPred.localHistoryTableSize = 2048
branchPred.globalPredictorSize = 8192
branchPred.globalCtrBits = 2
branchPred.choicePredictorSize = 8192
branchPred.choiceCtrBits = 2
class Simple_LTAGEBP(TimingSimpleCPU):
branchPred = LTAGE()
branchPred.BTBEntries = btbEntries
branchPred.BTBTagSize = btbTagSize
branchPred.indirectBranchPred = ipred # set this to null to disable indirect predictior
class DefaultO3_LTAGEBP(DerivO3CPU):
branchPred = LTAGE()
branchPred.BTBEntries = btbEntries
branchPred.BTBTagSize = btbTagSize
branchPred.indirectBranchPred = ipred # set this to null to disable indirect predictior
# Add more CPUs Configs under test before this
valid_configs = [Simple_LocalBP, Simple_BiModeBP, Simple_TournamentBP, Simple_LTAGEBP, DefaultO3_LocalBP, DefaultO3_BiModeBP, DefaultO3_TournamentBP, DefaultO3_LTAGEBP]
valid_configs = {cls.__name__[:-2]:cls for cls in valid_configs}
# Add more Memories under test before this
valid_memories = [InfMemory, SingleCycleMemory, SlowMemory]
valid_memories = {cls.__name__[:-6]:cls for cls in valid_memories}
parser = argparse.ArgumentParser()
parser.add_argument('config', choices = valid_configs.keys())
parser.add_argument('memory_model', choices = valid_memories.keys())
parser.add_argument('binary', type = str, help = "Path to binary to run")
args = parser.parse_args()
class MySystem(BaseTestSystem):
_CPUModel = valid_configs[args.config]
_MemoryModel = valid_memories[args.memory_model]
system = MySystem()
system.setTestBinary(args.binary)
root = Root(full_system = False, system = system)
m5.instantiate()
exit_event = m5.simulate()
if exit_event.getCause() != 'exiting with last active thread context':
print("Benchmark failed with bad exit cause.")
print(exit_event.getCause())
exit(1)
if exit_event.getCode() != 0:
print("Benchmark failed with bad exit code.")
print("Exit code {}".format(exit_event.getCode()))
exit(1)
print("{} ms".format(m5.curTick()/1e9))
| 40.077381
| 168
| 0.776771
| 3,345
| 0.496807
| 0
| 0
| 0
| 0
| 0
| 0
| 2,521
| 0.374424
|
e1ebe5e056a585344fff7992dae1cbba59732df5
| 1,273
|
py
|
Python
|
poezio/args.py
|
hrnciar/poezio
|
12b8af11df35dda535412b0c02ba792890095a7d
|
[
"Zlib"
] | null | null | null |
poezio/args.py
|
hrnciar/poezio
|
12b8af11df35dda535412b0c02ba792890095a7d
|
[
"Zlib"
] | null | null | null |
poezio/args.py
|
hrnciar/poezio
|
12b8af11df35dda535412b0c02ba792890095a7d
|
[
"Zlib"
] | null | null | null |
"""
Module related to the argument parsing
There is a fallback to the deprecated optparse if argparse is not found
"""
from pathlib import Path
from argparse import ArgumentParser, SUPPRESS
from poezio.version import __version__
def parse_args(CONFIG_PATH: Path):
"""
Parse the arguments from the command line
"""
parser = ArgumentParser('poezio')
parser.add_argument(
"-c",
"--check-config",
dest="check_config",
action='store_true',
help='Check the config file')
parser.add_argument(
"-d",
"--debug",
dest="debug",
help="The file where debug will be written",
metavar="DEBUG_FILE")
parser.add_argument(
"-f",
"--file",
dest="filename",
default=CONFIG_PATH / 'poezio.cfg',
type=Path,
help="The config file you want to use",
metavar="CONFIG_FILE")
parser.add_argument(
'-v',
'--version',
action='version',
version='Poezio v%s' % __version__,
)
parser.add_argument(
"--custom-version",
dest="custom_version",
help=SUPPRESS,
metavar="VERSION",
default=__version__
)
options = parser.parse_args()
return options
| 24.480769
| 71
| 0.593087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 482
| 0.378633
|
e1ed3b48fe37cb69350c8b6542e4845c264e91f2
| 1,125
|
py
|
Python
|
src/mist/api/poller/schedulers.py
|
vladimir-ilyashenko/mist.api
|
f77c451679732ac1cfdafa85ad023c7c170faec4
|
[
"Apache-2.0"
] | null | null | null |
src/mist/api/poller/schedulers.py
|
vladimir-ilyashenko/mist.api
|
f77c451679732ac1cfdafa85ad023c7c170faec4
|
[
"Apache-2.0"
] | null | null | null |
src/mist/api/poller/schedulers.py
|
vladimir-ilyashenko/mist.api
|
f77c451679732ac1cfdafa85ad023c7c170faec4
|
[
"Apache-2.0"
] | null | null | null |
from celerybeatmongo.schedulers import MongoScheduler
from mist.api.sharding.mixins import ShardManagerMixin
from mist.api.poller.models import PollingSchedule
from mist.api.poller.models import OwnerPollingSchedule
from mist.api.poller.models import CloudPollingSchedule
from mist.api.poller.models import MachinePollingSchedule
import datetime
class PollingScheduler(MongoScheduler):
Model = PollingSchedule
UPDATE_INTERVAL = datetime.timedelta(seconds=20)
class OwnerPollingScheduler(MongoScheduler):
Model = OwnerPollingSchedule
UPDATE_INTERVAL = datetime.timedelta(seconds=20)
class CloudPollingScheduler(MongoScheduler):
Model = CloudPollingSchedule
UPDATE_INTERVAL = datetime.timedelta(seconds=20)
class MachinePollingScheduler(MongoScheduler):
Model = MachinePollingSchedule
UPDATE_INTERVAL = datetime.timedelta(seconds=20)
class ShardedOwnerScheduler(ShardManagerMixin, OwnerPollingScheduler):
pass
class ShardedCloudScheduler(ShardManagerMixin, CloudPollingScheduler):
pass
class ShardedMachineScheduler(ShardManagerMixin, MachinePollingScheduler):
pass
| 26.162791
| 74
| 0.826667
| 755
| 0.671111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e1ee5eb16b3e9a592172165671953d6cc3271d6d
| 13,939
|
py
|
Python
|
datasets/hdd_classif.py
|
valeoai/BEEF
|
f1c5f3708ba91f6402dd05814b76dca1d9012942
|
[
"Apache-2.0"
] | 4
|
2021-05-31T16:53:35.000Z
|
2021-11-30T03:03:34.000Z
|
datasets/hdd_classif.py
|
valeoai/BEEF
|
f1c5f3708ba91f6402dd05814b76dca1d9012942
|
[
"Apache-2.0"
] | 3
|
2022-02-02T20:41:56.000Z
|
2022-02-24T11:47:44.000Z
|
datasets/hdd_classif.py
|
valeoai/BEEF
|
f1c5f3708ba91f6402dd05814b76dca1d9012942
|
[
"Apache-2.0"
] | null | null | null |
from collections import Counter
import json
from pathlib import Path
from PIL import Image
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from bootstrap.lib.logger import Logger
from bootstrap.datasets import transforms as bootstrap_tf
try:
from .hdd import HDD
except:
from hdd import HDD
class HDDClassif(HDD):
def __init__(self,
dir_data,
split,
win_size,
im_size,
layer, # "goal" or "cause"
frame_position,
traintest_mode,
fps=10,
horizon=2, # in seconds
extract_mode=False,
batch_size=2,
debug=False,
shuffle=False,
pin_memory=False,
nb_threads=0):
self.win_size = win_size
self.frame_position = frame_position
super(HDDClassif, self).__init__(dir_data,
split,
im_size,
fps,
horizon, # in seconds
batch_size,
debug,
shuffle,
pin_memory,
nb_threads)
self.layer = layer
if self.layer == "cause":
self.layer_id = '1'
self.classid_to_ix = [-1, 16, 17, 18, 19, 20, 22]
elif self.layer == "goal":
self.layer_id = '0'
self.classid_to_ix = [-1, 0, 1, 2, 3, 4, 5, 7, 8, 10, 11, 12]
else:
raise ValueError(self.layer)
# The classid 0 is the background class
self.ix_to_classid = dict((ix, classid) for classid, ix in enumerate(self.classid_to_ix))
self.class_freq = self.get_class_freq()
self.collate_fn = bootstrap_tf.Compose([
bootstrap_tf.ListDictsToDictLists(),
bootstrap_tf.StackTensors()
])
self.dir_navig_features = self.dir_processed_annot
self.im_transform = transforms.Compose([transforms.Resize((self.im_h, self.im_w)),
transforms.ToTensor(),
transforms.Normalize(mean = [0.43216, 0.394666, 0.37645],
std = [0.22803, 0.22145, 0.216989])])
self.traintest_mode = traintest_mode
if self.traintest_mode:
self.make_batch_loader = self._make_batch_loader_traintest
else:
self.make_batch_loader = self._make_batch_loader
def classid_to_classname(self, classid):
ix = self.classid_to_ix[classid]
if ix == -1:
return '__background__'
else:
return self.ix_to_event[ix]
def _make_batch_loader(self, batch_size=None, shuffle=None, num_samples=200000):
nb_threads = self.nb_threads
batch_size = self.batch_size if batch_size is None else batch_size
shuffle = self.shuffle if shuffle is None else shuffle
if shuffle:
sampler = data.RandomSampler(self, replacement=True, num_samples=min(num_samples, len(self)))
shuffle = None
else:
sampler = None
batch_loader = data.DataLoader(
dataset=self,
batch_size=batch_size,
shuffle=shuffle,
pin_memory=self.pin_memory,
num_workers=nb_threads,
collate_fn=self.collate_fn,
sampler=sampler)
return batch_loader
def _make_batch_loader_traintest(self, batch_size=None, shuffle=None):
nb_threads = self.nb_threads
batch_size = self.batch_size if batch_size is None else batch_size
num_samples = batch_size*70000
shuffle = self.shuffle if shuffle is None else shuffle
if shuffle:
sampler = data.RandomSampler(self, replacement=True, num_samples=num_samples)
shuffle = None
else:
sampler = None
batch_loader = data.DataLoader(
dataset=self,
batch_size=batch_size,
shuffle=shuffle,
pin_memory=self.pin_memory,
num_workers=nb_threads,
collate_fn=self.collate_fn,
sampler=sampler)
return batch_loader
def build_index(self):
Logger()('Building index for %s split...' % self.split)
split_file = self.dir_data.joinpath(self.split+'.txt')
index = []
session_template = "{0}-{1}-{2}-{3}-{4}"
self.vid_to_index = []
self.vidname_to_vidid = {}
for idx, session_id in enumerate(open(split_file)):
name = session_template.format(session_id[:4],
session_id[4:6],
session_id[6:8],
session_id[8:10],
session_id[10:12])
annot_paths = list(filter(lambda x:name in x.as_posix(),
self.dir_processed_annot.iterdir()))
if len(annot_paths) == 0:
continue
assert len(annot_paths) == 1
annot_path = annot_paths[0]
if annot_path.exists():
frame_annots = sorted(annot_path.iterdir())
frame_annots = [None]*self.frame_position + frame_annots + [None]*(self.win_size-self.frame_position-1) # Zero-padding of the full video, such that each frame can get a context
L = [frame_annots[i:i+self.win_size] for i in range(0, len(frame_annots)-self.win_size+1)]
self.vid_to_index.append((len(index), len(index)+len(L)))
self.vidname_to_vidid[annot_path.name] = len(index)
index += L
# if self.debug:
# index += frame_annots[5000:7000]
# break
# else:
# index += frame_annots
if self.debug and idx==1:
break
Logger()('Done')
return index
def get_class_freq(self):
class_freq_path = self.dir_processed_annot.joinpath('%s_class_freq.json' % self.layer)
if class_freq_path.exists():
Logger()('Loading class frequency')
class_freq = json.load(open(class_freq_path))
Logger()('Loaded class frequency')
else:
Logger()('Computing class frequency')
if self.split != "train":
raise NotImplementedError('Extract class weigths on train set first')
class_freq = self.compute_class_freq()
with open(class_freq_path, 'w') as F:
F.write(json.dumps(class_freq))
return class_freq
def compute_class_freq(self):
class_freq = Counter()
S = 0
for paths in self.index:
annot_path = paths[-1]
if annot_path is None:
continue
annot = json.load(open(annot_path))
event = annot['labels'][self.layer_id]
classid = self.ix_to_classid.get(event, 0)
class_freq[classid] += 1
S += 1
for classid in class_freq:
class_freq[classid] = class_freq[classid] / S
return class_freq
def get_navig(self, annot):
item = {}
if len(annot['prev_xy']) == self.length:
prev_xy = torch.Tensor(annot['prev_xy'])
r_prev_xy = torch.Tensor(annot['r_prev_xy'])
else:
# should be padded before
n = len(annot['prev_xy'])
prev_xy = torch.Tensor(self.length,2).zero_()
r_prev_xy = torch.Tensor(self.length,2).zero_()
if n>0:
prev_xy[self.length - n:] = torch.Tensor(annot['prev_xy'])
r_prev_xy[self.length - n:] = torch.Tensor(annot['r_prev_xy'])
item['prev_xy'] = prev_xy
item['r_prev_xy'] = r_prev_xy
if len(annot['next_xy']) == self.length:
next_xy = torch.Tensor(annot['next_xy'])
r_next_xy = torch.Tensor(annot['r_next_xy'])
else:
# should be padded after
n = len(annot['next_xy'])
next_xy = torch.Tensor(self.length,2).zero_()
r_next_xy = torch.Tensor(self.length,2).zero_()
if n>0:
next_xy[:n] = torch.Tensor(annot['next_xy'])
r_next_xy[:n] = torch.Tensor(annot['r_next_xy'])
item['next_xy'] = next_xy
item['r_next_xy'] = r_next_xy
item['blinkers'] = torch.LongTensor([self.blinkers_to_ix[annot['blinkers']]])
return item
def get_navig_path(self, annot_path):
# Sometimes, due to sampling considerations, the navig annotation doesn't exist.
# We simply take the navig annotation for the closest existing sample
annot_navig_path = self.dir_navig_features.joinpath(annot_path.parent.name,
annot_path.name)
if not annot_navig_path.exists():
annot_num = int(annot_path.stem)
annot_navig_path = self.dir_navig_features.joinpath(annot_path.parent.name,
f"{annot_num-1:06d}.json")
if not annot_navig_path.exists():
annot_navig_path = self.dir_navig_features.joinpath(annot_path.parent.name,
f"{annot_num+1:06d}.json")
if not annot_navig_path.exists():
annot_navig_path = self.dir_navig_features.joinpath(annot_path.parent.name,
f"{annot_num-2:06d}.json")
return annot_navig_path
def __getitem__(self, idx):
paths = self.index[idx]
y_true = torch.LongTensor(self.win_size).zero_() -1
frames = None
navig = None
item = {}
for frame_id, annot_path in enumerate(paths):
if annot_path is None:
continue
frame_number = int(annot_path.stem) + 1
frames_folder = self.dir_processed_img.joinpath(annot_path.parent.name)
frame_path = frames_folder.joinpath(f"{frame_number:06d}.jpg")
im = Image.open(frame_path)
im = self.im_transform(im)
if frames is None:
frames = torch.Tensor(self.win_size, 3, self.im_h, self.im_w).zero_()
frames[frame_id] = im
annot = json.load(open(annot_path))
event = annot['labels'][self.layer_id]
y_true[frame_id] = self.ix_to_classid.get(event, 0)
if navig is None:
navig = {'prev_xy':torch.Tensor(self.win_size, self.length, 2).zero_() - 1,
'next_xy':torch.Tensor(self.win_size, self.length, 2).zero_() - 1,
'r_prev_xy':torch.Tensor(self.win_size, self.length, 2).zero_() - 1,
'r_next_xy':torch.Tensor(self.win_size, self.length, 2).zero_() - 1,
'xy_polynom':torch.Tensor(self.win_size, 5, 2).zero_() - 1,
'blinkers':torch.LongTensor(self.win_size).zero_() - 1}
annot_navig_path = self.get_navig_path(annot_path)
annot_navig = json.load(open(annot_navig_path))
_navig = self.get_navig(annot_navig)
for k in _navig:
navig[k][frame_id] = _navig[k]
item.update(navig)
item['frames'] = frames
item['idx'] = idx
item['paths'] = paths
item['frame_path'] = paths[self.frame_position]
item['y_true_all'] = y_true
item['y_true'] = y_true[self.frame_position]
for k in navig:
item[k+'_all'] = item[k]
item[k] = item[k+'_all'][self.frame_position]
item['frame_position'] = torch.LongTensor([self.frame_position])
return item
if __name__ == "__main__":
split = "val"
fps = 3
dir_data = Path("/datasets_local/HDD")
nb_threads = 0
horizon = 2
win_size = 21
layer = "goal"
batch_size = 12
use_navig = False
im_size = "small"
dataset = HDDClassif(dir_data,
split,
win_size,
im_size,
layer, # "goal" or "cause"
use_navig=use_navig,
fps=fps,
horizon=horizon, # in seconds
batch_size=batch_size,
debug=False,
shuffle=False,
pin_memory=False,
nb_threads=0)
vidname_to_index = {}
for idx, sequence in enumerate(dataset.index):
vid_name = sequence[0].parent.name
if vid_name not in vidname_to_index:
vidname_to_index[vid_name] = []
vidname_to_index[vid_name].append(idx)
batch_sampler = SequentialBatchSampler(vidname_to_index, batch_size)
N = 0
for batch in batch_sampler:
print(batch)
N += 1
# item = dataset[5]
# loader = dataset.make_batch_loader(batch_size,
# shuffle=False)
# for idx, batch in enumerate(loader):
# break
| 38.084699
| 193
| 0.522204
| 12,208
| 0.875816
| 0
| 0
| 0
| 0
| 0
| 0
| 1,400
| 0.100438
|
e1f01e5ef61eacab7ab09c6ac2aca35cf6f0b034
| 921
|
py
|
Python
|
1W/6/3.py
|
allenalvin333/Hackerrank_Prep
|
26ed5b874daba4775d006824d36f9e82ea5ff1ea
|
[
"MIT"
] | 2
|
2021-11-25T13:38:36.000Z
|
2021-11-25T13:42:56.000Z
|
1W/6/3.py
|
allenalvin333/Hackerrank_Prep
|
26ed5b874daba4775d006824d36f9e82ea5ff1ea
|
[
"MIT"
] | null | null | null |
1W/6/3.py
|
allenalvin333/Hackerrank_Prep
|
26ed5b874daba4775d006824d36f9e82ea5ff1ea
|
[
"MIT"
] | 1
|
2021-11-25T13:38:43.000Z
|
2021-11-25T13:38:43.000Z
|
# https://www.hackerrank.com/challenges/one-week-preparation-kit-jesse-and-cookies/problem
#!/bin/python3
import math
import os
import random
import re
import sys
import heapq
#
# Complete the 'cookies' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER k
# 2. INTEGER_ARRAY A
#
def cookies(k, A, z=0):
heapq.heapify(A)
while True:
a = heapq.heappop(A)
if(a>=k): return z
if(len(A)==0): return -1
b = heapq.heappop(A)
heapq.heappush(A,(a+2*b))
z+=1
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
k = int(first_multiple_input[1])
A = list(map(int, input().rstrip().split()))
result = cookies(k, A)
fptr.write(str(result) + '\n')
fptr.close()
| 20.021739
| 90
| 0.633008
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 305
| 0.331162
|
e1f029d6dec3a3f66d804dec3fb860fb4b271b25
| 3,895
|
py
|
Python
|
toughio/capillarity/_base.py
|
keurfonluu/toughio
|
1db0600ee5ad1abb5ca858c81c8ac5226c9dbb4f
|
[
"BSD-3-Clause-LBNL"
] | 21
|
2020-03-05T20:03:58.000Z
|
2022-03-14T23:17:42.000Z
|
toughio/capillarity/_base.py
|
keurfonluu/toughio
|
1db0600ee5ad1abb5ca858c81c8ac5226c9dbb4f
|
[
"BSD-3-Clause-LBNL"
] | 60
|
2020-02-14T22:53:01.000Z
|
2022-03-26T07:24:19.000Z
|
toughio/capillarity/_base.py
|
keurfonluu/toughio
|
1db0600ee5ad1abb5ca858c81c8ac5226c9dbb4f
|
[
"BSD-3-Clause-LBNL"
] | 6
|
2020-02-28T08:15:36.000Z
|
2022-03-13T23:26:24.000Z
|
from abc import ABCMeta, abstractmethod, abstractproperty
import numpy
__all__ = [
"BaseCapillarity",
]
# See <https://stackoverflow.com/questions/35673474/using-abc-abcmeta-in-a-way-it-is-compatible-both-with-python-2-7-and-python-3-5>
ABC = ABCMeta("ABC", (object,), {"__slots__": ()})
class BaseCapillarity(ABC):
_id = None
_name = ""
def __init__(self, *args):
"""
Base class for capillarity models.
Do not use.
"""
pass
def __repr__(self):
"""Display capillarity model informations."""
out = ["{} capillarity model (ICP = {}):".format(self._name, self._id)]
out += [
" CP({}) = {}".format(i + 1, parameter)
for i, parameter in enumerate(self.parameters)
]
return "\n".join(out)
def __call__(self, sl):
"""Calculate capillary pressure given liquid saturation."""
if numpy.ndim(sl) == 0:
if not (0.0 <= sl <= 1.0):
raise ValueError()
return self._eval(sl, *self.parameters)
else:
sl = numpy.asarray(sl)
if not numpy.logical_and((sl >= 0.0).all(), (sl <= 1.0).all()):
raise ValueError()
return numpy.array([self._eval(sat, *self.parameters) for sat in sl])
@abstractmethod
def _eval(self, sl, *args):
raise NotImplementedError()
def plot(self, n=100, ax=None, figsize=(10, 8), plt_kws=None):
"""
Plot capillary pressure curve.
Parameters
----------
n : int, optional, default 100
Number of saturation points.
ax : matplotlib.pyplot.Axes or None, optional, default None
Matplotlib axes. If `None`, a new figure and axe is created.
figsize : array_like or None, optional, default None
New figure size if `ax` is `None`.
plt_kws : dict or None, optional, default None
Additional keywords passed to :func:`matplotlib.pyplot.semilogy`.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError(
"Plotting capillary pressure curve requires matplotlib to be installed."
)
if not (isinstance(n, int) and n > 1):
raise ValueError()
if not (ax is None or isinstance(ax, plt.Axes)):
raise TypeError()
if not (figsize is None or isinstance(figsize, (tuple, list, numpy.ndarray))):
raise TypeError()
if len(figsize) != 2:
raise ValueError()
if not (plt_kws is None or isinstance(plt_kws, dict)):
raise TypeError()
# Plot parameters
plt_kws = plt_kws if plt_kws is not None else {}
_kwargs = {"linestyle": "-", "linewidth": 2}
_kwargs.update(plt_kws)
# Initialize figure
if ax:
ax1 = ax
else:
figsize = figsize if figsize else (8, 5)
fig = plt.figure(figsize=figsize, facecolor="white")
ax1 = fig.add_subplot(1, 1, 1)
# Calculate capillary pressure
sl = numpy.linspace(0.0, 1.0, n)
pcap = self(sl)
# Plot
ax1.semilogy(sl, numpy.abs(pcap), **_kwargs)
ax1.set_xlim(0.0, 1.0)
ax1.set_xlabel("Saturation (liquid)")
ax1.set_ylabel("Capillary pressure (Pa)")
ax1.grid(True, linestyle=":")
plt.draw()
plt.show()
return ax1
@property
def id(self):
"""Return capillarity model ID in TOUGH."""
return self._id
@property
def name(self):
"""Return capillarity model name."""
return self._name
@abstractproperty
def parameters(self):
raise NotImplementedError()
@parameters.setter
def parameters(self, value):
raise NotImplementedError()
| 29.507576
| 132
| 0.562773
| 3,596
| 0.923235
| 0
| 0
| 452
| 0.116046
| 0
| 0
| 1,266
| 0.325032
|
e1f08688ada9b36c08693a0c6eb7ff57ba0e5786
| 23,988
|
py
|
Python
|
gui.py
|
NejcHirci/material-addon
|
c08e2081413c3319b712c2f7193ac8013f601382
|
[
"MIT"
] | 4
|
2022-01-31T14:26:39.000Z
|
2022-02-06T06:34:27.000Z
|
gui.py
|
NejcHirci/material-addon
|
c08e2081413c3319b712c2f7193ac8013f601382
|
[
"MIT"
] | 2
|
2021-11-30T12:19:27.000Z
|
2021-11-30T12:42:10.000Z
|
gui.py
|
NejcHirci/material-addon
|
c08e2081413c3319b712c2f7193ac8013f601382
|
[
"MIT"
] | null | null | null |
import bpy
import glob
from bpy.types import Panel, Operator
from bpy.app.handlers import persistent
import os
import threading
from queue import Queue
from pathlib import Path
from . mix_ops import *
from . matgan_ops import *
from . neural_ops import *
cache_path = os.path.join(Path(__file__).parent.resolve(), '.cache')
# Redraw all function
def redraw_all(context):
for area in context.screen.areas:
if area.type in ['NODE_EDITOR']:
area.tag_redraw()
# Thread function for reading output
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line.decode('utf-8').strip())
out.close()
@persistent
def on_addon_save(dummy):
for mat in bpy.data.materials:
if "matgan" in mat.name:
match = re.match(".+?(?=_matgan_mat)", mat.name)
obj_name = match[0] if match else ""
if obj_name in bpy.data.objects:
obj = bpy.data.objects[obj_name]
dir = os.path.join(obj["MaterialGAN_Path"], 'out')
update_matgan(obj, dir)
elif "neural" in mat.name:
match = re.match(".+?(?=_neural_mat)", mat.name)
obj_name = match[0] if match else ""
if obj_name in bpy.data.objects:
obj = bpy.data.objects[obj_name]
dir = os.path.join(obj["Neural_Path"], 'out')
update_neural(obj, dir)
elif "mix" in mat.name:
match = re.match(".+?(?=_mix_mat)", mat.name)
obj_name = match[0] if match else ""
if obj_name in bpy.data.objects:
obj = bpy.data.objects[obj_name]
dir = os.path.join(obj["Algorithmic_Path"], 'out')
update_mix(obj, dir)
@persistent
def on_addon_load(dummy):
MAT_OT_MATGAN_GetInterpolations._popen = None
MAT_OT_MATGAN_Generator._popen = None
MAT_OT_MATGAN_InputFromFlashImage._popen = None
MAT_OT_MATGAN_SuperResolution._popen = None
blender_path = os.path.join(Path(__file__).parent.resolve(), 'final.blend')
with bpy.data.libraries.load(blender_path, link=False) as (data_from, data_to):
data_to.materials = [mat for mat in data_from.materials]
group_list = ['photo_to_pbr', 'Aluminium', 'Wood', 'Plastic', 'Plaster', 'Leather', 'Silk', 'Concrete', 'Marble']
data_to.node_groups = [n for n in data_from.node_groups if n in group_list]
if not os.path.exists(cache_path):
os.makedirs(cache_path)
else:
for root, dirs, files in os.walk(cache_path):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
# Load mix images
names = ['Aluminium', 'Wood', 'Plastic', 'Plaster', 'Leather', 'Silk', 'Concrete', 'Marble']
for i in names:
img = bpy.data.images.load(os.path.join(Path(__file__).parent.resolve(), f'algorithmic/{i}.png'))
img.name = i
img.preview_ensure()
def update_active_mat(self, context):
active_obj = bpy.context.active_object
if active_obj:
if context.scene.SelectWorkflow == 'MatGAN':
base_name = "matgan_mat"
elif context.scene.SelectWorkflow == 'NeuralMAT':
base_name = "neural_mat"
elif context.scene.SelectWorkflow == 'MixMAT':
base_name = "mix_mat"
name = f"{active_obj.name}_{base_name}"
if name not in bpy.data.materials:
mat = bpy.data.materials[base_name].copy()
mat.name = name
else:
mat = bpy.data.materials[name]
active_obj.active_material = mat
if context.scene.SelectWorkflow == 'MatGAN' and 'MaterialGAN_Path' in active_obj:
bpy.context.scene.matgan_properties.directory = active_obj['MaterialGAN_Path']
elif context.scene.SelectWorkflow == 'NeuralMAT' and 'Neural_Path' in active_obj:
bpy.context.scene.neural_properties.directory = active_obj['Neural_Path']
elif context.scene.SelectWorkflow == 'MixMAT' and 'Algorithmic_Path' in active_obj:
bpy.context.scene.mixmat_properties.directory = active_obj['Algorithmic_Path']
# Copy files to .cache folder
def copy_to_cache(src_path, name):
dst_path = os.path.join(cache_path, name)
if not os.path.exists(dst_path):
os.makedirs(dst_path)
if os.path.isdir(src_path):
for file in os.listdir(os.fsencode(src_path)):
f = os.fsdecode(file)
if f.endswith(".png") or f.endswith(".pt") or f.endswith('.ckpt'):
shutil.copyfile(os.path.join(src_path, f), os.path.join(dst_path, f))
def register():
if on_addon_load not in bpy.app.handlers.load_post:
bpy.app.handlers.load_post.append(on_addon_load)
if on_addon_save not in bpy.app.handlers.save_pre:
bpy.app.handlers.save_pre.append(on_addon_save)
bpy.types.Scene.SelectWorkflow = bpy.props.EnumProperty(
name='Material System Select',
description='Selected Material System for editing and generation.',
items={
('MatGAN', 'MaterialGAN + LIIF', 'Using MaterialGAN for generation and LIIF model for upscaling. ' \
+ 'Editing implemented as vector space exploration.'),
('NeuralMAT', 'Neural Material', 'Using Neural Material model for generatiog. ' \
+ 'Editing implemented as material interpolations.'),
('MixMAT', 'Algorithmic generation', 'Using a Blender shader nodes approach for ' \
+ 'generating textures from albedo with mix blender shader nodes for editing.')
},
default='MatGAN',
update=update_active_mat
)
def unregister():
if on_addon_load in bpy.app.handlers.load_post:
bpy.app.handlers.load_post.remove(on_addon_load)
if on_addon_save in bpy.app.handlers.save_pre:
bpy.app.handlers.save_pre.remove(on_addon_save)
class MAT_PT_GeneratorPanel(Panel):
bl_space_type = "NODE_EDITOR"
bl_region_type = "UI"
bl_label = "Modifier operations"
bl_category = "MaterialGenerator Util"
thumb_scale = 8.0
check_existing = False
mix_preview = None
def draw_matgan(self, context):
layout = self.layout
matgan = bpy.context.scene.matgan_properties
# ================================================
# Draw MaterialGAN props and operators
# ================================================
row = layout.row()
row.prop(matgan, "progress", emboss=False, text="Status")
row = layout.row()
col = row.column()
col.prop(matgan, "num_rend", text="Num of images")
col = row.column()
col.prop(matgan, "epochs", text="Epochs")
row = layout.row()
row.prop(matgan, "directory", text="Directory")
row.operator("matgan.file_browser", icon="FILE_FOLDER", text="")
row = layout.row()
col = row.column()
col.operator("matgan.input_from_images", text="Format flash images")
row = layout.row()
col = row.column()
col.operator("matgan.mat_from_images", text="Generate Material")
col = row.column()
col.operator("matgan.stop_generator", text="", icon="PAUSE")
layout.separator()
# ================================================
# Draw Upscale LIIF
# ================================================
row = layout.row()
col = row.column()
col.prop(matgan, "h_res", text="Height resolution")
col = row.column()
col.prop(matgan, "w_res", text="Width resolution")
row = layout.row()
row.operator("matgan.super_res", text="Upscale material")
layout.separator()
row = layout.row()
row.operator("matgan.get_interpolations", text="Get interpolations")
layout.separator()
# ================================================
# Draw Gallery view
# ================================================
if MAT_OT_MATGAN_GetInterpolations._popen is None and MAT_OT_MATGAN_Generator._popen is None:
row = layout.row()
row.operator("matgan.revert_material", text="Revert material to previous")
self.draw_gallery(context, matgan, "matgan")
def draw_gallery(self, context, gan, mode):
x = MAT_OT_GalleryDirection.direction
interp_dir = os.path.join(gan.directory, 'interps')
out_dir = os.path.join(gan.directory, 'out')
rname = f"{bpy.context.active_object.name}_{mode}" if bpy.context.active_object else mode
if f'7_{x}_render.png' in bpy.data.images and f"{rname}_render.png" in bpy.data.images:
layout = self.layout
row = layout.row()
sign = '+' if MAT_OT_GalleryDirection.direction == 1 else '-'
row.operator("wm.edit_direction_toggle", text="Toggle direction")
box = layout.box()
cols = box.column_flow(columns=3)
# Get images
dir_list = sorted(glob.glob(interp_dir + f'/*_{x}_render.png'))
id = 0
for dir in dir_list:
if id == 4:
in_box = cols.box()
col = in_box.column()
img = bpy.data.images[f'{rname}_render.png']
img.preview_ensure()
col.template_icon(icon_value=img.preview.icon_id, scale=10)
col.label(text="Current material")
name = os.path.split(dir)[1]
img = bpy.data.images[name]
img.preview_ensure()
in_box = cols.box()
col = in_box.column()
col.template_icon(icon_value=img.preview.icon_id, scale=10)
operator = col.operator(f'{mode}.edit_move', text=f"Semantic {sign}{name[0]}")
operator.direction = name[0]
id += 1
def draw_neural(self, context):
layout = self.layout
neural = bpy.context.scene.neural_properties
# ================================================
# Draw NeuralMaterial props and operators
# ================================================
row = layout.row()
row.prop(neural, "progress", emboss=False, text="Status")
row = layout.row()
col = row.column()
col.prop(neural, "num_rend", text="Images")
col = row.column()
col.prop(neural, "epochs", text="Epochs")
col = row.column()
col.prop(neural, "seed", text="Seed")
row = layout.row()
col = row.column()
col.prop(neural, "h_res", text="Height resolution")
col = row.column()
col.prop(neural, "w_res", text="Width resolution")
row = layout.row()
row.prop(neural, "directory", text="Directory")
row.operator("neural.file_browser", icon="FILE_FOLDER", text="")
row = layout.row()
col = row.column()
col.operator("neural.generator", text="Generate Material")
col = row.column()
col.operator("neural.stop_generator", text="", icon="PAUSE")
row = layout.row()
col = row.column()
col.operator("neural.reseed", text="Upscale Material")
layout.separator()
# ================================================
# Draw NeuralMaterial interpolations operator
# ================================================
row = layout.row()
row.operator("neural.get_interpolations", text="Get interpolations")
layout.separator()
# ================================================
# Draw Gallery view
# ================================================
if MAT_OT_NEURAL_GetInterpolations._popen is None and MAT_OT_NEURAL_Generator._popen is None:
row = layout.row()
row.operator("neural.revert_material", text="Revert material to previous")
self.draw_gallery(context, neural, "neural")
def draw_mixmat(self, context):
layout = self.layout
mix = bpy.context.scene.mixmat_properties
# ================================================
# Draw Mix Materials generator operator
# ================================================
row = layout.row()
row.prop(mix, "progress", emboss=False, text="Status")
row = layout.row()
row.prop(mix, "directory", text="Directory")
row.operator("mixmat.file_browser", icon="FILE_FOLDER", text="")
row = layout.row()
row.operator("mixmat.generator", text="Generate")
layout.separator()
# ================================================
# Draw Mix material interpolations operator
# ================================================
row = layout.row()
row.prop(mix, "material", text="Select")
if 'Material' in mix.progress:
row.prop(mix, "value", text="Mix level")
layout.separator()
row = layout.row()
img = bpy.data.images[mix.material]
row.template_icon(icon_value=img.preview.icon_id, scale=10)
def draw(self, context):
self.layout.prop(context.scene, 'SelectWorkflow')
if context.scene.SelectWorkflow == 'MatGAN':
self.draw_matgan(context)
elif context.scene.SelectWorkflow == 'NeuralMAT':
self.draw_neural(context)
elif context.scene.SelectWorkflow == 'MixMAT':
self.draw_mixmat(context)
class MAT_OT_StatusUpdater(Operator):
"""Operator which runs its self from a timer"""
bl_idname = "wm.modal_status_updater"
bl_label = "Modal Status Updater"
_sTime = 0
_timer = None
_thread = None
_q = Queue()
def modal(self, context, event):
gan = bpy.context.scene.matgan_properties
if event.type == 'TIMER':
if MAT_OT_MATGAN_Generator._popen:
if MAT_OT_MATGAN_Generator._popen.poll() is None:
try:
line = self._q.get_nowait()
print(line)
update_matgan(bpy.context.active_object, os.path.join(gan.directory, 'out'))
gan.progress = line
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
redraw_all(context)
except:
pass
else:
name = f"{bpy.context.active_object.name}_matgan" if bpy.context.active_object else "matgan"
copy_to_cache(os.path.join(gan.directory, 'out'), name)
update_matgan(bpy.context.active_object, os.path.join(cache_path, name))
gan.progress = "Material generated."
redraw_all(context)
MAT_OT_MATGAN_Generator._popen = None
self.cancel(context)
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
return {'CANCELLED'}
elif MAT_OT_MATGAN_InputFromFlashImage._popen:
if MAT_OT_MATGAN_InputFromFlashImage._popen.poll() is None:
try:
line = self._q.get_nowait()
print(line)
gan.progress = line
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
redraw_all(context)
except:
pass
else:
gan.progress = "Input ready."
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
redraw_all(context)
MAT_OT_MATGAN_InputFromFlashImage._popen = None
self.cancel(context)
return {'CANCELLED'}
elif MAT_OT_MATGAN_SuperResolution._popen:
if MAT_OT_MATGAN_SuperResolution._popen.poll() is not None:
gan.progress = "Material upscaled."
name = f"{bpy.context.active_object.name}_matgan" if bpy.context.active_object else "matgan"
copy_to_cache(os.path.join(gan.directory, 'out'), name)
update_matgan(bpy.context.active_object, os.path.join(cache_path, name))
redraw_all(context)
MAT_OT_MATGAN_SuperResolution._popen = None
self._thread = None
self.cancel(context)
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
return {'CANCELLED'}
elif MAT_OT_MATGAN_GetInterpolations._popen:
if MAT_OT_MATGAN_GetInterpolations._popen.poll() is None:
try:
line = self._q.get_nowait()
print(line)
gan.progress = line
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
redraw_all(context)
except:
pass
else:
name = f"{bpy.context.active_object.name}_matgan" if bpy.context.active_object else "matgan"
check_remove_img(f'{name}_render.png')
img = bpy.data.images.load(os.path.join(gan.directory, 'out') + '/render.png')
img.name = f'{name}_render.png'
interp_path = os.path.join(gan.directory, 'interps')
dir_list = sorted(glob.glob(interp_path + '/*_*_render.png'))
for dir in dir_list:
check_remove_img(os.path.split(dir)[1])
img = bpy.data.images.load(dir)
img.name = os.path.split(dir)[1]
gan.progress = "Material interpolations generated."
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
redraw_all(context)
MAT_OT_MATGAN_GetInterpolations._popen = None
self.cancel(context)
return {'CANCELLED'}
elif MAT_OT_NEURAL_Generator._popen:
gan = bpy.context.scene.neural_properties
if MAT_OT_NEURAL_Generator._popen.poll() is None:
try:
line = self._q.get_nowait()
print(line)
update_neural(bpy.context.active_object, os.path.join(gan.directory, 'out'))
gan.progress = line
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
redraw_all(context)
except:
pass
else:
name = f"{bpy.context.active_object.name}_neural" if bpy.context.active_object else "neural"
copy_to_cache(os.path.join(gan.directory, 'out'), name)
update_neural(bpy.context.active_object, os.path.join(cache_path, name))
gan.progress = "Material generated."
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
redraw_all(context)
MAT_OT_NEURAL_Generator._popen = None
self.cancel(context)
return {'CANCELLED'}
elif MAT_OT_NEURAL_GetInterpolations._popen:
gan = bpy.context.scene.neural_properties
if MAT_OT_NEURAL_GetInterpolations._popen.poll() is None:
try:
line = self._q.get_nowait()
print(line)
gan.progress = line
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
redraw_all(context)
except:
pass
else:
name = f"{bpy.context.active_object.name}_neural" if bpy.context.active_object else "neural"
check_remove_img(f'{name}_render.png')
img = bpy.data.images.load(os.path.join(gan.directory, 'out') + '/render.png')
img.name = f'{name}_render.png'
interp_path = os.path.join(gan.directory, 'interps')
dir_list = sorted(glob.glob(interp_path + '/*_*_render.png'))
for dir in dir_list:
check_remove_img(os.path.split(dir)[1])
img = bpy.data.images.load(dir)
img.name = os.path.split(dir)[1]
gan.progress = "Material interpolations generated."
gan.progress += f" Elapsed time: {time.time()-self._sTime:.3f}"
copy_to_cache(os.path.join(gan.directory, 'out'), name)
update_neural(bpy.context.active_object, os.path.join(cache_path, name))
redraw_all(context)
MAT_OT_NEURAL_GetInterpolations._popen = None
self.cancel(context)
return {'CANCELLED'}
else:
self.cancel(context)
return {'CANCELLED'}
return {'PASS_THROUGH'}
def execute(self, context):
self._sTime = time.time()
wm = context.window_manager
self._timer = wm.event_timer_add(0.1, window=context.window)
wm.modal_handler_add(self)
if MAT_OT_MATGAN_Generator._popen:
self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_MATGAN_Generator._popen.stdout, self._q), daemon=True)
elif MAT_OT_MATGAN_InputFromFlashImage._popen:
self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_MATGAN_InputFromFlashImage._popen.stdout, self._q), daemon=True)
elif MAT_OT_MATGAN_GetInterpolations._popen:
self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_MATGAN_GetInterpolations._popen.stdout, self._q), daemon=True)
elif MAT_OT_MATGAN_SuperResolution._popen:
self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_MATGAN_SuperResolution._popen.stdout, self._q), daemon=True)
elif MAT_OT_NEURAL_Generator._popen:
self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_NEURAL_Generator._popen.stdout, self._q), daemon=True)
elif MAT_OT_NEURAL_GetInterpolations._popen:
self._thread = threading.Thread(target=enqueue_output, args=(MAT_OT_NEURAL_GetInterpolations._popen.stdout, self._q), daemon=True)
self._thread.start()
return {'RUNNING_MODAL'}
def cancel(self, context):
wm = context.window_manager
wm.event_timer_remove(self._timer)
class MAT_OT_GalleryDirection(Operator):
"""Operator which switches gallery edit direction"""
bl_idname = "wm.edit_direction_toggle"
bl_label = "Direction switch operator"
direction = 1
def execute(self, context):
if MAT_OT_GalleryDirection.direction == 1:
MAT_OT_GalleryDirection.direction = 2
bpy.context.scene.matgan_properties.direction = MAT_OT_GalleryDirection.direction = 2
bpy.context.scene.neural_properties.direction = MAT_OT_GalleryDirection.direction = 2
else:
MAT_OT_GalleryDirection.direction = 1
bpy.context.scene.matgan_properties.direction = MAT_OT_GalleryDirection.direction = 1
bpy.context.scene.neural_properties.direction = MAT_OT_GalleryDirection.direction = 1
return {'FINISHED'}
| 42.306878
| 144
| 0.557654
| 17,980
| 0.749541
| 0
| 0
| 2,377
| 0.099091
| 0
| 0
| 5,039
| 0.210063
|
e1f180db019536ccc2e9f00c32c47da031376111
| 4,266
|
py
|
Python
|
run.py
|
kbeyer/RPi-LED-SpectrumAnalyzer
|
f5a5f1210f02188599eb308f5737392ce8c93218
|
[
"MIT"
] | 14
|
2015-01-09T12:26:06.000Z
|
2021-03-22T22:16:53.000Z
|
run.py
|
kbeyer/RPi-LED-SpectrumAnalyzer
|
f5a5f1210f02188599eb308f5737392ce8c93218
|
[
"MIT"
] | 4
|
2015-07-19T07:20:51.000Z
|
2017-02-01T16:11:22.000Z
|
run.py
|
kbeyer/RPi-LED-SpectrumAnalyzer
|
f5a5f1210f02188599eb308f5737392ce8c93218
|
[
"MIT"
] | 4
|
2016-03-07T12:12:08.000Z
|
2018-03-04T21:57:13.000Z
|
""" Main entry point for running the demo. """
# Standard library
import time
import sys
# Third party library
import alsaaudio as aa
# Local library
from char import show_text
from hs_logo import draw_logo
from leds import ColumnedLEDStrip
from music import calculate_levels, read_musicfile_in_chunks, calculate_column_frequency
from shairplay import initialize_shairplay, shutdown_shairplay, RaopCallbacks
COLUMNS = 12
GAP_LEDS = 0
TOTAL_LEDS = 100
SKIP_LEDS = 4
SAMPLE_RATE = 44100
NUM_CHANNELS = 2
FORMAT = aa.PCM_FORMAT_S16_LE
PERIOD_SIZE = 2048
frequency_limits = calculate_column_frequency(200, 10000, COLUMNS)
def analyze_airplay_input(led_strip):
from os.path import join
lib_path = join(sys.prefix, 'lib')
initialize_shairplay(lib_path, get_shairplay_callback_class(led_strip))
while True:
try:
pass
except KeyboardInterrupt:
shutdown_shairplay()
break
def analyze_audio_file(led_strip, path):
for chunk, sample_rate in read_musicfile_in_chunks(path, play_audio=True):
data = calculate_levels(chunk, sample_rate, frequency_limits)
led_strip.display_data(data)
def analyze_line_in(led_strip, hacker_school=True):
start_time = time.time()
while True:
if hacker_school and time.time() - start_time > 60 * 2:
hacker_school_display()
start_time = time.time()
size, chunk = input.read()
if size > 0:
L = (len(chunk)/2 * 2)
chunk = chunk[:L]
data = calculate_levels(chunk, SAMPLE_RATE, frequency_limits)
led_strip.display_data(data[::-1])
def get_audio_input():
input = aa.PCM(aa.PCM_CAPTURE, aa.PCM_NONBLOCK)
input.setchannels(NUM_CHANNELS)
input.setformat(aa.PCM_FORMAT_S16_BE)
input.setrate(SAMPLE_RATE)
input.setperiodsize(PERIOD_SIZE)
return input
def get_led_strip():
led = ColumnedLEDStrip(
leds=TOTAL_LEDS, columns=COLUMNS, gap_leds=GAP_LEDS, skip_leds=SKIP_LEDS
)
led.all_off()
return led
def get_shairplay_callback_class(led_strip):
class SampleCallbacks(RaopCallbacks):
def audio_init(self, bits, channels, samplerate):
print "Initializing", bits, channels, samplerate
self.bits = bits
self.channels = channels
self.samplerate = samplerate
min_frequency = 500
max_frequency = samplerate / 30 * 10 # Abusing integer division
self.frequency_limits = calculate_column_frequency(
min_frequency, max_frequency, COLUMNS
)
self.buffer = ''
def audio_process(self, session, buffer):
data = calculate_levels(buffer, self.samplerate, self.frequency_limits, self.channels, self.bits)
led_strip.display_data(data[::-1])
def audio_destroy(self, session):
print "Destroying"
def audio_set_volume(self, session, volume):
print "Set volume to", volume
def audio_set_metadata(self, session, metadata):
print "Got", len(metadata), "bytes of metadata"
def audio_set_coverart(self, session, coverart):
print "Got", len(coverart), "bytes of coverart"
return SampleCallbacks
def hacker_school_display(led_strip):
draw_logo(led_strip)
time.sleep(1)
show_text(led_strip, 'NEVER GRADUATE!', x_offset=3, y_offset=1, sleep=0.5)
if __name__ == '__main__':
from textwrap import dedent
input_types = ('local', 'linein', 'airplay')
usage = dedent("""\
Usage: %s <input-type> [additional arguments]
input-type: should be one of %s
To play a local file, you can pass the path to the file as an additional
argument.
""") % (sys.argv[0], input_types)
if len(sys.argv) == 1:
print usage
sys.exit(1)
input_type = sys.argv[1]
led_strip = get_led_strip()
if input_type == 'local':
path = sys.argv[2] if len(sys.argv) > 2 else 'sample.mp3'
analyze_audio_file(led_strip, path)
elif input_type == 'airplay':
analyze_airplay_input(led_strip)
elif input_type == 'linein':
analyze_line_in(led_strip)
else:
print usage
sys.exit(1)
| 28.44
| 109
| 0.665495
| 1,145
| 0.268401
| 0
| 0
| 0
| 0
| 0
| 0
| 502
| 0.117675
|
e1f1f1c95fd75ee0bf2a6e9603b88f2d439ebe8f
| 2,924
|
py
|
Python
|
2020/07/solution.py
|
dglmoore/advent-of-code
|
ca6e39a842a84ad5271891535c9323e057261d44
|
[
"MIT"
] | null | null | null |
2020/07/solution.py
|
dglmoore/advent-of-code
|
ca6e39a842a84ad5271891535c9323e057261d44
|
[
"MIT"
] | null | null | null |
2020/07/solution.py
|
dglmoore/advent-of-code
|
ca6e39a842a84ad5271891535c9323e057261d44
|
[
"MIT"
] | null | null | null |
import re
def part1(lines, yourbag="shiny gold"):
# A nice little regex that will extract a list of all bags in a given line.
# The first is the outermost bag, and the rest are inner bags.
pattern = re.compile(r"(?:\d*)\s*(.*?)\s*bags?[.,]?(?: contain)?\s*")
# We're going to use an adjacency list mapping each bag type to the bag
# types that can contain it.
contained_by = dict()
for line in lines:
outer, *innards = pattern.findall(line)
for inner in innards:
if inner != 'no other':
if inner in contained_by:
contained_by[inner].append(outer)
else:
contained_by[inner] = [outer]
# We're going to start at our bag type. Ask which bag types can contain it,
# add those to as stack, and then add our bag type to the set of all
# "working" outer bag types. Then pop the top bag type of the stack and
# repeat the above process. This continues until the stack is empty.
#
# The answer is then the number of bags in our set (less 1 for our inital
# bag).
#
# This is an alternative to using recursion. Really, though, it's just
# doing the recursion manually. The pushing and the popping off of the
# stack is done for you when you use recursion... you just can't see the
# stack... it's maintained internally. For more information google "call
# stack".
stack = [yourbag]
works = set()
while len(stack) != 0:
bag = stack.pop()
if bag not in works:
if bag in contained_by:
stack.extend(contained_by[bag])
works.add(bag)
return len(works) - 1
def part2(lines, yourbag="shiny gold"):
# This regex is similar to part 1 except it includes the number of times an
# inner bag type must occur.
pattern = re.compile(r"(\d*)\s*(.*?)\s*bags?[.,]?(?: contain)?\s*")
# We'll be keeping an adjacency list mapping each outer bag type to a list
# of the required inner bags and their multiplicies.
must_contain = dict()
for line in lines:
(_, outer), *innards = pattern.findall(line)
for (n, inner) in innards:
if inner != 'no other':
if outer in must_contain:
must_contain[outer].append((inner, int(n)))
else:
must_contain[outer] = [(inner, int(n))]
# I'll leave it to you to work this one out. ;-)
stack = [(yourbag, 1)]
numbags = 0
while len(stack) != 0:
bag, n = stack.pop()
numbags += n
if bag in must_contain:
for innerbag, m in must_contain[bag]:
stack.append((innerbag, n * m))
return numbags - 1
if __name__ == '__main__':
with open("test.txt") as handle:
lines = handle.readlines()
print("Part I: ", part1(lines))
print("Part II:", part2(lines))
| 35.658537
| 79
| 0.591313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,346
| 0.460328
|
e1f2c620730a24383f1404677c275f4158ee87bb
| 1,981
|
py
|
Python
|
src/m6_your_turtles.py
|
polsteaj/01-IntroductionToPython
|
155f56f66a5746baa4d5319d4e79c14aa857199b
|
[
"MIT"
] | null | null | null |
src/m6_your_turtles.py
|
polsteaj/01-IntroductionToPython
|
155f56f66a5746baa4d5319d4e79c14aa857199b
|
[
"MIT"
] | null | null | null |
src/m6_your_turtles.py
|
polsteaj/01-IntroductionToPython
|
155f56f66a5746baa4d5319d4e79c14aa857199b
|
[
"MIT"
] | null | null | null |
"""
Your chance to explore Loops and Turtles!
Authors: David Mutchler, Dave Fisher, Vibha Alangar, Amanda Stouder,
their colleagues and Alec Polster.
"""
import rosegraphics as rg
###############################################################################
# DONE: 1.
# On Line 5 above, replace PUT_YOUR_NAME_HERE with your own name.
###############################################################################
###############################################################################
# DONE: 2.
# You should have RUN the m5e_loopy_turtles module and READ its code.
# (Do so now if you have not already done so.)
#
# Below this comment, add ANY CODE THAT YOU WANT, as long as:
# 1. You construct at least 2 rg.SimpleTurtle objects.
# 2. Each rg.SimpleTurtle object draws something
# (by moving, using its rg.Pen). ANYTHING is fine!
# 3. Each rg.SimpleTurtle moves inside a LOOP.
#
# Be creative! Strive for way-cool pictures! Abstract pictures rule!
#
# If you make syntax (notational) errors, no worries -- get help
# fixing them at either this session OR at the NEXT session.
#
# Don't forget to COMMIT-and-PUSH when you are done with this module.
###############################################################################
window = rg.TurtleWindow()
my_turtle = rg.SimpleTurtle('turtle')
my_turtle.pen = rg.Pen('blue', 10)
my_turtle.speed = 10
your_turtle = rg.SimpleTurtle()
your_turtle.pen = rg.Pen('red', 5)
your_turtle.speed = 10
your_turtle.pen_up()
your_turtle.forward(3)
your_turtle.pen_down()
size = 300
for k in range(15):
my_turtle.draw_square(size)
my_turtle.pen_up()
my_turtle.right(45)
my_turtle.forward(10)
my_turtle.left(45)
my_turtle.pen_down()
your_turtle.draw_square(size-100)
your_turtle.pen_up()
your_turtle.right(45)
your_turtle.forward(10)
your_turtle.left(45)
your_turtle.pen_down()
size = size - 20
window.close_on_mouse_click()
| 33.576271
| 79
| 0.594144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,266
| 0.639071
|
e1f30a4f4d1925bf5687b7cf412adf4bd33cee9b
| 84
|
py
|
Python
|
docs/ResearchSession/manage.py
|
VoIlAlex/pytorchresearch
|
c4f08cd0ec6b78788e682005c099aef4582640cb
|
[
"MIT"
] | 1
|
2020-12-13T20:25:27.000Z
|
2020-12-13T20:25:27.000Z
|
docs/ResearchSession/manage.py
|
VoIlAlex/pytorchresearch
|
c4f08cd0ec6b78788e682005c099aef4582640cb
|
[
"MIT"
] | null | null | null |
docs/ResearchSession/manage.py
|
VoIlAlex/pytorchresearch
|
c4f08cd0ec6b78788e682005c099aef4582640cb
|
[
"MIT"
] | null | null | null |
from backbone import entry_point
if __name__ == '__main__':
entry_point.main()
| 16.8
| 32
| 0.738095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.119048
|
e1f4c12b169ff0fc2c245e310a2a7024653caedb
| 116
|
py
|
Python
|
base.py
|
chenzhangyu/WeiboOAuth
|
a00cc5983e989bb2ea8907b8d590a0a6c750d804
|
[
"MIT"
] | 1
|
2019-10-10T08:26:08.000Z
|
2019-10-10T08:26:08.000Z
|
base.py
|
chenzhangyu/WeiboOAuth
|
a00cc5983e989bb2ea8907b8d590a0a6c750d804
|
[
"MIT"
] | null | null | null |
base.py
|
chenzhangyu/WeiboOAuth
|
a00cc5983e989bb2ea8907b8d590a0a6c750d804
|
[
"MIT"
] | 1
|
2019-04-12T09:42:03.000Z
|
2019-04-12T09:42:03.000Z
|
# encoding=utf-8
__author__ = 'lance'
import tornado.web
class BaseHandler(tornado.web.RequestHandler):
pass
| 12.888889
| 46
| 0.75
| 55
| 0.474138
| 0
| 0
| 0
| 0
| 0
| 0
| 23
| 0.198276
|
e1f4f6334ab0ff9c96e987467be3ce874e28f3d7
| 2,958
|
py
|
Python
|
paddlers/custom_models/cd/cdnet.py
|
huilin16/PaddleRS
|
ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a
|
[
"Apache-2.0"
] | 40
|
2022-02-28T02:07:28.000Z
|
2022-03-31T09:54:29.000Z
|
paddlers/custom_models/cd/cdnet.py
|
huilin16/PaddleRS
|
ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a
|
[
"Apache-2.0"
] | 5
|
2022-03-15T12:13:33.000Z
|
2022-03-31T15:54:08.000Z
|
paddlers/custom_models/cd/cdnet.py
|
huilin16/PaddleRS
|
ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a
|
[
"Apache-2.0"
] | 20
|
2022-02-28T02:07:31.000Z
|
2022-03-31T11:40:40.000Z
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
class CDNet(nn.Layer):
def __init__(self, in_channels=6, num_classes=2):
super(CDNet, self).__init__()
self.conv1 = Conv7x7(in_channels, 64, norm=True, act=True)
self.pool1 = nn.MaxPool2D(2, 2, return_mask=True)
self.conv2 = Conv7x7(64, 64, norm=True, act=True)
self.pool2 = nn.MaxPool2D(2, 2, return_mask=True)
self.conv3 = Conv7x7(64, 64, norm=True, act=True)
self.pool3 = nn.MaxPool2D(2, 2, return_mask=True)
self.conv4 = Conv7x7(64, 64, norm=True, act=True)
self.pool4 = nn.MaxPool2D(2, 2, return_mask=True)
self.conv5 = Conv7x7(64, 64, norm=True, act=True)
self.upool4 = nn.MaxUnPool2D(2, 2)
self.conv6 = Conv7x7(64, 64, norm=True, act=True)
self.upool3 = nn.MaxUnPool2D(2, 2)
self.conv7 = Conv7x7(64, 64, norm=True, act=True)
self.upool2 = nn.MaxUnPool2D(2, 2)
self.conv8 = Conv7x7(64, 64, norm=True, act=True)
self.upool1 = nn.MaxUnPool2D(2, 2)
self.conv_out = Conv7x7(64, num_classes, norm=False, act=False)
def forward(self, t1, t2):
x = paddle.concat([t1, t2], axis=1)
x, ind1 = self.pool1(self.conv1(x))
x, ind2 = self.pool2(self.conv2(x))
x, ind3 = self.pool3(self.conv3(x))
x, ind4 = self.pool4(self.conv4(x))
x = self.conv5(self.upool4(x, ind4))
x = self.conv6(self.upool3(x, ind3))
x = self.conv7(self.upool2(x, ind2))
x = self.conv8(self.upool1(x, ind1))
return [self.conv_out(x)]
class Conv7x7(nn.Layer):
def __init__(self, in_ch, out_ch, norm=False, act=False):
super(Conv7x7, self).__init__()
layers = [
nn.Pad2D(3), nn.Conv2D(
in_ch, out_ch, 7, bias_attr=(False if norm else None))
]
if norm:
layers.append(nn.BatchNorm2D(out_ch))
if act:
layers.append(nn.ReLU())
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
if __name__ == "__main__":
t1 = paddle.randn((1, 3, 512, 512), dtype="float32")
t2 = paddle.randn((1, 3, 512, 512), dtype="float32")
model = CDNet(6, 2)
pred = model(t1, t2)[0]
print(pred.shape)
| 38.921053
| 75
| 0.610886
| 2,058
| 0.69574
| 0
| 0
| 0
| 0
| 0
| 0
| 637
| 0.215348
|
e1f4fb4322ad7bde9174a243c1005f58f9c30795
| 1,948
|
py
|
Python
|
contrib/make-leap-seconds.py
|
dmgerman/ntpsec
|
28dde8422e1a949e50663ae965d58c2fdbc782b9
|
[
"CC-BY-4.0",
"BSD-2-Clause",
"NTP",
"MIT",
"BSD-3-Clause"
] | null | null | null |
contrib/make-leap-seconds.py
|
dmgerman/ntpsec
|
28dde8422e1a949e50663ae965d58c2fdbc782b9
|
[
"CC-BY-4.0",
"BSD-2-Clause",
"NTP",
"MIT",
"BSD-3-Clause"
] | null | null | null |
contrib/make-leap-seconds.py
|
dmgerman/ntpsec
|
28dde8422e1a949e50663ae965d58c2fdbc782b9
|
[
"CC-BY-4.0",
"BSD-2-Clause",
"NTP",
"MIT",
"BSD-3-Clause"
] | 1
|
2021-09-24T18:19:49.000Z
|
2021-09-24T18:19:49.000Z
|
#!/usr/bin/env python
"""\
make-leap-seconds.py - make leap second file for testing
Optional args are date of leap second: YYYY-MM-DD
and expiration date of file.
Defaults are start of tomorrow (UTC), and 28 days after the leap.
"Start of tomorow" is as soon as possible for testing.
"""
# SPDX-License-Identifier: BSD-2-Clause
from __future__ import print_function, division
import datetime
import sha
import sys
import time
JAN_1970 = 2208988800 # convert Unix/POSIX epoch to NTP epoch
epoch = datetime.datetime.utcfromtimestamp(0)
args = sys.argv[1:]
leap = time.time()
days = int(leap/86400)
leap = (days+1)*86400
if len(args) > 0:
leapdate = datetime.datetime.strptime(args[0], "%Y-%m-%d")
leap = (leapdate - epoch).total_seconds()
leap = int(leap)
args = args[1:]
expire = leap + 28*86400
if len(args) > 0:
expiredate = datetime.datetime.strptime(args[0], "%Y-%m-%d")
expire = (expiredate - epoch).total_seconds()
expire = int(expire)
args = args[1:]
leap_txt = time.asctime(time.gmtime(leap))
leap = str(leap+JAN_1970)
expire_txt = time.asctime(time.gmtime(expire))
expire = str(expire+JAN_1970)
update = int(time.time())
update_txt = time.asctime(time.gmtime(update))
update = str(update+JAN_1970)
tai = "40" # hardwired
# File format
#
# # is comment
# #$ xxx Update Date
# #@ xxx Expiration Date
# #h SHA1 hash of payload
#
# #$ 3676924800
# #@ 3707596800
# 2272060800 10 # 1 Jan 1972
# #h dacf2c42 2c4765d6 3c797af8 2cf630eb 699c8c67
#
# All dates use NTP epoch of 1900-01-01
sha1 = sha.new()
print("%s %s # %s" % (leap, tai, leap_txt))
sha1.update(leap)
sha1.update(tai)
print("#@ %s # %s" % (expire, expire_txt))
sha1.update(expire)
print("#$ %s # %s" % (update, update_txt))
sha1.update(update)
digest = sha1.hexdigest()
print("#h %s %s %s %s %s" %
(digest[0:8], digest[8:16], digest[16:24], digest[24:32], digest[32:40]))
# end
| 24.35
| 79
| 0.664784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 748
| 0.383984
|
e1f5bc34418af89095c0d30d7b41fe28a2137a99
| 695
|
py
|
Python
|
tests/profiling/test_scheduler.py
|
uniq10/dd-trace-py
|
ca9ce1fe552cf03c2828bcd160e537336aa275d5
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2020-10-17T14:55:46.000Z
|
2020-10-17T14:55:46.000Z
|
tests/profiling/test_scheduler.py
|
uniq10/dd-trace-py
|
ca9ce1fe552cf03c2828bcd160e537336aa275d5
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2020-12-22T16:56:55.000Z
|
2020-12-22T16:56:55.000Z
|
tests/profiling/test_scheduler.py
|
uniq10/dd-trace-py
|
ca9ce1fe552cf03c2828bcd160e537336aa275d5
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2020-12-22T16:54:02.000Z
|
2020-12-22T16:54:02.000Z
|
# -*- encoding: utf-8 -*-
from ddtrace.profiling import event
from ddtrace.profiling import exporter
from ddtrace.profiling import recorder
from ddtrace.profiling import scheduler
class _FailExporter(exporter.Exporter):
@staticmethod
def export(events):
raise Exception("BOO!")
def test_exporter_failure():
r = recorder.Recorder()
exp = _FailExporter()
s = scheduler.Scheduler(r, [exp])
r.push_events([event.Event()] * 10)
s.flush()
def test_thread_name():
r = recorder.Recorder()
exp = exporter.NullExporter()
s = scheduler.Scheduler(r, [exp])
s.start()
assert s._worker.name == "ddtrace.profiling.scheduler:Scheduler"
s.stop()
| 23.965517
| 68
| 0.689209
| 113
| 0.16259
| 0
| 0
| 69
| 0.099281
| 0
| 0
| 70
| 0.100719
|
e1f73d543e655fe197f206bbd67ec8e450d4935c
| 5,546
|
py
|
Python
|
scrape_reviews/scrape_reviews/spiders/imdb_spider.py
|
eshwarkoka/sentiment_analysis_on_movie_reviews
|
16ad65904ea1446f0b5d2f432e48581414e12c04
|
[
"MIT"
] | null | null | null |
scrape_reviews/scrape_reviews/spiders/imdb_spider.py
|
eshwarkoka/sentiment_analysis_on_movie_reviews
|
16ad65904ea1446f0b5d2f432e48581414e12c04
|
[
"MIT"
] | 2
|
2020-09-09T16:48:28.000Z
|
2020-09-09T16:48:36.000Z
|
scrape_reviews/scrape_reviews/spiders/imdb_spider.py
|
eshwarkoka/sentiment_analysis_on_movie_reviews
|
16ad65904ea1446f0b5d2f432e48581414e12c04
|
[
"MIT"
] | null | null | null |
import scrapy,json,re,time,os,glob
from scrapy.exceptions import CloseSpider
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
#get all the imdb xpaths from xpaths.json file
with open('./locators/xpaths.json') as f:
xpaths = json.load(f)
imdb = xpaths["imdb"][0]
#define all the required variables
movie_name = ''
project_path = r'/Users/eshwar/Documents/projects/sentiment_analysis_on_movie_reviews/'
scraped_reviews_path = project_path + "data/scraped_reviews/"
predicted_reviews_path = project_path + "data/predicted_reviews/"
chrome_driver_path = project_path+"scrape_reviews/chrome_driver/chromedriver"
class IMDBSpider(scrapy.Spider):
name = 'imdb_spider'
allowed_domains = ["imdb.com"]
start_urls = [
'https://www.imdb.com/find?ref_=nv_sr_fn&q='
]
def start_requests(self):
for url in self.start_urls:
yield scrapy.Request(url+self.ip+"&s=tt" , dont_filter=True)
def parse(self, response):
#get all the globally defined variables
global movie_name, project_path, scraped_reviews_path, chrome_driver_path
#get first title
first_title = response.xpath(imdb["first_title"]).extract()
#extract title id from first title
for each_split in first_title[0].split("/"):
if each_split.startswith("tt"):
title_id = each_split
#extract movie name from first title
movie_name = str(re.search(r'">(.+?)</a>', str(first_title[0])).group(1)).replace(" ","_")
temp_movie_name = movie_name
#put timestamp
epoch = time.time()
movie_name+="$#$"+str(epoch)
# create temp file to store movie name temporarily
with open(scraped_reviews_path + "temp.txt", 'w') as f:
f.write(movie_name)
#check timestamp
current_dir = os.getcwd()
change_dir = scraped_reviews_path
os.chdir(change_dir)
temp = temp_movie_name+"$#$"
old_file_name = glob.glob(temp+"*")
diff = 0
#flag determines if searched movie is already searched within a week or not
#flag = 0 (file available)
#flag = 1 (new search)
flag = 1
if len(old_file_name) > 0:
old_file_name = old_file_name[0]
old_timestamp = old_file_name.split("$#$")[1][:-5]
diff = epoch - float(old_timestamp)
if diff < 604800:
flag = 0
with open(project_path+"flag.txt", "w") as f:
f.write(str(flag))
raise CloseSpider('file available')
else:
os.remove(scraped_reviews_path+old_file_name)
os.remove(predicted_reviews_path+old_file_name)
os.chdir(current_dir)
#form imdb reviews link
reviews_link = imdb["urv_link_part_1"] + title_id + imdb["urv_link_part_2"]
#get chrome driver executable
options = Options()
options.headless = True
chrome_driver = webdriver.Chrome(chrome_driver_path, chrome_options=options)
#go to reviews link
chrome_driver.get(reviews_link)
#click load more button until the button exists
while True:
try:
WebDriverWait(chrome_driver, 10).until(EC.element_to_be_clickable((By.XPATH, imdb["load_more_button"]))).click()
except TimeoutException as ex:
break
#get the number of reviews
num_of_reviews = chrome_driver.find_element_by_xpath(imdb["number_of_reviews"]).text
reviews_no = num_of_reviews.split()[0]
print(reviews_no)
#open all the spoilers
spoiler_click = chrome_driver.find_elements_by_xpath(imdb["spoiler_open"])
for i in range(0, len(spoiler_click)):
if spoiler_click[i].is_displayed():
spoiler_click[i].click()
#get all the reviews
reviews = chrome_driver.find_elements_by_xpath(imdb["reviews"])
#convert reviews to list
reviews_list = [str(review.text).replace("\n"," ") for review in reviews]
#get all the authors
authors = chrome_driver.find_elements_by_xpath(imdb["authors"])
#convert authors to list
authors_list = [a.text for a in authors]
#get all the review dates
review_dates = chrome_driver.find_elements_by_xpath(imdb["review_dates"])
#convert review dates to list
review_dates_list = [rd.text for rd in review_dates]
#get all the titles
titles = chrome_driver.find_elements_by_xpath(imdb["titles"])
#convert titles to list
titles_list = [str(t.text).replace("\n", " ") for t in titles]
#create json_data variable with authors, review dates, titles and reviews
json_data = [
{
"author" : a,
"review_date" : rd,
"title" : t,
"review" : re
} for a, rd, t, re in zip(authors_list, review_dates_list, titles_list, reviews_list)
]
output_filename = scraped_reviews_path + movie_name + ".json"
with open(output_filename, 'w') as f:
json.dump(json_data, f, ensure_ascii=False, indent=4)
#close the chrome driver
chrome_driver.close()
| 36.728477
| 128
| 0.638118
| 4,674
| 0.84277
| 134
| 0.024162
| 0
| 0
| 0
| 0
| 1,409
| 0.254057
|
e1f89f4c50e5d75fea57eee72158205ed8c1ffe8
| 423
|
py
|
Python
|
backend/notifications/admin.py
|
ProgrammingLanguageLeader/TutorsApp
|
f2d5968b5c29ce75f5f634d6076a6e66efc76801
|
[
"MIT"
] | 3
|
2019-02-24T23:30:19.000Z
|
2019-03-27T20:06:53.000Z
|
backend/notifications/admin.py
|
ProgrammingLanguageLeader/TutorsApp
|
f2d5968b5c29ce75f5f634d6076a6e66efc76801
|
[
"MIT"
] | 1
|
2019-03-30T08:58:06.000Z
|
2019-03-30T08:58:06.000Z
|
backend/notifications/admin.py
|
ProgrammingLanguageLeader/TutorsApp
|
f2d5968b5c29ce75f5f634d6076a6e66efc76801
|
[
"MIT"
] | 1
|
2019-03-01T20:10:19.000Z
|
2019-03-01T20:10:19.000Z
|
from django.contrib import admin
from notifications.models import Notification
@admin.register(Notification)
class NotificationAdmin(admin.ModelAdmin):
list_display = (
'sender',
'recipient',
'creation_time',
'verb',
'unread',
)
list_filter = (
'sender',
'recipient',
'unread',
'verb',
)
search_fields = (
'verb',
)
| 17.625
| 45
| 0.553191
| 310
| 0.732861
| 0
| 0
| 340
| 0.803783
| 0
| 0
| 87
| 0.205674
|
e1f95d627c633bc21a45b92e2b2fbf936f530ed6
| 1,916
|
py
|
Python
|
logistic-regression/code.py
|
kalpeshsnaik09/ga-learner-dsmp-repo
|
b0b8b0b1e8f91d6462d1ea129f86595b5200a4c4
|
[
"MIT"
] | null | null | null |
logistic-regression/code.py
|
kalpeshsnaik09/ga-learner-dsmp-repo
|
b0b8b0b1e8f91d6462d1ea129f86595b5200a4c4
|
[
"MIT"
] | null | null | null |
logistic-regression/code.py
|
kalpeshsnaik09/ga-learner-dsmp-repo
|
b0b8b0b1e8f91d6462d1ea129f86595b5200a4c4
|
[
"MIT"
] | null | null | null |
# --------------
# import the libraries
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
# Code starts here
df=pd.read_csv(path)
print(df.head())
X=df.drop(columns='insuranceclaim')
y=df['insuranceclaim']
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=6)
# Code ends here
# --------------
import matplotlib.pyplot as plt
# Code starts here
plt.boxplot(X_train['bmi'])
plt.show()
q_value=X_train['bmi'].quantile(0.95)
print(y_train.value_counts())
# Code ends here
# --------------
import seaborn as sns
# Code starts here
relation=X_train.corr()
print(relation)
sns.pairplot(X_train)
plt.show()
# Code ends here
# --------------
import seaborn as sns
import matplotlib.pyplot as plt
# Code starts here
cols=['children','sex','region','smoker']
fig,axes=plt.subplots(2,2)
for i in range(2):
for j in range(2):
col=cols[i*2+j]
sns.countplot(X_train[col],hue=y_train,ax=axes[i,j])
# Code ends here
# --------------
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# parameters for grid search
parameters = {'C':[0.1,0.5,1,5]}
# Code starts here
lr=LogisticRegression(random_state=9)
grid=GridSearchCV(estimator=lr,param_grid=parameters)
grid.fit(X_train,y_train)
y_pred=grid.predict(X_test)
accuracy=accuracy_score(y_test,y_pred)
print(accuracy)
# Code ends here
# --------------
from sklearn.metrics import roc_auc_score
from sklearn import metrics
# Code starts here
score=roc_auc_score(y_test,y_pred)
y_pred_proba=grid.predict_proba(X_test)[:,1]
fpr,tpr,_=metrics.roc_curve(y_test,y_pred)
roc_auc=roc_auc_score(y_test,y_pred_proba)
plt.plot(fpr,tpr,label="Logistic model, auc="+str(roc_auc))
# Code ends here
| 20.602151
| 80
| 0.731733
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 456
| 0.237996
|
e1fa2fd607868b6a76f691220804b86d0b59aec1
| 2,227
|
py
|
Python
|
macro_benchmark/SSD_Tensorflow/caffe_to_tensorflow.py
|
songhappy/ai-matrix
|
901078e480c094235c721c49f8141aec7a84e70e
|
[
"Apache-2.0"
] | 180
|
2018-09-20T07:27:40.000Z
|
2022-03-19T07:55:42.000Z
|
macro_benchmark/SSD_Tensorflow/caffe_to_tensorflow.py
|
songhappy/ai-matrix
|
901078e480c094235c721c49f8141aec7a84e70e
|
[
"Apache-2.0"
] | 80
|
2018-09-26T18:55:56.000Z
|
2022-02-10T02:03:26.000Z
|
macro_benchmark/SSD_Tensorflow/caffe_to_tensorflow.py
|
songhappy/ai-matrix
|
901078e480c094235c721c49f8141aec7a84e70e
|
[
"Apache-2.0"
] | 72
|
2018-08-30T00:49:15.000Z
|
2022-02-15T23:22:40.000Z
|
"""Convert a Caffe model file to TensorFlow checkpoint format.
Assume that the network built is a equivalent (or a sub-) to the Caffe
definition.
"""
import tensorflow as tf
from nets import caffe_scope
from nets import nets_factory
slim = tf.contrib.slim
# =========================================================================== #
# Main flags.
# =========================================================================== #
tf.app.flags.DEFINE_string(
'model_name', 'ssd_300_vgg', 'Name of the model to convert.')
tf.app.flags.DEFINE_string(
'num_classes', 21, 'Number of classes in the dataset.')
tf.app.flags.DEFINE_string(
'caffemodel_path', None,
'The path to the Caffe model file to convert.')
FLAGS = tf.app.flags.FLAGS
# =========================================================================== #
# Main converting routine.
# =========================================================================== #
def main(_):
# Caffe scope...
caffemodel = caffe_scope.CaffeScope()
caffemodel.load(FLAGS.caffemodel_path)
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
global_step = slim.create_global_step()
num_classes = int(FLAGS.num_classes)
# Select the network.
ssd_class = nets_factory.get_network(FLAGS.model_name)
ssd_params = ssd_class.default_params._replace(num_classes=num_classes)
ssd_net = ssd_class(ssd_params)
ssd_shape = ssd_net.params.img_shape
# Image placeholder and model.
shape = (1, ssd_shape[0], ssd_shape[1], 3)
img_input = tf.placeholder(shape=shape, dtype=tf.float32)
# Create model.
with slim.arg_scope(ssd_net.arg_scope_caffe(caffemodel)):
ssd_net.net(img_input, is_training=False)
init_op = tf.global_variables_initializer()
with tf.Session() as session:
# Run the init operation.
session.run(init_op)
# Save model in checkpoint.
saver = tf.train.Saver()
ckpt_path = FLAGS.caffemodel_path.replace('.caffemodel', '.ckpt')
saver.save(session, ckpt_path, write_meta_graph=False)
if __name__ == '__main__':
tf.app.run()
| 33.238806
| 79
| 0.579704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 836
| 0.375393
|
e1faa2d284c1670dec2da5bc75095f1370cf8e94
| 1,211
|
py
|
Python
|
setup.py
|
danihodovic/django-toolshed
|
78d559db662488bafbd3f701f4c0c5304ae151d9
|
[
"MIT"
] | 3
|
2021-08-09T11:59:16.000Z
|
2021-08-09T12:44:54.000Z
|
setup.py
|
danihodovic/django-toolshed
|
78d559db662488bafbd3f701f4c0c5304ae151d9
|
[
"MIT"
] | null | null | null |
setup.py
|
danihodovic/django-toolshed
|
78d559db662488bafbd3f701f4c0c5304ae151d9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import re
from setuptools import find_packages, setup
def get_version(*file_paths):
filename = os.path.join(os.path.dirname(__file__), *file_paths)
version_file = open(filename).read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
version = get_version("django_toolshed", "__init__.py")
readme = open("README.md").read()
setup(
name="django-toolshed",
version=version,
description="""Your project description goes here""",
long_description=readme,
author="Dani Hodovic",
author_email="you@example.com",
url="https://github.com/danihodovic/django-toolshed",
packages=find_packages(),
include_package_data=True,
install_requires=[],
license="MIT",
keywords="django,app",
classifiers=[
"Development Status :: 3 - Alpha",
"Framework :: Django :: 2.0",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
],
)
| 28.162791
| 88
| 0.652353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 484
| 0.39967
|
e1fcd5a6b602e7b63e359f1c120e157503211aa4
| 5,686
|
py
|
Python
|
detection/models/roi_extractors/roi_align.py
|
waiiinta/object_detection_lab
|
6af56ab1c0f595181d87163c62e613398ac96af8
|
[
"MIT"
] | 13
|
2020-01-04T07:37:38.000Z
|
2021-08-31T05:19:58.000Z
|
detection/models/roi_extractors/roi_align.py
|
waiiinta/object_detection_lab
|
6af56ab1c0f595181d87163c62e613398ac96af8
|
[
"MIT"
] | 3
|
2020-06-05T22:42:53.000Z
|
2020-08-24T07:18:54.000Z
|
detection/models/roi_extractors/roi_align.py
|
waiiinta/object_detection_lab
|
6af56ab1c0f595181d87163c62e613398ac96af8
|
[
"MIT"
] | 9
|
2020-10-19T04:53:06.000Z
|
2021-08-31T05:20:01.000Z
|
import tensorflow as tf
from detection.utils.misc import *
class PyramidROIAlign(tf.keras.layers.Layer):
def __init__(self, pool_shape, **kwargs):
'''
Implements ROI Pooling on multiple levels of the feature pyramid.
Attributes
---
pool_shape: (height, width) of the output pooled regions.
Example: (7, 7)
'''
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
def call(self, inputs, training=True):
'''
Args
---
rois_list: list of [num_rois, (y1, x1, y2, x2)] in normalized coordinates.
feature_map_list: List of [batch, height, width, channels].
feature maps from different levels of the pyramid.
img_metas: [batch_size, 11]
Returns
---
pooled_rois_list: list of [num_rois, pooled_height, pooled_width, channels].
The width and height are those specific in the pool_shape in the layer
constructor.
'''
rois_list, feature_map_list, img_metas = inputs # [2000 ,4], list:[P2, P3, P4, P5]
pad_shapes = calc_pad_shapes(img_metas)
pad_areas = pad_shapes[:, 0] * pad_shapes[:, 1] # 1216*1216
num_rois_list = [rois.shape.as_list()[0] for rois in rois_list] # data:[2000]
roi_indices = tf.constant(
[i for i in range(len(rois_list)) for _ in range(rois_list[i].shape.as_list()[0])],
dtype=tf.int32
) #[0.....], shape:[2000]
areas = tf.constant(# range(1) range(2000)
[pad_areas[i] for i in range(pad_areas.shape[0]) for _ in range(num_rois_list[i])],
dtype=tf.float32
)#[1216*1216, 1216*1216,...], shape:[2000]
rois = tf.concat(rois_list, axis=0) # [2000, 4]
# Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = tf.split(rois, 4, axis=1) # 4 of [2000, 1]
h = y2 - y1 # [2000, 1]
w = x2 - x1 # [2000, 1]
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
roi_level = tf.math.log( # [2000]
tf.sqrt(tf.squeeze(h * w, 1))
/ tf.cast((224.0 / tf.sqrt(areas * 1.0)), tf.float32)
) / tf.math.log(2.0)
roi_level = tf.minimum(5, tf.maximum( # [2000], clamp to [2-5]
2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
# roi_level will indicates which level of feature to use
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled_rois = []
roi_to_level = []
for i, level in enumerate(range(2, 6)): # 2,3,4,5
ix = tf.where(tf.equal(roi_level, level)) # [1999, 1], means 1999 of 2000 select P2
level_rois = tf.gather_nd(rois, ix) # boxes to crop, [1999, 4]
# ROI indices for crop_and_resize.
level_roi_indices = tf.gather_nd(roi_indices, ix) # [19999], data:[0....0]
# Keep track of which roi is mapped to which level
roi_to_level.append(ix)
# Stop gradient propogation to ROI proposals
level_rois = tf.stop_gradient(level_rois)
level_roi_indices = tf.stop_gradient(level_roi_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
# Result: [batch * num_rois, pool_height, pool_width, channels]
pooled_rois.append(tf.image.crop_and_resize(
feature_map_list[i], level_rois, level_roi_indices, self.pool_shape,
method="bilinear")) # [1, 304, 304, 256], [1999, 4], [1999], [2]=[7,7]=>[1999,7,7,256]
# [1999, 7, 7, 256], [], [], [1,7,7,256] => [2000, 7, 7, 256]
# Pack pooled features into one tensor
pooled_rois = tf.concat(pooled_rois, axis=0)
# Pack roi_to_level mapping into one array and add another
# column representing the order of pooled rois
roi_to_level = tf.concat(roi_to_level, axis=0) # [2000, 1], 1999 of P2, and 1 other P
roi_range = tf.expand_dims(tf.range(tf.shape(roi_to_level)[0]), 1) # [2000, 1], 0~1999
roi_to_level = tf.concat([tf.cast(roi_to_level, tf.int32), roi_range],
axis=1) # [2000, 2], (P, range)
# Rearrange pooled features to match the order of the original rois
# Sort roi_to_level by batch then roi indextf.Tensor([ 0 100001 200002 ... 199801997 199901998 20101999], shape=(2000,), dtype=int32)
# TF doesn't have a way to sort by two columns, so merge them and sort.
sorting_tensor = roi_to_level[:, 0] * 100000 + roi_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape( # k=2000
roi_to_level)[0]).indices[::-1]# reverse the order
ix = tf.gather(roi_to_level[:, 1], ix) # [2000]
pooled_rois = tf.gather(pooled_rois, ix) # [2000, 7, 7, 256]
# 2000 of [7, 7, 256]
pooled_rois_list = tf.split(pooled_rois, num_rois_list, axis=0)
return pooled_rois_list
| 45.854839
| 155
| 0.577559
| 5,624
| 0.989096
| 0
| 0
| 0
| 0
| 0
| 0
| 2,735
| 0.481006
|
e1ff64213edb5548904c05273b193883e930a827
| 150
|
py
|
Python
|
examples/simple_regex/routes/__init__.py
|
nekonoshiri/tiny-router
|
3bb808bcc9f9eb368ee390179dfc5e9d48cf8600
|
[
"MIT"
] | null | null | null |
examples/simple_regex/routes/__init__.py
|
nekonoshiri/tiny-router
|
3bb808bcc9f9eb368ee390179dfc5e9d48cf8600
|
[
"MIT"
] | null | null | null |
examples/simple_regex/routes/__init__.py
|
nekonoshiri/tiny-router
|
3bb808bcc9f9eb368ee390179dfc5e9d48cf8600
|
[
"MIT"
] | null | null | null |
from ..router import Router
from . import create_user, get_user
router = Router()
router.include(get_user.router)
router.include(create_user.router)
| 21.428571
| 35
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c0003725e83dcd344816d0f9a584c175d9cf972f
| 712
|
py
|
Python
|
poetry/packages/constraints/any_constraint.py
|
vanyakosmos/poetry
|
b218969107e49dc57e65dbc0d349e83cbe1f44a8
|
[
"MIT"
] | 2
|
2019-06-19T15:07:58.000Z
|
2019-11-24T14:08:55.000Z
|
poetry/packages/constraints/any_constraint.py
|
vanyakosmos/poetry
|
b218969107e49dc57e65dbc0d349e83cbe1f44a8
|
[
"MIT"
] | 18
|
2020-01-15T04:11:31.000Z
|
2020-06-30T13:24:27.000Z
|
poetry/packages/constraints/any_constraint.py
|
vanyakosmos/poetry
|
b218969107e49dc57e65dbc0d349e83cbe1f44a8
|
[
"MIT"
] | 1
|
2021-04-08T03:26:23.000Z
|
2021-04-08T03:26:23.000Z
|
from .base_constraint import BaseConstraint
from .empty_constraint import EmptyConstraint
class AnyConstraint(BaseConstraint):
def allows(self, other):
return True
def allows_all(self, other):
return True
def allows_any(self, other):
return True
def difference(self, other):
if other.is_any():
return EmptyConstraint()
return other
def intersect(self, other):
return other
def union(self, other):
return AnyConstraint()
def is_any(self):
return True
def is_empty(self):
return False
def __str__(self):
return "*"
def __eq__(self, other):
return other.is_any()
| 18.736842
| 45
| 0.622191
| 619
| 0.869382
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0.004213
|