blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d12e1498499716d532c06a62f69e953762cfd604 | be61a9f30274514857ea34297719157f1e5b8447 | /fhir/resources/DSTU2/eligibilityresponse.py | 83758ea3826fdf34e860f691830f098b99803f84 | [
"BSD-3-Clause"
] | permissive | jwygoda/fhir.resources | ceff3a620100d2e875136b86d3e82816c0e60a33 | 5053565570d1ca992d9971d20db813c53fd350b9 | refs/heads/master | 2021-02-05T02:59:17.436485 | 2019-07-18T10:57:33 | 2019-07-18T10:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,429 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/EligibilityResponse) on 2019-05-14.
# 2019, SMART Health IT.
from . import domainresource
class EligibilityResponse(domainresource.DomainResource):
    """ EligibilityResponse resource.
    This resource provides eligibility and plan details from the processing of
    an Eligibility resource.
    """
    # FHIR resource type name, used by the DomainResource base machinery
    # during (de)serialization.
    resource_name = "EligibilityResponse"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # All elements default to None; the base-class initializer called at
        # the end fills them from `jsondict` using elementProperties().
        self.created = None
        """ Creation date.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.disposition = None
        """ Disposition Message.
        Type `str`. """
        self.identifier = None
        """ Business Identifier.
        List of `Identifier` items (represented as `dict` in JSON). """
        self.organization = None
        """ Insurer.
        Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """
        self.originalRuleset = None
        """ Original version.
        Type `Coding` (represented as `dict` in JSON). """
        self.outcome = None
        """ complete | error.
        Type `str`. """
        self.request = None
        """ Claim reference.
        Type `FHIRReference` referencing `EligibilityRequest` (represented as `dict` in JSON). """
        self.requestOrganization = None
        """ Responsible organization.
        Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """
        self.requestProvider = None
        """ Responsible practitioner.
        Type `FHIRReference` referencing `Practitioner` (represented as `dict` in JSON). """
        self.ruleset = None
        """ Resource version.
        Type `Coding` (represented as `dict` in JSON). """
        super(EligibilityResponse, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        js = super(EligibilityResponse, self).elementProperties()
        # Tuple layout appears to follow the fhirclient generator convention:
        # (python name, JSON name, type, is_list, "of many" group, required).
        # `identifier` is the only list-valued element here (see its
        # attribute doc above) — confirm against the base class.
        js.extend([
            ("created", "created", fhirdate.FHIRDate, False, None, False),
            ("disposition", "disposition", str, False, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("organization", "organization", fhirreference.FHIRReference, False, None, False),
            ("originalRuleset", "originalRuleset", coding.Coding, False, None, False),
            ("outcome", "outcome", str, False, None, False),
            ("request", "request", fhirreference.FHIRReference, False, None, False),
            ("requestOrganization", "requestOrganization", fhirreference.FHIRReference, False, None, False),
            ("requestProvider", "requestProvider", fhirreference.FHIRReference, False, None, False),
            ("ruleset", "ruleset", coding.Coding, False, None, False),
        ])
        return js
# Imported at the bottom of the module (the code generator's layout): these
# names are referenced only inside elementProperties(), so importing late
# keeps the class definition above self-contained — presumably to dodge
# circular imports among the generated resource modules.
from . import coding
from . import fhirdate
from . import fhirreference
from . import identifier
| [
"connect2nazrul@gmail.com"
] | connect2nazrul@gmail.com |
61d8403f84bf49cda2675407bf44295078cfd9df | 449fb247397f2e029d3d73fc441fac43004b58c5 | /autonetkit/topologies/multi_as.py | 8659401408a0444e06513d550eb6286f9dd73594 | [] | no_license | iainwp/autonetkit | e975697f7b6426abc681c9d14929302ec9dc3c74 | 7bf893fa3849174ee3514ac2859ad53e121fd8b9 | refs/heads/master | 2020-07-22T17:55:52.659795 | 2014-10-14T06:13:51 | 2014-10-14T06:13:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,283 | py | def multi_as():
"""Returns anm with input and physical as house graph"""
import autonetkit
import networkx as nx
from autonetkit.load.load_json import simple_to_nx
# returns a house graph
data = {'directed': False,
'graph': [('node_default',
{'asn': 1,
'device_type': 'router',
'host': 'localhost',
'ibgp_l3_cluster': u'None',
'ibgp_role': u'None',
'igp': 'ospf',
'ospf_area': 0,
'platform': 'netkit'}),
('address_family', 'v4'),
('file_type', 'graphml'),
('enable_routing', True),
('edge_default', {'ospf_cost': 1, 'type': 'physical'}),
('specified_int_names', False)],
'links': [{'dst': 'r2',
'dst_port': '_port1',
'id': 'e5',
'ospf_cost': 1,
'src': 'r1',
'src_port': '_port1',
'type': 'physical'},
{'dst': 'r8',
'dst_port': '_port1',
'id': 'e10',
'ospf_cost': 1,
'src': 'r10',
'src_port': '_port1',
'type': 'physical'},
{'dst': 'r9',
'dst_port': '_port1',
'id': 'e11',
'ospf_cost': 1,
'src': 'r10',
'src_port': '_port2',
'type': 'physical'},
{'dst': 'r2',
'dst_port': '_port2',
'id': 'e4',
'ospf_cost': 1,
'src': 'r3',
'src_port': '_port1',
'type': 'physical'},
{'dst': 'r1',
'dst_port': '_port2',
'id': 'e6',
'ospf_cost': 1,
'src': 'r4',
'src_port': '_port1',
'type': 'physical'},
{'dst': 'r2',
'dst_port': '_port3',
'id': 'e13',
'ospf_cost': 1,
'src': 'r4',
'src_port': '_port2',
'type': 'physical'},
{'dst': 'r5',
'dst_port': '_port1',
'id': 'e7',
'ospf_cost': 1,
'src': 'r4',
'src_port': '_port3',
'type': 'physical'},
{'dst': 'r7',
'dst_port': '_port1',
'id': 'e2',
'ospf_cost': 1,
'src': 'r4',
'src_port': '_port4',
'type': 'physical'},
{'dst': 'r3',
'dst_port': '_port2',
'id': 'e3',
'ospf_cost': 1,
'src': 'r5',
'src_port': '_port2',
'type': 'physical'},
{'dst': 'r6',
'dst_port': '_port1',
'id': 'e8',
'ospf_cost': 1,
'src': 'r5',
'src_port': '_port3',
'type': 'physical'},
{'dst': 'r8',
'dst_port': '_port2',
'id': 'e12',
'ospf_cost': 1,
'src': 'r5',
'src_port': '_port4',
'type': 'physical'},
{'dst': 'r8',
'dst_port': '_port3',
'id': 'e9',
'ospf_cost': 1,
'src': 'r6',
'src_port': '_port2',
'type': 'physical'},
{'dst': 'r8',
'dst_port': '_port4',
'id': 'e0',
'ospf_cost': 1,
'src': 'r7',
'src_port': '_port2',
'type': 'physical'},
{'dst': 'r9',
'dst_port': '_port2',
'id': 'e1',
'ospf_cost': 1,
'src': 'r7',
'src_port': '_port3',
'type': 'physical'}],
'multigraph': False,
'nodes': [{'asn': 1,
'device_type': 'router',
'host': 'localhost',
'ibgp_l3_cluster': u'A',
'ibgp_role': u'RRC',
'id': 'r1',
'igp': 'ospf',
'label': 'r1',
'ospf_area': 0,
'platform': 'netkit',
'ports': [{'category': 'loopback',
'description': 'loopback',
'id': 'Loopback0'},
{'category': 'physical',
'description': 'r1 to r2', 'id': '_port1'},
{'category': 'physical', 'description': 'r1 to r4', 'id': '_port2'}],
'raw_interfaces': {0: {'category': 'loopback', 'description': 'loopback'},
1: {'category': 'physical', 'description': 'r1 to r2'},
2: {'category': 'physical', 'description': 'r1 to r4'}},
'specified_int_names': None,
'syntax': 'quagga',
'vrf': None,
'x': 555.0,
'y': 285.0},
{'asn': 3,
'device_type': 'router',
'host': 'localhost',
'ibgp_l3_cluster': u'None',
'ibgp_role': u'Peer',
'id': 'r10',
'igp': 'ospf',
'label': 'r10',
'ospf_area': 0,
'platform': 'netkit',
'ports': [{'category': 'loopback',
'description': 'loopback',
'id': 'Loopback0'},
{'category': 'physical',
'description': 'r10 to r8', 'id': '_port1'},
{'category': 'physical', 'description': 'r10 to r9', 'id': '_port2'}],
'raw_interfaces': {0: {'category': 'loopback', 'description': 'loopback'},
1: {'category': 'physical', 'description': 'r10 to r8'},
2: {'category': 'physical', 'description': 'r10 to r9'}},
'specified_int_names': None,
'syntax': 'quagga',
'vrf': None,
'x': 765.0,
'y': 500.52000000000004},
{'asn': 1,
'device_type': 'router',
'host': 'localhost',
'ibgp_l3_cluster': u'A',
'ibgp_role': u'RRC',
'id': 'r2',
'igp': 'ospf',
'label': 'r2',
'ospf_area': 0,
'platform': 'netkit',
'ports': [{'category': 'loopback',
'description': 'loopback',
'id': 'Loopback0'},
{'category': 'physical',
'description': 'r2 to r1', 'id': '_port1'},
{'category': 'physical',
'description': 'r2 to r3', 'id': '_port2'},
{'category': 'physical', 'description': 'r2 to r4', 'id': '_port3'}],
'raw_interfaces': {0: {'category': 'loopback', 'description': 'loopback'},
1: {'category': 'physical', 'description': 'r2 to r1'},
2: {'category': 'physical', 'description': 'r2 to r3'},
3: {'category': 'physical', 'description': 'r2 to r4'}},
'specified_int_names': None,
'syntax': 'quagga',
'vrf': None,
'x': 645.0,
'y': 255.0},
{'asn': 1,
'device_type': 'router',
'host': 'localhost',
'ibgp_l3_cluster': u'A',
'ibgp_role': u'RRC',
'id': 'r3',
'igp': 'ospf',
'label': 'r3',
'ospf_area': 0,
'platform': 'netkit',
'ports': [{'category': 'loopback',
'description': 'loopback',
'id': 'Loopback0'},
{'category': 'physical',
'description': 'r3 to r2', 'id': '_port1'},
{'category': 'physical', 'description': 'r3 to r5', 'id': '_port2'}],
'raw_interfaces': {0: {'category': 'loopback', 'description': 'loopback'},
1: {'category': 'physical', 'description': 'r3 to r2'},
2: {'category': 'physical', 'description': 'r3 to r5'}},
'specified_int_names': None,
'syntax': 'quagga',
'vrf': None,
'x': 765.0,
'y': 255.0},
{'asn': 1,
'device_type': 'router',
'host': 'localhost',
'ibgp_l3_cluster': u'A',
'ibgp_role': u'RR',
'id': 'r4',
'igp': 'ospf',
'label': 'r4',
'ospf_area': 0,
'platform': 'netkit',
'ports': [{'category': 'loopback',
'description': 'loopback',
'id': 'Loopback0'},
{'category': 'physical',
'description': 'r4 to r1', 'id': '_port1'},
{'category': 'physical',
'description': 'r4 to r2', 'id': '_port2'},
{'category': 'physical',
'description': 'r4 to r5', 'id': '_port3'},
{'category': 'physical', 'description': 'r4 to r7', 'id': '_port4'}],
'raw_interfaces': {0: {'category': 'loopback', 'description': 'loopback'},
1: {'category': 'physical', 'description': 'r4 to r1'},
2: {'category': 'physical', 'description': 'r4 to r2'},
3: {'category': 'physical', 'description': 'r4 to r5'},
4: {'category': 'physical', 'description': 'r4 to r7'}},
'specified_int_names': None,
'syntax': 'quagga',
'vrf': None,
'x': 645.0,
'y': 345.0},
{'asn': 1,
'device_type': 'router',
'host': 'localhost',
'ibgp_l3_cluster': u'A',
'ibgp_role': u'RR',
'id': 'r5',
'igp': 'ospf',
'label': 'r5',
'ospf_area': 0,
'platform': 'netkit',
'ports': [{'category': 'loopback',
'description': 'loopback',
'id': 'Loopback0'},
{'category': 'physical',
'description': 'r5 to r4', 'id': '_port1'},
{'category': 'physical',
'description': 'r5 to r3', 'id': '_port2'},
{'category': 'physical',
'description': 'r5 to r6', 'id': '_port3'},
{'category': 'physical', 'description': 'r5 to r8', 'id': '_port4'}],
'raw_interfaces': {0: {'category': 'loopback', 'description': 'loopback'},
1: {'category': 'physical', 'description': 'r5 to r4'},
2: {'category': 'physical', 'description': 'r5 to r3'},
3: {'category': 'physical', 'description': 'r5 to r6'},
4: {'category': 'physical', 'description': 'r5 to r8'}},
'specified_int_names': None,
'syntax': 'quagga',
'vrf': None,
'x': 765.0,
'y': 345.0},
{'asn': 2,
'device_type': 'router',
'host': 'localhost',
'ibgp_l3_cluster': u'None',
'ibgp_role': u'None',
'id': 'r6',
'igp': 'ospf',
'label': 'r6',
'ospf_area': 0,
'platform': 'netkit',
'ports': [{'category': 'loopback',
'description': 'loopback',
'id': 'Loopback0'},
{'category': 'physical',
'description': 'r6 to r5', 'id': '_port1'},
{'category': 'physical', 'description': 'r6 to r8', 'id': '_port2'}],
'raw_interfaces': {0: {'category': 'loopback', 'description': 'loopback'},
1: {'category': 'physical', 'description': 'r6 to r5'},
2: {'category': 'physical', 'description': 'r6 to r8'}},
'specified_int_names': None,
'syntax': 'quagga',
'vrf': None,
'x': 894.616,
'y': 345.0},
{'asn': 3,
'device_type': 'router',
'host': 'localhost',
'ibgp_l3_cluster': u'None',
'ibgp_role': u'Peer',
'id': 'r7',
'igp': 'ospf',
'label': 'r7',
'ospf_area': 0,
'platform': 'netkit',
'ports': [{'category': 'loopback',
'description': 'loopback',
'id': 'Loopback0'},
{'category': 'physical',
'description': 'r7 to r4', 'id': '_port1'},
{'category': 'physical',
'description': 'r7 to r8', 'id': '_port2'},
{'category': 'physical', 'description': 'r7 to r9', 'id': '_port3'}],
'raw_interfaces': {0: {'category': 'loopback', 'description': 'loopback'},
1: {'category': 'physical', 'description': 'r7 to r4'},
2: {'category': 'physical', 'description': 'r7 to r8'},
3: {'category': 'physical', 'description': 'r7 to r9'}},
'specified_int_names': None,
'syntax': 'quagga',
'vrf': None,
'x': 645.0,
'y': 435.0},
{'asn': 3,
'device_type': 'router',
'host': 'localhost',
'ibgp_l3_cluster': u'None',
'ibgp_role': u'Peer',
'id': 'r8',
'igp': 'ospf',
'label': 'r8',
'ospf_area': 0,
'platform': 'netkit',
'ports': [{'category': 'loopback',
'description': 'loopback',
'id': 'Loopback0'},
{'category': 'physical',
'description': 'r8 to r10', 'id': '_port1'},
{'category': 'physical',
'description': 'r8 to r5', 'id': '_port2'},
{'category': 'physical',
'description': 'r8 to r6', 'id': '_port3'},
{'category': 'physical', 'description': 'r8 to r7', 'id': '_port4'}],
'raw_interfaces': {0: {'category': 'loopback', 'description': 'loopback'},
1: {'category': 'physical', 'description': 'r8 to r10'},
2: {'category': 'physical', 'description': 'r8 to r5'},
3: {'category': 'physical', 'description': 'r8 to r6'},
4: {'category': 'physical', 'description': 'r8 to r7'}},
'specified_int_names': None,
'syntax': 'quagga',
'vrf': None,
'x': 765.0,
'y': 435.0},
{'asn': 3,
'device_type': 'router',
'host': 'localhost',
'ibgp_l3_cluster': u'None',
'ibgp_role': u'Peer',
'id': 'r9',
'igp': 'ospf',
'label': 'r9',
'ospf_area': 0,
'platform': 'netkit',
'ports': [{'category': 'loopback',
'description': 'loopback',
'id': 'Loopback0'},
{'category': 'physical',
'description': 'r9 to r10', 'id': '_port1'},
{'category': 'physical', 'description': 'r9 to r7', 'id': '_port2'}],
'raw_interfaces': {0: {'category': 'loopback', 'description': 'loopback'},
1: {'category': 'physical', 'description': 'r9 to r10'},
2: {'category': 'physical', 'description': 'r9 to r7'}},
'specified_int_names': None,
'syntax': 'quagga',
'vrf': None,
'x': 645.0,
'y': 500.52}]}
graph = simple_to_nx(data)
anm = autonetkit.anm.NetworkModel()
g_in = anm.add_overlay("input")
g_in._replace_graph(nx.Graph(graph))
# TODO: check if should build overlays here rather than clone in?
g_phy = anm["phy"]
g_phy._replace_graph(graph)
return anm | [
"simon.knight@gmail.com"
] | simon.knight@gmail.com |
a3a9e7e725c1b44de191970285de795ee13c6cdb | cc72013ede1b3bb02c32a3d0d199be4f7986c173 | /ch13/anagrams.py | ea593d66359a246b5e448761405cf3e62f20e948 | [] | no_license | alextickle/zelle-exercises | b87d2a1476189954565f5cc97ee1448200eb00d4 | b784ff9ed9b2cb1c56e31c1c63f3e2b52fa37875 | refs/heads/master | 2021-01-19T00:33:19.132238 | 2017-09-14T23:35:35 | 2017-09-14T23:35:35 | 87,182,609 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | def anagrams(s):
	# Base case: the empty string has exactly one anagram — itself.
	if s == "":
		return [s]
	else:
		ans = []
		# Recursive case: build every anagram of the tail s[1:], then splice
		# the head character s[0] into each possible position of each one.
		# Produces all len(s)! arrangements (with repeats if s has
		# duplicate letters).
		for w in anagrams(s[1:]):
			for pos in range(len(w) + 1):
				ans.append(w[:pos]+s[0]+w[pos:])
		return ans
| [
"alexander.tickle@gmail.com"
] | alexander.tickle@gmail.com |
83bf77fbf76ff64192e41dc2e4a0ba42221f38b8 | 2c32cf726e111b8625265c458feeaea436652e83 | /Trie/implement-trie.py | de17e2d41fb4c7a26a782d2fb9471c1792719d35 | [] | no_license | minhthe/practice-algorithms-and-data-structures | 6fa3bf98e8e2fe98f4e32419fb797b1df4400364 | 488a82dd3a0c797859a6c9e1195d6d579d676073 | refs/heads/master | 2021-05-16T23:01:20.026475 | 2020-09-23T04:17:13 | 2020-09-23T04:17:13 | 250,505,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | '''https://leetcode.com/problems/implement-trie-prefix-tree/'''
class Node:
    """A single trie node: outgoing edges keyed by character, plus a count
    of complete words that terminate exactly at this node."""

    def __init__(self):
        self.child = {}   # char -> Node
        self.cntWord = 0  # number of inserted words ending at this node


class Trie:
    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.root = Node()

    def insert(self, word: str) -> None:
        """
        Inserts a word into the trie.
        """
        node = self.root
        for ch in word:
            # Create the edge lazily on first use.
            if ch not in node.child:
                node.child[ch] = Node()
            node = node.child[ch]
        # Counting (rather than flagging) tolerates duplicate insertions.
        node.cntWord += 1

    def search(self, word: str) -> bool:
        """
        Returns if the word is in the trie.
        """
        node = self.root
        for ch in word:
            if ch not in node.child:
                return False
            node = node.child[ch]
        # Fix: the signature promises a bool, but the old code returned the
        # raw word counter (an int). Normalize to a genuine boolean; truthy
        # behavior for existing callers is unchanged.
        return node.cntWord > 0

    def startsWith(self, prefix: str) -> bool:
        """
        Returns if there is any word in the trie that starts with the given prefix.
        """
        node = self.root
        for ch in prefix:
            if ch not in node.child:
                return False
            node = node.child[ch]
        return True
# Your Trie object will be instantiated and called as such:
# obj = Trie()
# obj.insert(word)
# param_2 = obj.search(word)
# param_3 = obj.startsWith(prefix) | [
"minhthe.007@gmail.com"
] | minhthe.007@gmail.com |
44c63246f820eac5a95e7a8276d4bd7d7fcf9c4e | 32e6c2d63d92b1c2eb00ef3d362a7e3deba0f72f | /venv/Scripts/pip3.7-script.py | 63b7a09dee8b2ec35e7254c61a5861719012b0af | [] | no_license | kvineethgithub/example | d8706200e0297793a211b98b2a44eb4c3f76a143 | 0ecb300753fa15fcefb1dd57c454b8d45ac7f58f | refs/heads/master | 2020-05-31T06:43:29.201072 | 2019-06-04T06:52:01 | 2019-06-04T06:52:01 | 190,147,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | #!C:\Users\Intel\PycharmProjects\django_1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
# Auto-generated setuptools console-script wrapper: resolves the 'pip3.7'
# entry point of the pinned pip distribution and invokes it.
__requires__ = 'pip==19.0.3'  # tells pkg_resources which distribution to resolve
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip a trailing '-script.py(w)' / '.exe' so pip sees a clean program
    # name in sys.argv[0] (Windows script-launcher naming convention).
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
    )
| [
"you@example.com"
] | you@example.com |
fd0a0b5a5b5e3cc90a4fd846d0b27ae1c4e1dcd3 | 15d7b8fb4fd92984844a9ef8b60213ad5b7344d4 | /fairseq/criterions/wav2vec_criterion.py | cc454b93096c428ce456d55c18ce7ad99c16564d | [
"MIT"
] | permissive | seeledu/fairseq | 18a80cecd9dbdf428569cda516aa290dca60a4d4 | 148327d8c1e3a5f9d17a11bbb1973a7cf3f955d3 | refs/heads/master | 2023-02-25T08:11:53.009964 | 2021-01-28T22:18:48 | 2021-01-28T22:21:10 | 307,387,657 | 0 | 0 | MIT | 2021-01-19T08:30:55 | 2020-10-26T13:49:16 | null | UTF-8 | Python | false | false | 7,130 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import List, Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.logging.meters import safe_round
@dataclass
class Wav2VecCriterionConfig(FairseqDataclass):
    """Configuration schema for the `wav2vec` criterion, consumed by
    fairseq's dataclass-based config system (the `help` metadata doubles
    as CLI documentation)."""
    infonce: bool = field(
        default=False,
        metadata={
            "help": "if set, uses cross entropy instead of binary cross entropy (i.e. InfoNCE loss)"
        },
    )
    loss_weights: Optional[List[float]] = field(
        default=None,
        metadata={"help": "weights for additional loss terms (not first one)"},
    )
    log_keys: List[str] = field(
        # default_factory so every config instance gets its own fresh list.
        default_factory=lambda: [],
        metadata={"help": "output keys to log"},
    )
@register_criterion("wav2vec", dataclass=Wav2VecCriterionConfig)
class Wav2vecCriterion(FairseqCriterion):
    # Criterion for wav2vec-style pre-training: the main contrastive
    # (InfoNCE cross-entropy) or binary cross-entropy objective, plus any
    # extra losses reported by the model, each scaled by a configured weight.
    def __init__(self, task, infonce=False, loss_weights=None, log_keys=None):
        super().__init__(task)
        self.infonce = infonce  # True -> multi-class CE over distractors (InfoNCE)
        self.loss_weights = loss_weights  # coefficients for the model's extra losses
        self.log_keys = [] if log_keys is None else log_keys  # extra net_output keys to log
    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.
        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample["net_input"])
        logits = model.get_logits(net_output).float()
        target = model.get_targets(sample, net_output)
        # Optional per-target weights; only used for the BCE variant.
        weights = None
        if hasattr(model, "get_target_weights") and not self.infonce:
            weights = model.get_target_weights(target, net_output)
            if torch.is_tensor(weights):
                weights = weights.float()
        losses = []
        if self.infonce:
            loss = F.cross_entropy(
                logits,
                target,
                reduction="sum" if reduce else "none",
            )
        else:
            loss = F.binary_cross_entropy_with_logits(
                logits,
                target.float(),
                weights,
                reduction="sum" if reduce else "none",
            )
        # Gradient denominator: number of predictions (InfoNCE) or number of
        # positive targets (BCE).
        sample_size = target.numel() if self.infonce else target.long().sum().item()
        losses.append(loss.detach().clone())
        if self.loss_weights is not None:
            assert hasattr(model, "get_extra_losses")
            extra_losses = model.get_extra_losses(net_output)
            if torch.is_tensor(extra_losses):
                extra_losses = [extra_losses]
            # A single configured weight is broadcast over all extra losses.
            if len(self.loss_weights) == 1 and len(extra_losses) != 1:
                self.loss_weights = [self.loss_weights[0]] * len(extra_losses)
            assert len(extra_losses) == len(
                self.loss_weights
            ), f"{len(extra_losses)}, {len(self.loss_weights)}"
            for p, coef in zip(extra_losses, self.loss_weights):
                if coef != 0 and p is not None:
                    # Pre-multiplied by sample_size so the term survives the
                    # per-sample normalization applied in reduce_metrics().
                    p = coef * p.float() * sample_size
                    loss += p
                    losses.append(p)
        logging_output = {
            "loss": loss.item() if reduce else loss,
            "ntokens": sample_size,
            "nsentences": sample["id"].numel(),
            "sample_size": sample_size,
        }
        for lk in self.log_keys:
            # Only store "logits" and "target" for computing MAP and MAUC
            # during validation
            if lk == "logits":
                if not self.training:
                    logging_output["logits"] = logits.cpu().numpy()
            elif lk == "target":
                if not self.training:
                    logging_output["target"] = target.cpu().numpy()
            elif lk in net_output:
                logging_output[lk] = float(net_output[lk])
        if len(losses) > 1:
            # Log each component separately (loss_0 = the main objective).
            for i, l in enumerate(losses):
                logging_output[f"loss_{i}"] = l.item()
        if self.infonce:
            # Contrastive accuracy: a row counts as correct when index 0 has
            # the strictly highest logit; rows where argmax == argmin (i.e.
            # degenerate/constant logits) are subtracted back out.
            with torch.no_grad():
                if logits.numel() == 0:
                    corr = 0
                    count = 0
                else:
                    assert logits.dim() > 1, logits.shape
                    # NOTE(review): `max`/`min` shadow the Python builtins here.
                    max = logits.argmax(-1) == 0
                    min = logits.argmin(-1) == 0
                    both = max & min
                    corr = max.long().sum().item() - both.long().sum().item()
                    count = max.numel()
            logging_output["correct"] = corr
            logging_output["count"] = count
        return loss, sample_size, logging_output
    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
        ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
        nsentences = utils.item(
            sum(log.get("nsentences", 0) for log in logging_outputs)
        )
        sample_size = utils.item(
            sum(log.get("sample_size", 0) for log in logging_outputs)
        )
        # Normalize per sample and convert nats -> bits (divide by ln 2).
        metrics.log_scalar(
            "loss", loss_sum / sample_size / math.log(2), sample_size, round=3
        )
        metrics.log_scalar("ntokens", ntokens)
        metrics.log_scalar("nsentences", nsentences)
        correct = sum(log.get("correct", 0) for log in logging_outputs)
        metrics.log_scalar("_correct", correct)
        total = sum(log.get("count", 0) for log in logging_outputs)
        metrics.log_scalar("_total", total)
        if total > 0:
            # Accuracy is derived lazily from the summed helper meters.
            metrics.log_derived(
                "accuracy",
                lambda meters: safe_round(
                    meters["_correct"].sum / meters["_total"].sum, 5
                )
                if meters["_total"].sum > 0
                else float("nan"),
            )
        builtin_keys = {
            "loss",
            "ntokens",
            "nsentences",
            "sample_size",
            "correct",
            "count",
        }
        # Remaining keys (loss_0/loss_1, user-configured log_keys, ...) are
        # aggregated generically: loss-like keys normalized like "loss",
        # everything else averaged across workers.
        for k in logging_outputs[0]:
            if k not in builtin_keys:
                val = sum(log.get(k, 0) for log in logging_outputs)
                if k.startswith("loss"):
                    metrics.log_scalar(
                        k, val / sample_size / math.log(2), sample_size, round=3
                    )
                else:
                    metrics.log_scalar(k, val / len(logging_outputs), round=3)
    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improves distributed training speed.
        """
        # False here — presumably because the outputs may contain numpy
        # arrays ("logits"/"target" stored during validation) that cannot
        # simply be summed across workers; confirm.
        return False
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
86806da74e6fdfe7c4fefd8d632d1c262190b761 | 8d8663095c119b2c175247bdf7b5fa613c378061 | /2주차 실습/1316.py | d42921edb93ddaa10f650f874673054a04873574 | [] | no_license | mjson1954/piro13 | 047dfc7c6090548f4f67c8b36bd4e06eea493a79 | c81a731ff245231111065482b0cb5edf2687425c | refs/heads/master | 2022-11-20T00:52:49.364816 | 2020-07-17T11:53:05 | 2020-07-17T11:53:05 | 277,566,415 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | def check_group(word):
    """Return True if *word* is a "group word": every distinct letter occurs
    in exactly one consecutive run (e.g. "aabb" is one, "aabba" is not)."""
    last_alphabet =""
    alphabets = []  # letters whose run has already been seen and closed
    # e.g. "aabb": repeats are fine as long as they are consecutive
    for letter in word:
        if letter == last_alphabet:  # still inside the current run (e.g. the 2nd 'a')
            continue
        else:
            if letter in alphabets:
                return False  # letter re-appears after its run ended -> not a group word
            alphabets.append(letter)  # e.g. [a, b]
            last_alphabet = letter  # a new run starts with this letter (a -> b)
    return True  # survived the whole loop -> group word
count = int(input())  # read the number of words
result=0  # running count of group words
for _ in range(count):
    word = input()  # read one word
    if check_group(word):
        result+=1
print(result)
# Algorithm notes (see check_group above):
# 1. Is the letter part of a consecutive run? e.g. "aabb" -> compare against the previous letter.
# 2. Has the letter appeared before? e.g. "aabbaa".
# 3. If a run-breaking letter was already seen -> False, not a group word.
"mjson1954@gmail.com"
] | mjson1954@gmail.com |
7d5984884e7de5b64ec6eac294903973f53520a7 | 032a0c939d96d0e5307dbce86e11faf7060f4ed9 | /lte/gateway/python/magma/pipelined/qos/tc_ops_pyroute2.py | 8d804a69815a66d7d80268276aa7d934f874b634 | [
"BSD-3-Clause"
] | permissive | radha0018/magma | cac9ff3491dd2661e5dc0aa1f9a304a5428e2d2a | 8436966a4bb3cf7fdc3f567704062b6f9568db25 | refs/heads/master | 2023-05-05T08:26:07.132969 | 2021-05-27T18:44:44 | 2021-05-27T18:44:44 | 371,097,174 | 0 | 2 | NOASSERTION | 2021-05-26T16:26:21 | 2021-05-26T16:15:53 | Go | UTF-8 | Python | false | false | 5,564 | py | """
Copyright 2021 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import pprint
from pyroute2 import IPRoute, NetlinkError
from .tc_ops import TcOpsBase
LOG = logging.getLogger('pipelined.qos.tc_pyroute2')
QUEUE_PREFIX = '1:'  # major handle prefix; HTB class ids are built as '1:<qid>'
PROTOCOL = 0x0800  # ETH_P_IP: filters match IPv4 traffic by default
PARENT_ID = 0x10000  # numeric form of TC handle '1:0' (major 1, minor 0)
class TcOpsPyRoute2(TcOpsBase):
    """
    Create TC scheduler and corresponding filter
    """
    def __init__(self):
        self._ipr = IPRoute()
        self._iface_if_index = {}  # cache: interface name -> netlink if-index
        LOG.info("initialized")
    def create_htb(self, iface: str, qid: str, max_bw: int, rate: str,
                   parent_qid: str = None) -> int:
        """
        Create HTB class for a UE session.
        Args:
            iface: Egress interface name.
            qid: qid number.
            max_bw: ceiling in bits per sec.
            rate: rate limiting.
            parent_qid: HTB parent queue.
        Returns:
            zero on success.
        """
        LOG.debug("Create HTB iface %s qid %s max_bw %s rate %s", iface, qid, max_bw, rate)
        try:
            # API needs ceiling in bytes per sec.
            max_bw = max_bw / 8
            if_index = self._get_if_index(iface)
            htb_queue = QUEUE_PREFIX + qid
            ret = self._ipr.tc("add-class", "htb", if_index,
                               htb_queue, parent=parent_qid,
                               rate=str(rate).lower(), ceil=max_bw, prio=1)
            LOG.debug("Return: %s", ret)
        except (ValueError, NetlinkError) as ex:
            # NOTE(review): ValueError has no `.code` attribute — this handler
            # assumes a NetlinkError-style exception; confirm.
            LOG.error("create-htb error : %s", ex.code)
            LOG.debug(ex, exc_info=True)
            return ex.code
        return 0
    def del_htb(self, iface: str, qid: str) -> int:
        """
        Delete given queue from HTB classed
        Args:
            iface: interface name
            qid: queue-id of the HTB class
        Returns:
            zero on success, exception error code on failure.
        """
        LOG.debug("Delete HTB iface %s qid %s", iface, qid)
        try:
            if_index = self._get_if_index(iface)
            htb_queue = QUEUE_PREFIX + qid
            ret = self._ipr.tc("del-class", "htb", if_index, htb_queue)
            LOG.debug("Return: %s", ret)
        except (ValueError, NetlinkError) as ex:
            # NOTE(review): duplicated word "error" in this log message.
            LOG.error("del-htb error error : %s", ex.code)
            LOG.debug(ex, exc_info=True)
            return ex.code
        return 0
    def create_filter(self, iface: str, mark: str, qid: str, proto: int = PROTOCOL) -> int:
        """
        Create TC Filter for given HTB class.
        Adds an fwmark ("fw") filter on `iface` that steers packets carrying
        mark `mark` (hex string) into HTB class '1:<qid>'. Returns zero on
        success, exception error code on failure.
        """
        LOG.debug("Create Filter iface %s qid %s", iface, qid)
        try:
            if_index = self._get_if_index(iface)
            # Compose the class handle '1:<qid>': OR the major part
            # (PARENT_ID == '1:0') with the minor queue id.
            class_id = int(PARENT_ID) | int(qid, 16)
            ret = self._ipr.tc("add-filter", "fw", if_index, int(mark, 16),
                               parent=PARENT_ID,
                               prio=1,
                               protocol=proto,
                               classid=class_id)
            LOG.debug("Return: %s", ret)
        except (ValueError, NetlinkError) as ex:
            LOG.error("create-filter error : %s", ex.code)
            LOG.debug(ex, exc_info=True)
            return ex.code
        return 0
    def del_filter(self, iface: str, mark: str, qid: str, proto: int = PROTOCOL) -> int:
        """
        Delete TC filter.
        Removes the fwmark filter previously installed by create_filter();
        the arguments must match the ones used at creation time. Returns
        zero on success, exception error code on failure.
        """
        LOG.debug("Del Filter iface %s qid %s", iface, qid)
        try:
            if_index = self._get_if_index(iface)
            class_id = int(PARENT_ID) | int(qid, 16)
            ret = self._ipr.tc("del-filter", "fw", if_index, int(mark, 16),
                               parent=PARENT_ID,
                               prio=1,
                               protocol=proto,
                               classid=class_id)
            LOG.debug("Return: %s", ret)
        except (ValueError, NetlinkError) as ex:
            LOG.error("del-filter error : %s", ex.code)
            LOG.debug(ex, exc_info=True)
            return ex.code
        return 0
    def create(self, iface: str, qid: str, max_bw: int, rate=None,
               parent_qid: str = None, proto=PROTOCOL) -> int:
        # Convenience wrapper: create the HTB class plus the fw filter that
        # feeds it; the qid doubles as the fwmark. Stops at the first error
        # and returns its code; zero on full success.
        err = self.create_htb(iface, qid, max_bw, rate, parent_qid)
        if err:
            return err
        err = self.create_filter(iface, qid, qid, proto)
        if err:
            return err
        return 0
    def delete(self, iface: str, qid: str, proto=PROTOCOL) -> int:
        # Inverse of create(): filter first, then the HTB class.
        err = self.del_filter(iface, qid, qid, proto)
        if err:
            return err
        err = self.del_htb(iface, qid)
        if err:
            return err
        return 0
    def _get_if_index(self, iface: str):
        # Resolve (and cache) the netlink interface index for `iface`.
        # NOTE(review): pyroute2's link_lookup() returns a *list* of indices,
        # which is cached and passed straight to tc(); confirm pyroute2
        # accepts that form.
        if_index = self._iface_if_index.get(iface, -1)
        if if_index == -1:
            if_index = self._ipr.link_lookup(ifname=iface)
            self._iface_if_index[iface] = if_index
        return if_index
    def _print_classes(self, iface):
        # Debug helper: dump the interface's TC classes to stdout.
        if_index = self._get_if_index(iface)
        pprint.pprint(self._ipr.get_classes(if_index))
    def _print_filters(self, iface):
        # Debug helper: dump the interface's TC filters to stdout.
        if_index = self._get_if_index(iface)
        pprint.pprint(self._ipr.get_filters(if_index))
| [
"noreply@github.com"
] | radha0018.noreply@github.com |
6b0742c489906576ca7ec31d21f3aa8d5c4568d3 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2_neat/16_0_2_hockeyj85_b.py | 89fd456eefc2119405fc60a848830b5031bc2b06 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 292 | py | from sys import stdout
T = int(raw_input())  # number of test cases (Python 2: raw_input)
for t in range(T):
    stdout.write("Case #"+str(t+1)+": ")
    line = raw_input()  # pancake stack of '+'/'-' chars (appears to be GCJ "Revenge of the Pancakes")
    last = "roflcopter"  # sentinel that matches no character, so the first char always starts a new run
    flips = 0
    for a in line:
        if a != last:
            flips += 1  # a new run of identical characters begins
        last = a
    if line[len(line) - 1] != '-':
        flips -= 1  # final character is '+': the last run needs no flip
    stdout.write(str(flips)+"\n")
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
0f9e9852bc81691ad0a672e8840592cc337887e5 | cba46e28e6f60d9bd8cc8c24a3ff8e065e5a8e49 | /scrap_trade_proj/doc_repo/models.py | ecbf73f47f21efe2c10d67aed2895289d3d97b2a | [] | no_license | Horac-Bouthon/scrap-trade-4 | fb7e9f8f9ec41446318ce03ad5ff7024ad795771 | 7686703ce5783dd4a48dc1d9600cda01aa554faa | refs/heads/master | 2022-12-12T21:52:38.209500 | 2020-03-17T07:50:30 | 2020-03-17T07:50:30 | 227,142,003 | 0 | 0 | null | 2022-11-22T04:39:35 | 2019-12-10T14:33:20 | Python | UTF-8 | Python | false | false | 7,097 | py | from django.db import models
from django.conf import settings
from django.urls import reverse
import uuid
from pdf2image import convert_from_path
from django.utils.translation import gettext_lazy as _
from django.utils import translation as tr
from translatable.models import TranslatableModel, get_translation_model
from project_main.models import Project
from integ.models import OpenId
from customers.models import (
ProjectCustomUser,
)
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage as storage
from scrap_trade_proj.settings import THUMB_SIZE
import os
from PIL import Image
from io import BytesIO
from django.core.files.base import ContentFile
# Create your models here.
class DocType(TranslatableModel):
    """Lookup table of document kinds (e.g. 'picture', 'pdf', 'file').

    The machine-readable ``type_key`` lives here; the human-readable,
    per-language display name is stored on ``DocTypeTranslation``.
    """

    # Stable identifier used by code (Document.is_picture() etc.),
    # never shown to end users directly.
    type_key = models.CharField(
        max_length=20,
        default="type",
        verbose_name=tr.pgettext_lazy('DocType definition', 'Type key'),
        help_text=tr.pgettext_lazy('DocType definition','Type key'),
        null=True,
        blank=True,
    )

    class Meta:
        verbose_name = tr.pgettext_lazy('DocType definition', 'Document type')
        verbose_name_plural = tr.pgettext_lazy('DocType definition', 'Document type')

    def __str__(self):
        return self.get_type_name()

    def get_type_name(self):
        """Return the type name in the active language, falling back to
        another available translation when missing."""
        lang = tr.get_language()
        return self.translated('type_name', default=None, language=lang, fallback=True)
class DocTypeTranslation(get_translation_model(DocType, "doctype")):
    """Per-language display name for a :class:`DocType`."""

    type_name = models.CharField(
        verbose_name=_('Type name'),
        help_text=_("Display name of document type."),
        max_length=50,
        null=True,
        blank=True,
        unique=False
    )
class Document(models.Model):
    """An uploaded document (picture, PDF or generic file) together with
    an auto-generated preview thumbnail."""

    # NOTE(review): ``open_id`` is declared twice on this model (see the
    # second declaration further below).  Django keeps only the *last*
    # one, so this first declaration is effectively dead — confirm and
    # remove one of the two.
    open_id = models.ForeignKey(
        OpenId,
        on_delete=models.CASCADE,
        verbose_name=_('Open ID'),
        help_text=_("Document Connection Key."),
        related_name='my_docs',
        null=True, blank=True,
    )
    # Display name of the document.
    doc_name = models.CharField(
        verbose_name=_('Document name'),
        help_text=_("Name of the document."),
        max_length=100,
        null=True,
        blank=True,
        unique=False,
    )
    doc_description = models.TextField(
        verbose_name=_('Document description'),
        help_text=_("Description of the document."),
        null=True,
        blank=True,
        unique=False,
    )
    # Kind of document (picture / pdf / file); drives thumbnail creation.
    type = models.ForeignKey(
        DocType,
        on_delete=models.CASCADE,
        verbose_name=tr.pgettext_lazy('Document definition', 'Type'),
        help_text=tr.pgettext_lazy('Document definition','Document type'),
        related_name="my_docs",
    )
    created_by = models.ForeignKey(
        ProjectCustomUser,
        on_delete=models.SET_NULL,
        verbose_name=tr.pgettext_lazy('Document definition', 'Created by'),
        help_text=tr.pgettext_lazy('Document definition','Link to creator'),
        null=True, blank=True,
        related_name="my_documents",
    )
    created_at = models.DateTimeField(auto_now_add=True,)
    # The uploaded payload itself, partitioned by upload date.
    file = models.FileField(
        upload_to='doc_repository/%Y/%m/%d/',
        verbose_name=tr.pgettext_lazy('Document file', 'File'),
        null=True,
        blank=True,
    )
    # Generated preview image; written only by make_thumbnail().
    thumbnail = models.ImageField(
        upload_to='doc_thumbs/%Y/%m/%d/',
        editable=False,
        null=True,
        blank=True,
    )
    open_id = models.ForeignKey(
        OpenId,
        on_delete=models.CASCADE,
        verbose_name=tr.pgettext_lazy('UserProfile definition', 'Open id'),
        help_text=tr.pgettext_lazy('UserProfile definition','Link to integration key'),
        related_name='my_docs',
        null=True, blank=True,
    )

    class Meta:
        verbose_name = tr.pgettext_lazy('Document definition', 'Document')
        verbose_name_plural = tr.pgettext_lazy('Document definition', 'Documents')

    def __str__(self):
        return '{} {} {}'.format(self.pk,
            self.doc_name,
            self.created_at,
        )

    def is_picture(self):
        # True when this document's type is the 'picture' DocType.
        return self.type == DocType.objects.get(type_key = 'picture')

    def is_pdf(self):
        # True when this document's type is the 'pdf' DocType.
        return self.type == DocType.objects.get(type_key = 'pdf')

    def is_file(self):
        # True when this document's type is the generic 'file' DocType.
        return self.type == DocType.objects.get(type_key = 'file')

    def save(self, *args, **kwargs):
        """
        Make and save the thumbnail for the photo here.
        """
        super().save(*args, **kwargs)
        self.make_thumbnail()

    def make_thumbnail(self):
        """
        Create and save the thumbnail for the photo (simple resize with PIL).

        Returns True when nothing needed to be done or generation
        succeeded, False on failure.
        """
        # NOTE(review): ``self.thumbnail == None`` compares the FieldFile's
        # stored name against None and is only True while no thumbnail
        # name has been set; ``not self.thumbnail`` would state the intent
        # more idiomatically — confirm before changing.
        if self.type == DocType.objects.get(type_key = 'picture') and self.thumbnail == None:
            return self.make_picture_thumb()
        if self.type == DocType.objects.get(type_key = 'pdf') and self.thumbnail == None:
            return self.make_pdf_thumb()
        return True

    def make_picture_thumb(self):
        """Resize the uploaded image to THUMB_SIZE and store it in
        ``thumbnail``.  Returns False on unreadable or unknown formats."""
        try:
            image = Image.open(self.file.path)
        except:
            return False
        image.thumbnail(THUMB_SIZE, Image.ANTIALIAS)
        # Path to save to, name, and extension
        thumb_name, thumb_extension = os.path.splitext(self.file.name)
        xl = thumb_name.split('/')
        thumb_real_name = xl[-1]
        thumb_extension = thumb_extension.lower()
        thumb_filename = thumb_real_name + '_thumb' + thumb_extension
        if thumb_extension in ['.jpg', '.jpeg']:
            FTYPE = 'JPEG'
        elif thumb_extension == '.gif':
            FTYPE = 'GIF'
        elif thumb_extension == '.png':
            FTYPE = 'PNG'
        else:
            return False    # Unrecognized file type
        # Save thumbnail to in-memory file as StringIO
        temp_thumb = BytesIO()
        image.save(temp_thumb, FTYPE)
        temp_thumb.seek(0)
        # Load a ContentFile into the thumbnail field so it gets saved.
        # save=True re-saves the model; make_thumbnail() then short-circuits
        # because a thumbnail name is now set.
        self.thumbnail.save(thumb_filename, ContentFile(temp_thumb.read()), save=True)
        temp_thumb.close()
        return True

    def make_pdf_thumb(self):
        """Render the PDF via pdf2image/poppler and store a JPEG
        thumbnail scaled down to THUMB_SIZE."""
        try:
            # 500 DPI render — heavy for large documents.
            pages = convert_from_path(self.file.path, 500)
            print('pages = {}'.format(pages))
        except:
            print('error excepted')
            return False
        # Path to save to, name, and extension
        thumb_name, thumb_extension = os.path.splitext(self.file.name)
        xl = thumb_name.split('/')
        thumb_real_name = xl[-1]
        thumb_filename = thumb_real_name + '_thumb.jpg'
        FTYPE = 'JPEG'
        # Save thumbnail to in-memory file as StringIO
        temp_thumb = BytesIO()
        # NOTE(review): the loop re-saves the thumbnail once per page into
        # the same buffer; for multi-page PDFs the stored result is
        # ill-defined — verify whether only the first page was intended.
        for page in pages:
            page.save(temp_thumb, FTYPE)
            temp_thumb.seek(0)
            # Load a ContentFile into the thumbnail field so it gets saved
            self.thumbnail.save(thumb_filename, ContentFile(temp_thumb.read()), save=True)
        temp_thumb.close()
        img = Image.open(self.thumbnail.path)
        if img.height > 100 or img.width > 100:
            img.thumbnail(THUMB_SIZE, Image.ANTIALIAS)
            img.save(self.thumbnail.path)
        return True
| [
"tbrown.wolf@ubk.cz"
] | tbrown.wolf@ubk.cz |
09d004b1906893a51b1d0ef124aa91e4433ed1cd | 8a63821681b29f196a0dcf19308a75679f89adaf | /Algorithm/布隆过滤算法.py | f3cc26a121bba26e19360434af297a665e9ad9b0 | [] | no_license | Breathleas/notes-4 | 2b4391b6205660dae256c4132ecb3f953061a2f7 | 6c11e4583e191da323d8ffdc83534e9582036ae1 | refs/heads/master | 2021-10-09T03:18:08.560660 | 2018-12-20T14:46:31 | 2018-12-20T14:46:31 | null | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 818 | py | ----------------------------
布隆过滤算法 |
----------------------------
# 在N多数据中,判断数据是否存在
# 添加逻辑
1,存入数据,使用多个hash函数对数据进行运算
v1 = hash1(key)
v2 = hash2(key)
v3 = hash3(key)
2,多个hash值取模数组长度,把得到的结果角标设置为1
arr[v1 % arr.length] = 1;
arr[v2 % arr.length] = 1;
arr[v3 % arr.length] = 1;
# 判断逻辑
1,使用多个hash函数对数据进行运算
v1 = hash1(key)
v2 = hash2(key)
v3 = hash3(key)
2,多个hash值取模数组长度,判断结果角标是否都为1,如果是则包含,任何非1则不包含
arr[v1 % arr.length] == 1 &&
arr[v2 % arr.length] == 1 &&
arr[v3 % arr.length] == 1
# 注意
* 在位数组足够大(未接近填满)的前提下,hash函数个数越多,误报的几率越小;位数组过满时误报率反而会升高
| [
"747692844@qq.com"
] | 747692844@qq.com |
2ca649bd865704e92b59ad46113905a39e6e9ecf | 070e06d721d450260f70fed0811b5dd147d1ea10 | /zhihudaily/cache.py | c49f84aa4649b4cb8f6609ea4b6296688742e65a | [
"MIT"
] | permissive | lord63/zhihudaily | b1e411c6a93a4cc0ec629336259021baff81d6f7 | c6aa147d146223bb5842297e58a702b574f7dce5 | refs/heads/master | 2021-07-13T16:34:51.370694 | 2020-08-29T00:32:08 | 2020-08-29T00:32:08 | 30,679,182 | 2 | 2 | MIT | 2021-03-19T21:31:47 | 2015-02-12T01:27:15 | CSS | UTF-8 | Python | false | false | 137 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from flask_caching import Cache
cache = Cache()
| [
"lord63.j@gmail.com"
] | lord63.j@gmail.com |
3474ecb3d40fcd0b061ae0df216446bedbe133df | 5f9e0c226c6f99f04446d60cd21282e7e6b05d2c | /shopaholic.py | 29a69172980d2cbfba97c78833ed1b6795f42e57 | [] | no_license | JONNY-ME/my-kattis-solution | 867ac267dbb5faa6f7c2af35b435498a22ae269d | 51c70e0fd25f1f369cdcd2ce49a54d5d0df2358e | refs/heads/main | 2023-06-17T20:04:04.701038 | 2021-07-16T09:35:35 | 2021-07-16T09:35:35 | 386,583,581 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | n=int(input());t=(n//3)*3;s=0
l=sorted([int(i) for i in input().split()], reverse=True)[:t]
for i in range(2, t, 3):s+=l[i]
print(s) | [
"yohannesmelese4@gmail.com"
] | yohannesmelese4@gmail.com |
91f3b83f2e2165edc7225395b0c2597f37f99802 | d0237e274f34b6a02f1d78b668a55cb150833435 | /src/yss/yss/root/__init__.py | 4a2d58135ce75c912bdf61978ef9fa5cccd8fa88 | [
"BSD-3-Clause-Modification"
] | permissive | notaliens/youshouldsing | f2a90cc7b7f5d9c64052c72b77827e2c77cd8b40 | 948f7fb30a12eccdc34bb23db4e139afab8ee782 | refs/heads/master | 2021-01-01T17:28:44.739929 | 2019-01-21T05:33:01 | 2019-01-21T05:33:01 | 13,230,677 | 2 | 0 | null | 2013-10-02T03:01:26 | 2013-09-30T23:56:46 | CSS | UTF-8 | Python | false | false | 2,375 | py | import pkg_resources
from substanced.schema import Schema
from substanced.property import PropertySheet
from substanced.interfaces import IRoot
import colander
from pyramid.security import (
Allow,
Everyone,
Authenticated,
)
from substanced.event import subscribe_root_added
from substanced.util import set_acl
from . import comments
class RootSchema(Schema):
    """ The schema representing site properties. """

    # Upper bound on recording frame rate; defaults to 1 when absent.
    max_framerate = colander.SchemaNode(
        colander.Int(),
        title="Max Frame Rate",
        missing=1,
    )
class RootPropertySheet(PropertySheet):
    """Property sheet exposing :class:`RootSchema` on the root object."""
    schema = RootSchema()
@subscribe_root_added()
def root_added(event):
    """Bootstrap the site when the Substance D root is created: open up
    the ACL, add the 'yss' catalog, create the songs/performers folders,
    a placeholder 'blameme' performer and one packaged demo song."""
    registry = event.registry
    root = event.object
    acl = list(root.__acl__)
    # Everyone may view and be indexed; logged-in users may 'like'.
    acl.extend(
        [
            (Allow, Everyone, 'view'),
            (Allow, Everyone, 'yss.indexed'),
            (Allow, Authenticated, 'yss.like'),
        ]
    )
    set_acl(root, acl)
    root.title = root.sdi_title = 'You Should Sing'
    root.max_framerate = 30
    root['catalogs'].add_catalog('yss')
    root['songs'] = registry.content.create('Songs')
    # Any authenticated user may upload and record songs.
    set_acl(root['songs'], [
        (Allow, Authenticated, 'yss.upload'),
        (Allow, Authenticated, 'yss.record'),
    ])
    performers = root['performers'] = registry.content.create('Performers')
    blameme = registry.content.create('Performer')
    performers['blameme'] = blameme
    blameme['recordings'] = registry.content.create('Recordings')
    blameme['photo'] = registry.content.create('File')
    blameme['photo_thumbnail'] = registry.content.create('File')
    blameme.user = root['principals']['users']['admin']
    # Demo content shipped with the package: the same JSON file is used
    # for both timings and lyrics, plus an Opus audio stream.
    timings_json = pkg_resources.resource_string(
        'yss', 'blackbird.json').decode('utf-8')
    song = registry.content.create(
        'Song',
        'Blackbird',
        'The Beatles',
        timings=timings_json,
        lyrics=timings_json,
        audio_stream=pkg_resources.resource_stream('yss', 'blackbird.opus')
    )
    root['songs']['blackbird'] = song
    song.mimetype = 'audio/opus'
    song.uploader = blameme
def performer(request):
    """Request property: the performer linked to the logged-in user, or
    None for anonymous requests."""
    if request.user is None:
        return None
    return request.user.performer
def includeme(config):
    """Pyramid ``includeme``: register the root property sheet, the
    reified ``request.performer`` property and the comments subpackage."""
    config.add_propertysheet('YSS', RootPropertySheet, IRoot)
    config.add_request_method(performer, reify=True)
    config.include(comments)
| [
"chrism@plope.com"
] | chrism@plope.com |
4aeb6283041e70a8c1de4a1ba7dc3d632e950b36 | a3d2f81c04bde252ef7554e65ecbf2c32ce3c2dc | /feincms/views/decorators.py | 62727ae79a4ef972fdad1051bad1b8e7a043a6ec | [
"BSD-2-Clause"
] | permissive | natea/feincms | 3dd6949195352ad96e13c2f3b4d8d7a1677a97b8 | 1d45e3aae5fba6e4a2eccf8ee7675b2ffff2b70c | refs/heads/master | 2020-12-25T10:10:14.358175 | 2011-03-28T14:56:55 | 2011-03-28T14:56:55 | 1,497,290 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py | from django.http import HttpResponse
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps
from feincms.module.page.models import Page
def add_page_to_extra_context(view_func):
    """
    Adds the best-match page to the extra_context keyword argument. Mainly used
    to provide generic views which integrate into the page module.
    """
    def inner(request, *args, **kwargs):
        # Ensure the dict exists before injecting the matched page.
        kwargs.setdefault('extra_context', {})
        kwargs['extra_context']['feincms_page'] = Page.objects.best_match_for_request(request)

        return view_func(request, *args, **kwargs)
    return wraps(view_func)(inner)
def standalone(view_func):
    """
    Marks the view method as standalone view; this means that
    ``HttpResponse`` objects returned from ``ApplicationContent``
    are returned directly, without further processing.
    """
    @wraps(view_func)
    def wrapper(request, *args, **kwargs):
        result = view_func(request, *args, **kwargs)
        if isinstance(result, HttpResponse):
            result.standalone = True
        return result
    return wrapper
"mk@spinlock.ch"
] | mk@spinlock.ch |
6e150deceef1c7a9b30fb1a8608864d3f6d44be6 | c0c4fe8f9aff2e7684fcaf10329f963873753b2a | /doc/examples/scripts/sequence/thca_synthase_polymorphism.py | b480f9e3c1e9a2e58d08430f51d6283c83744cbb | [
"BSD-3-Clause"
] | permissive | thomasnevolianis/biotite | 85e1b9d6a1fbb5d9f81501a8ebc617bc26388ab9 | 916371eb602cfcacb2d5356659298ef38fa01fcc | refs/heads/master | 2022-11-30T19:40:53.017368 | 2020-08-04T07:00:59 | 2020-08-04T07:00:59 | 285,375,415 | 0 | 0 | BSD-3-Clause | 2020-08-05T18:41:48 | 2020-08-05T18:41:47 | null | UTF-8 | Python | false | false | 4,842 | py | """
Polymorphisms in the THCA synthase gene
=======================================
The THCA synthase catalyzes the last step in the synthesis of
tetrahydrocannabinolic acid (THCA), the precursor molecule of
tetrahydrocannabinol (THC).
Two types of *cannabis sativa* are distinguished: While the *drug-type*
strains produce high levels of THCA, *fiber-type* strains produce a low
amount. One molecular difference between these two types are
polymorphisms in THCA synthase gene [1]_.
This script takes THCA synthase gene sequences from different
*cannabis sativa* strains, translates them into protein sequences and
creates a consensus sequence for each of the two strain types.
Eventually, an alignment is plotted depicting the polymorphic positions
between the two consensus sequences.
.. [1] M Kojoma, H Seki, S Yoshida and T Muranaka,
"DNA polymorphisms in the tetrahydrocannabinolic acid (THCA) synthase
gene in 'drug-type' and 'fiber-type' Cannabis sativa L."
Forensic Sci Int, 159, 132-140 (2006).
"""
# Code source: Patrick Kunzmann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import biotite.sequence as seq
import biotite.sequence.align as align
import biotite.sequence.io.genbank as gb
import biotite.sequence.align as align
import biotite.sequence.graphics as graphics
import biotite.database.entrez as entrez
import biotite.application.clustalo as clustalo
# Search for DNA sequences that belong to the cited article
query = entrez.SimpleQuery("Forensic Sci. Int.", "Journal") \
& entrez.SimpleQuery("159", "Volume") \
& entrez.SimpleQuery("132-140", "Page Number")
uids = entrez.search(query, db_name="nuccore")
# Download and read file containing the Genbank records for the THCA
# synthase genes
multi_file = gb.MultiFile.read(entrez.fetch_single_file(
uids, file_name=None, db_name="nuccore", ret_type="gb"
))
# This dictionary maps the strain ID to the protein sequence
sequences = {}
for gb_file in multi_file:
annotation = gb.get_annotation(gb_file)
# Find ID of strain in 'source' feature
strain = None
for feature in annotation:
if feature.key == "source":
strain = int(feature.qual["strain"])
assert strain is not None
# Find corresponding protein sequence in 'CDS' feature
sequence = None
for feature in annotation:
if feature.key == "CDS":
sequence = seq.ProteinSequence(
# Remove whitespace in sequence
# resulting from line breaks
feature.qual["translation"].replace(" ", "")
)
assert sequence is not None
sequences[strain] = sequence
# None of the THCA synthase variants have an insertion or deletion
# -> each one should have the same sequence length
seq_len = len(list(sequences.values())[0])
for sequence in sequences.values():
assert len(sequence) == seq_len
# Create consensus sequences for the drug-type and fiber-type cannabis
# strains
def create_consensus(sequences):
    """Return the position-wise majority-vote consensus of equally long
    protein sequences as a new ProteinSequence."""
    seq_len = len(sequences[0])
    consensus_code = np.zeros(seq_len, dtype=int)
    for pos in range(seq_len):
        # Frequency of every symbol code in this column; ties resolve to
        # the smallest code, exactly like argmax over bincount.
        frequencies = np.bincount([s.code[pos] for s in sequences])
        consensus_code[pos] = frequencies.argmax()
    # Build an empty ProteinSequence and inject the consensus code.
    consensus_sequence = seq.ProteinSequence()
    consensus_sequence.code = consensus_code
    return consensus_sequence
drug_type_consensus = create_consensus(
[sequences[strain] for strain in (1, 10, 13, 20, 53, 54)]
)
fiber_type_consensus = create_consensus(
[sequences[strain] for strain in (9, 5, 11, 45, 66, 68, 78)]
)
# Create an alignment for visualization purposes
# No insertion/deletions -> Align ungapped
matrix = align.SubstitutionMatrix.std_protein_matrix()
alignment = align.align_ungapped(
drug_type_consensus, fiber_type_consensus, matrix=matrix
)
# A colormap for hightlighting sequence dissimilarity:
# At low similarity the symbols are colored red,
# at high similarity the symbols are colored white
cmap = LinearSegmentedColormap.from_list(
"custom", colors=[(1.0, 0.3, 0.3), (1.0, 1.0, 1.0)]
# ^ reddish ^ white
)
fig = plt.figure(figsize=(8.0, 6.0))
ax = fig.add_subplot(111)
graphics.plot_alignment_similarity_based(
ax, alignment, matrix=matrix, symbols_per_line=50,
labels=["Drug-type", "Fiber-type"],
show_numbers=True, cmap=cmap, symbol_size=8
)
fig.tight_layout()
plt.show() | [
"patrick.kunzm@gmail.com"
] | patrick.kunzm@gmail.com |
b9744bf4821bf09b5f67a2fb97e23214e355a077 | b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a | /examples/pwr_run/checkpointing/non_slurm/max_pwr/job2.py | 82a946d08723b6eac162e05a65750b65e8a2c09f | [
"MIT"
] | permissive | boringlee24/keras_old | 3bf7e3ef455dd4262e41248f13c04c071039270e | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | refs/heads/master | 2021-11-21T03:03:13.656700 | 2021-11-11T21:57:54 | 2021-11-11T21:57:54 | 198,494,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,940 | py | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 128
args_lr = 0.001
args_model = 'vgg16'
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_max_pwr/' + job_name + '*'
total_epochs = 5
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: checkpoint the model at the current epoch, flag
    this job in checkpoint.json and exit the process."""
    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint that already exists
    for f in glob.glob(save_files):
        os.remove(f)
    # current_epoch is maintained by the PrintEpoch callback.
    model.save('/scratch/li.baol/checkpoint_max_pwr/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')
    # Mark this job as checkpointed so the scheduler can resume it later.
    checkpoint_dict = {}
    with open('checkpoint.json', 'r') as fp:
        checkpoint_dict = json.load(fp)
    checkpoint_dict[job_name] = 1
    json_file3 = json.dumps(checkpoint_dict)
    with open('checkpoint.json', 'w') as fp:
        fp.write(json_file3)
    sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
    """Keras callback that records the epoch currently in progress in the
    module-level ``current_epoch`` so the SIGTERM handler can name the
    checkpoint file correctly."""
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
ckpt_qual_dict = {}
while True:
if os.path.exists('ckpt_qual.json'):
os.rename('ckpt_qual.json', 'ckpt_qual_lock.json')
break
else:
time.sleep(1)
with open('ckpt_qual_lock.json', 'r') as fp:
ckpt_qual_dict = json.load(fp)
ckpt_qual_dict[job_name] = 1
json_file2 = json.dumps(ckpt_qual_dict)
with open('ckpt_qual_lock.json', 'w') as fp:
fp.write(json_file2)
os.rename('ckpt_qual_lock.json', 'ckpt_qual.json')
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=total_epochs,
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
finish_dict = {}
while True:
if os.path.exists('finish.json'):
os.rename('finish.json', 'finish_lock.json')
break
else:
time.sleep(1)
with open('finish_lock.json', 'r') as fp:
finish_dict = json.load(fp)
finish_dict[job_name] = 1
json_file2 = json.dumps(finish_dict)
with open('finish_lock.json', 'w') as fp:
fp.write(json_file2)
os.rename('finish_lock.json', 'finish.json')
| [
"baolin.li1994@gmail.com"
] | baolin.li1994@gmail.com |
76bdb93913f9d300ebad0d08e7d3e540f3824537 | b87f66b13293782321e20c39aebc05defd8d4b48 | /maps/build/mayavi/enthought/mayavi/core/filter.py | 73f5839cd3d9347f5fc23d824b5081341fc89a4a | [] | no_license | m-elhussieny/code | 5eae020932d935e4d724c2f3d16126a0d42ebf04 | 5466f5858dbd2f1f082fa0d7417b57c8fb068fad | refs/heads/master | 2021-06-13T18:47:08.700053 | 2016-11-01T05:51:06 | 2016-11-01T05:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,988 | py | """The base filter class from which all MayaVi filters derive.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from enthought.traits.api import List, Str
# Local imports
from enthought.mayavi.core.source import Source
from enthought.mayavi.core.pipeline_base import PipelineBase
from enthought.mayavi.core.pipeline_info import (PipelineInfo,
get_tvtk_dataset_name)
######################################################################
# `Filter` class.
######################################################################
class Filter(Source):
    """ Base class for all the Mayavi filters.

    Concrete filters override ``setup_pipeline`` and ``update_pipeline``;
    this base class wires the trait-event plumbing that keeps a filter in
    sync with its upstream ``inputs``.
    """

    # The version of this class. Used for persistence.
    __version__ = 0

    # The inputs for this filter.
    inputs = List(PipelineBase, record=False)

    # The icon
    icon = Str('filter.ico')

    # The human-readable type for this object
    type = Str(' filter')

    # Information about what this object can consume.
    input_info = PipelineInfo(datasets=['any'])

    ######################################################################
    # `object` interface.
    ######################################################################
    def __init__(self, **traits):
        super(Filter, self).__init__(**traits)

        # Let the filter setup its pipeline.
        self.setup_pipeline()

    def __get_pure_state__(self):
        d = super(Filter, self).__get_pure_state__()
        # Inputs are setup dynamically, don't pickle them.
        d.pop('inputs', None)
        return d

    ######################################################################
    # `Filter` interface.
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* its tvtk
        pipeline.

        This method is invoked when the object is initialized via
        `__init__`.  Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup.  So upstream data
        will not be available.  The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters.
        """
        pass

    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.

        This method is invoked (automatically) when the input fires a
        `pipeline_changed` event.
        """
        raise NotImplementedError

    def update_data(self):
        """Override this method to do what is necessary when upstream
        data changes.

        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        # Invoke render to update any changes.
        self.render()
        # Propagate the data_changed event.
        self.data_changed = True

    ######################################################################
    # `Base` interface
    ######################################################################
    def start(self):
        """This is invoked when this object is added to the mayavi
        pipeline.  Note that when start is invoked, all the other
        information for the pipeline should be already set.
        """
        # Do nothing if we are already running.
        if self.running:
            return

        # Setup event handlers.
        self._setup_event_handlers()

        # Update the pipeline.
        self.update_pipeline()

        # Call parent method to start the children and set the state.
        super(Filter, self).start()

    def stop(self):
        """Invoked when this object is removed from the mayavi
        pipeline.  This is where you remove your actors from the
        scene.
        """
        if not self.running:
            return

        # Teardown event handlers.
        self._teardown_event_handlers()

        # Call parent method to stop the children and set the state.
        super(Filter, self).stop()

    ######################################################################
    # Non-public interface
    ######################################################################
    def _set_outputs(self, new_outputs):
        """Set `self.outputs` to the given list of `new_outputs`.  You
        should always use this method to set `self.outputs`.
        """
        old_outputs = self.outputs
        self.outputs = new_outputs
        if len(new_outputs) > 0:
            # Advertise the dataset type of the first output downstream.
            self.output_info.datasets = \
                    [get_tvtk_dataset_name(new_outputs[0])]
        if old_outputs == self.outputs:
            # Even if the outputs don't change we want to propagate a
            # data_changed event since the data could have changed.
            self.data_changed = True

    def _inputs_changed(self, old, new):
        if self.running:
            self.update_pipeline()
            self._setup_input_events(old, new)

    def _inputs_items_changed(self, list_event):
        if self.running:
            self.update_pipeline()
            self._setup_input_events(list_event.removed, list_event.added)

    def _setup_event_handlers(self):
        self._setup_input_events([], self.inputs)

    def _teardown_event_handlers(self):
        self._setup_input_events(self.inputs, [])

    def _setup_input_events(self, removed, added):
        # Unregister observers for removed inputs, register for added ones.
        for input in removed:
            input.on_trait_event(self.update_pipeline, 'pipeline_changed',
                                 remove=True)
            input.on_trait_event(self.update_data, 'data_changed',
                                 remove=True)
        for input in added:
            input.on_trait_event(self.update_pipeline, 'pipeline_changed')
            input.on_trait_event(self.update_data, 'data_changed')
"fspaolo@gmail.com"
] | fspaolo@gmail.com |
57e5d03c8401a81a3f6c47bf41522f40cacefad2 | b8faf65ea23a2d8b119b9522a0aa182e9f51d8b1 | /vmraid/desk/form/assign_to.py | 0b937b3dc417bfc913934af983ff322a20a1063c | [
"MIT"
] | permissive | vmraid/vmraid | a52868c57b1999a8d648441eb9cd05815204345d | 3c2e2a952003ba7ea2cf13673b9e79e127f4166e | refs/heads/main | 2022-07-29T18:59:28.585133 | 2022-04-22T08:02:52 | 2022-04-22T08:02:52 | 372,473,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,475 | py | # Copyright (c) 2015, VMRaid and Contributors
# License: MIT. See LICENSE
"""assign/unassign to ToDo"""
import json
import vmraid
import vmraid.share
import vmraid.utils
from vmraid import _
from vmraid.desk.doctype.notification_log.notification_log import (
enqueue_create_notification,
get_title,
get_title_html,
)
from vmraid.desk.form.document_follow import follow_document
class DuplicateToDoError(vmraid.ValidationError):
    """Raised when a document is assigned to a user who already has an
    open assignment for it."""
    pass
def get(args=None):
    """get assigned to"""
    if not args:
        args = vmraid.local.form_dict

    # Only non-cancelled assignments; capped at five rows for display.
    return vmraid.get_all(
        "ToDo",
        fields=["allocated_to as owner", "name"],
        filters={
            "reference_type": args.get("doctype"),
            "reference_name": args.get("name"),
            "status": ("!=", "Cancelled"),
        },
        limit=5,
    )
@vmraid.whitelist()
def add(args=None):
    """Assign a document to one or more users by creating an Open ToDo
    per assignee.

    args = {
        "assign_to": [],
        "doctype": ,
        "name": ,
        "description": ,
        "assignment_rule":
    }

    Assignees without read permission get the document shared with them;
    users who already have an open assignment are skipped and reported.
    """
    if not args:
        args = vmraid.local.form_dict

    users_with_duplicate_todo = []
    shared_with_users = []

    for assign_to in vmraid.parse_json(args.get("assign_to")):
        filters = {
            "reference_type": args["doctype"],
            "reference_name": args["name"],
            "status": "Open",
            "allocated_to": assign_to,
        }

        # Skip users who already have an open assignment for this document.
        if vmraid.get_all("ToDo", filters=filters):
            users_with_duplicate_todo.append(assign_to)
        else:
            from vmraid.utils import nowdate

            if not args.get("description"):
                args["description"] = _("Assignment for {0} {1}").format(args["doctype"], args["name"])

            d = vmraid.get_doc(
                {
                    "doctype": "ToDo",
                    "allocated_to": assign_to,
                    "reference_type": args["doctype"],
                    "reference_name": args["name"],
                    "description": args.get("description"),
                    "priority": args.get("priority", "Medium"),
                    "status": "Open",
                    "date": args.get("date", nowdate()),
                    "assigned_by": args.get("assigned_by", vmraid.session.user),
                    "assignment_rule": args.get("assignment_rule"),
                }
            ).insert(ignore_permissions=True)

            # set assigned_to if field exists
            if vmraid.get_meta(args["doctype"]).get_field("assigned_to"):
                vmraid.db.set_value(args["doctype"], args["name"], "assigned_to", assign_to)

            doc = vmraid.get_doc(args["doctype"], args["name"])

            # if assignee does not have permissions, share
            if not vmraid.has_permission(doc=doc, user=assign_to):
                vmraid.share.add(doc.doctype, doc.name, assign_to)
                shared_with_users.append(assign_to)

            # make this document followed by assigned user
            if vmraid.get_cached_value("User", assign_to, "follow_assigned_documents"):
                follow_document(args["doctype"], args["name"], assign_to)

            # notify
            notify_assignment(
                d.assigned_by,
                d.allocated_to,
                d.reference_type,
                d.reference_name,
                action="ASSIGN",
                description=args.get("description"),
            )

    if shared_with_users:
        user_list = format_message_for_assign_to(shared_with_users)
        # BUG FIX: ``alert=True`` was previously passed to str.format()
        # (where unused keyword args are silently ignored) instead of
        # msgprint(), so the message never showed as an alert.
        vmraid.msgprint(
            _("Shared with the following Users with Read access:{0}").format(user_list),
            alert=True,
        )

    if users_with_duplicate_todo:
        user_list = format_message_for_assign_to(users_with_duplicate_todo)
        # BUG FIX: same misplaced ``alert=True`` as above.
        vmraid.msgprint(
            _("Already in the following Users ToDo list:{0}").format(user_list),
            alert=True,
        )

    return get(args)
@vmraid.whitelist()
def add_multiple(args=None):
if not args:
args = vmraid.local.form_dict
docname_list = json.loads(args["name"])
for docname in docname_list:
args.update({"name": docname})
add(args)
def close_all_assignments(doctype, name):
assignments = vmraid.db.get_all(
"ToDo",
fields=["allocated_to"],
filters=dict(reference_type=doctype, reference_name=name, status=("!=", "Cancelled")),
)
if not assignments:
return False
for assign_to in assignments:
set_status(doctype, name, assign_to.allocated_to, status="Closed")
return True
@vmraid.whitelist()
def remove(doctype, name, assign_to):
return set_status(doctype, name, assign_to, status="Cancelled")
def set_status(doctype, name, assign_to, status="Cancelled"):
"""remove from todo"""
try:
todo = vmraid.db.get_value(
"ToDo",
{
"reference_type": doctype,
"reference_name": name,
"allocated_to": assign_to,
"status": ("!=", status),
},
)
if todo:
todo = vmraid.get_doc("ToDo", todo)
todo.status = status
todo.save(ignore_permissions=True)
notify_assignment(todo.assigned_by, todo.allocated_to, todo.reference_type, todo.reference_name)
except vmraid.DoesNotExistError:
pass
# clear assigned_to if field exists
if vmraid.get_meta(doctype).get_field("assigned_to") and status == "Cancelled":
vmraid.db.set_value(doctype, name, "assigned_to", None)
return get({"doctype": doctype, "name": name})
def clear(doctype, name):
"""
Clears assignments, return False if not assigned.
"""
assignments = vmraid.db.get_all(
"ToDo", fields=["allocated_to"], filters=dict(reference_type=doctype, reference_name=name)
)
if not assignments:
return False
for assign_to in assignments:
set_status(doctype, name, assign_to.allocated_to, "Cancelled")
return True
def notify_assignment(
assigned_by, allocated_to, doc_type, doc_name, action="CLOSE", description=None
):
"""
Notify assignee that there is a change in assignment
"""
if not (assigned_by and allocated_to and doc_type and doc_name):
return
# return if self assigned or user disabled
if assigned_by == allocated_to or not vmraid.db.get_value("User", allocated_to, "enabled"):
return
# Search for email address in description -- i.e. assignee
user_name = vmraid.get_cached_value("User", vmraid.session.user, "full_name")
title = get_title(doc_type, doc_name)
description_html = "<div>{0}</div>".format(description) if description else None
if action == "CLOSE":
subject = _("Your assignment on {0} {1} has been removed by {2}").format(
vmraid.bold(doc_type), get_title_html(title), vmraid.bold(user_name)
)
else:
user_name = vmraid.bold(user_name)
document_type = vmraid.bold(doc_type)
title = get_title_html(title)
subject = _("{0} assigned a new task {1} {2} to you").format(user_name, document_type, title)
notification_doc = {
"type": "Assignment",
"document_type": doc_type,
"subject": subject,
"document_name": doc_name,
"from_user": vmraid.session.user,
"email_content": description_html,
}
enqueue_create_notification(allocated_to, notification_doc)
def format_message_for_assign_to(users):
return "<br><br>" + "<br>".join(users)
| [
"sowrisurya@outlook.com"
] | sowrisurya@outlook.com |
ad3b204e0353db2d321706bbc3b27e91899eaa08 | 8a0f8d4b05e26f04dd584ed51ab77c78f00740a8 | /monasca_notification/types/notifiers.py | a1040a14ac468bbb4580173f5a8bd071dbd5e838 | [] | no_license | TonyChengTW/monasca-tony | 1ce9d8fb9299864dc049c9caef0f6ce10b871670 | c2b3e70a3fd331410a642aec1a5e0ac58d4dc7e4 | refs/heads/master | 2021-06-27T02:14:06.889700 | 2017-09-15T02:46:25 | 2017-09-15T02:46:25 | 103,096,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,097 | py | # (C) Copyright 2015,2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from monasca_common.simport import simport
from monasca_notification.plugins import email_notifier
from monasca_notification.plugins import pagerduty_notifier
from monasca_notification.plugins import webhook_notifier
log = logging.getLogger(__name__)
possible_notifiers = None
configured_notifiers = None
statsd_counter = None
statsd = None
statsd_timer = None
def init(statsd_obj):
global statsd, statsd_timer, \
possible_notifiers, configured_notifiers,\
statsd_counter
statsd = statsd_obj
statsd_timer = statsd.get_timer()
statsd_counter = {}
configured_notifiers = {}
possible_notifiers = [
email_notifier.EmailNotifier(log),
webhook_notifier.WebhookNotifier(log),
pagerduty_notifier.PagerdutyNotifier(log)
]
def load_plugins(config):
global possible_notifiers
for plugin_class in config.get("plugins", []):
try:
possible_notifiers.append(simport.load(plugin_class)(log))
except Exception:
log.exception("unable to load the class {0} , ignoring it".format(plugin_class))
def enabled_notifications():
global configured_notifiers
results = []
for key in configured_notifiers:
results.append(key.upper())
return results
def config(cfg):
global possible_notifiers, configured_notifiers, statsd_counter
formatted_config = {t.lower(): v for t, v in cfg.items()}
for notifier in possible_notifiers:
ntype = notifier.type.lower()
if ntype in formatted_config:
try:
notifier.config(formatted_config[ntype])
configured_notifiers[ntype] = notifier
statsd_counter[ntype] = statsd.get_counter(notifier.statsd_name)
log.info("{} notification ready".format(ntype))
except Exception:
log.exception("config exception for {}".format(ntype))
else:
log.warn("No config data for type: {}".format(ntype))
config_with_no_notifiers = set(formatted_config.keys()) - set(configured_notifiers.keys())
# Plugins section contains only additional plugins and should not be
# considered as a separate plugin
if 'plugins' in config_with_no_notifiers:
config_with_no_notifiers.remove('plugins')
if config_with_no_notifiers:
log.warn("No notifiers found for {0}". format(", ".join(config_with_no_notifiers)))
def send_notifications(notifications):
sent = []
failed = []
invalid = []
for notification in notifications:
ntype = notification.type
if ntype not in configured_notifiers:
log.warn("attempting to send unconfigured notification: {}".format(ntype))
invalid.append(notification)
continue
notification.notification_timestamp = time.time()
with statsd_timer.time(ntype + '_time'):
result = send_single_notification(notification)
if result:
sent.append(notification)
statsd_counter[ntype].increment(1)
else:
failed.append(notification)
return sent, failed, invalid
def send_single_notification(notification):
global configured_notifiers
ntype = notification.type
try:
return configured_notifiers[ntype].send_notification(notification)
except Exception:
log.exception("send_notification exception for {}".format(ntype))
return False
| [
"tony.pig@gmail.com"
] | tony.pig@gmail.com |
f70d19fa63aaa8a4a7b649010d1203aedd440435 | a26923adfffd7cab44d15f76156476ccc4f11b70 | /src/GimelStudio/datafiles/icons/icons.py | f58511183cf2ac83186b037d356bfde75c756943 | [
"Apache-2.0",
"MIT"
] | permissive | MarioPeper/Gimel-Studio | 89c81aeb133390f0c8de9286ca5aa92d9a5af9ff | 7eee20adfdb701034017131ebbd706e8f8166b81 | refs/heads/master | 2022-12-11T14:51:50.127304 | 2020-09-02T21:59:16 | 2020-09-02T21:59:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,909 | py | from wx.lib.embeddedimage import PyEmbeddedImage
#----------------------------------------------------------------------
ICON_EXPORT_IMAGE_DARK = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AACySURBVDiN3dE9DkFBGIXhx09iBTqhYgUanaVoNHagUqglektgAQqV2IRGNESjECUJBTeZ'
b'guRm3EQ4ySlmcr73O5nhX1RBLitYA3tMsoDWscPt6fEnsBq2ASzxKAZWxeYFLPHg3eC7N2l5'
b'fAS00cMJ3SAzxzmmbefZ6pAmnI/Z8NvAQopMDkcsscq6QKrtiRYoR3JmGEIxuCyiGQErox9C'
b'Ql0igNfwEAJLWEcAYRo59wXdAfAZIXVxkZWFAAAAAElFTkSuQmCC')
#----------------------------------------------------------------------
ICON_GIMELSTUDIO_ICO = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAAIAAAACACAYAAADDPmHLAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'ABJ0SURBVHic7Z17bFzVnce/5z5n7Bk7ie347ZAQv5I4iEAgCwtqQ0O6wLaQRYCQVgWpgn1J'
b'u39QpF0JEPzVlRB/QDb7BxVSBRVKl+3CqpWKohLahqWEAFviOjG2N4nt+BGPn/O8cx9n/7h3'
b'7mPGY49f47nj85FQ5r6POd/fOb/zOy+AwWAwGAwGg8FgMBgMBoOxJSDF+EhTU1Mrz/N/Synl'
b'ivE9xuIQQm4kk8k3I5FINHNO2OiPtra23g/gZwBqCSmK3hhLEAwG/wLAsczxRuYI19LS8gIh'
b'5EUAzPJLB8rzfMXVq1dTAMBvxBc+/PDDexcWFs5PTEw8iCJVM4yCIclk8l8TiUQa2IAq4LXX'
b'XvvHF1544bXx8fEcq3/ggQfQ1taGM2fOYGhoyD7f0dGBo0ePrndSGADS6TTeeuutvNfXVQDP'
b'P//86ZMnTz6mqqrnvCzLePnll/HEE08AAM6cOeO5/uijj+KZZ55Zz6QwLGKx2MYL4OGHH97G'
b'8/z5d999tz372u7du3Hq1Cl0d3cDAFKpFK5eveq5Z9++feuRDMYqWLNz9uSTT94RiURGz58/'
b'n5P5x44dwwcffGBnPgBcvnwZuq577uvq6lprMhirZE0lwIkTJ/7uyy+/fCMej3uEJAgCnnvu'
b'OTz77LPIbvpdunTJc1xXV4fa2tq1JIOxBlYrAOH48eM/+/zzzx/LvtDQ0ICTJ0/i9ttvX/TB'
b'bAG4SwdG8VmxAJqbm1uampp+29fXtyf72pEjR/DGG2+grq4u7/PZAmD1/+ayIh+go6PjO1VV'
b'Vf0TExOezCeE4KmnnsI777yzZOZTSlkJUGIUWgKQgwcPvjQ3N/cipdRTqYfDYbz66qs4fvz4'
b'si8ZGRlBLBbznGMC2FyWFUBzc3NNVVXVL2ZnZ+/NvnbgwAGcOnUKbW1tBX0s2/olScKePTk1'
b'CaOILFkFtLa23h4MBntjsVhO5j/yyCN47733Cs58IFcAHR0dEIQN74/KgVIgoWhF/24pkvf/'
b'fltb2zMA/k1RFM89sizjlVdeweOPP77ij/X19XmOi138UwC//mIMv/r8OmJJDbVVMh67Zxfu'
b'6KgpajpKiRwB1NXVhYLB4E8opTk5vGfPHpw6dWrVgZvNbgH89x9G8V+fjtjHkQUF//6rbxAO'
b'7kd3a1VR01IqeKqApqamzoqKis8Wy/xjx47h/fffX3Xmx2IxjI6Oes4VMwL41dAs3rcy3zAM'
b'+zwFcOar8aKlo9SwBdDa2vp9nue/MAzDY5aCIOCll17Cm2++iaqq1VvJpUuXQCn1nCuWABYS'
b'Kt46MwTz6xTjY9c9aZmcSxUlHaUIBwC33XabCOCnACrdFxsbG3H69Gk8/fTTa/5QdvHf1NSE'
b'7du3r/m9hfDeJ8OIJs0eypnpGSQTCWia4wQuJFTQfA+XOQIAzMzM7ABQ7b5w99134/XXX0dN'
b'zfo4SNkC6OzsXJf3LsfEbBKf9E0BAFRVxezMtPk7nYYoigCAaFLF35w8j7pqGbVVMuqqZdRV'
b'BVBXHUD99gB2VssQ+PIc1JS3FSBJEt5++210dHRg7969aG9vz+nYWQnZAti/f/+q37USfvE/'
b'I9AN076nI1N20b8wP4+KSqfAU1Qdo5EERiOJnHdwhKCmSsbexhBuvXkHem7ahqC0IYOpik5e'
b'AZw9exZnz561j8PhMDo7O9He3o6Ojg4cOHAAPT09CAQCy35E13X09/d7zhWjBTA8FcfnA6bF'
b'K4qCeNSJQsZiUYxdH0UoHEYgEIQoinkFblCKqfkUpuZT+PRyBAJPcNveHfjenS1orqnY8L9j'
b'Iyk4ChONRnHhwgVcuHDBeVgQ0NTUhL179+LgwYPo6elBe3t7TnDo6tWrSCaTnnPFcAA/vngD'
b'GV9vZjoCmlXTJ+JxJOJx84AAAi9AFEUIoghRlCCKIkTJ/JfnHYvXdIrP+qfxxeAMvn+kFQ/d'
b'0ezbgY9rCsNpmobh4WEMDw/jo48+ss/X1taiq6sL+/fvR3d3NyYmJjzPBYNB7Nq1ay2fXhbd'
b'oLhgWb+u64jH4ks/QM2/R9M0IEusACCKIipDIVSGwggGgwBMIfznJ8OYi6Xx10d3r/vfUAzy'
b'CuDTTz9FX18fBgYG0N/fj97eXgwNDXna0PmIRCI4d+4czp07t+j1rq4uj0VtBJdGFrCQMD3/'
b'WDQKrNHPV1UV83OzmJ+dRSAYQF19AyRJBgD85o8TuLkxhLu68/eElip5BbBz5040Njbivvvu'
b's8/FYjFcuXIFAwMD+Prrr9Hb24u+vj4kErmO01IUo/j/rH/K/h2LLqzbewkBlFQKYyPX0NjS'
b'Blk2faDTvx/GnZ214Dl/VQYrqgJCoRB6enrQ09ODEydO2OcnJyfR29uLixcv4ptvvsHAwAAG'
b'BwdzAj8ZiuEADoyZDp9h6Eimcov0VUFhz3IwKMXM1A00tpj+znw8jS+HZnC43V/9CuvSFVdf'
b'X4/6+npPaRGNRnH58mUMDg7aVUhvby9SqdSGdwKlVB2Ts2amK4qy1tIfAEAIBXG5egSAkkpC'
b'SaUgWy2hjy9Obk0BLEY4HMbhw4dx+PBh+5ymaRgaGsJNN920UZ8F4I3saZq+5L3LQmAKiBJn'
b'jhOhIOAAUCTiUVsAg2MxGJSC89EcyKKGtwRBQGdnJ2RZ3tDvpNKuTM9TDRUMhZXhDsRSBSEE'
b'acXpR1BUHden16m6KRJlGd+URaeFwQtraG1Yuc7BsX4CeKoCRVE8j1y7sUxzs8QoSwFsD0m2'
b'Ny7LgdWHsKnp9Zv1iWnxZt5T2K+kOnTd6ViKLCg5ryllylIAksChpdYM0fI8j1AotLYXEnPk'
b'c6Y6IVY5kLlouPyM+Xh6bd8qMmUpAAA40uXMNtq2Y4en2C4UAoBY1p85YZYClhCIKQbNcAQw'
b'xwRQGvz5vjq7C1eWA6jaVr3ME24sp49QM9NBHA1QwLvkAQV1zXWMp9bY6igyZSuAcFDEsVsb'
b'7OPaup0IWDH85bB9Bur2/Khj/YTY1g9CPJ1Mmr58qLyUKFsBAMD37mzBtkoJgJmpzc0tqAxV'
b'LvMUdZp+js8HgIBQKxRInSoBgCfiqer+GltU1gIISjz+/qEOCLyZU4Tj0NjYgobGJkh5YhHE'
b'5dzZ/xDXsfUzU0qYzqFzSdX8VQIUf1ZGkWlvCuOHx/fiJx8OQtPNOj0UDiMUDiOtKEgmk0in'
b'FWvNAgJiOXgc4cBxBAYFdDWNtJLKNnznN3Uy3W9VQNkLAACOdNZiW6WEk7/sRyzptNklWc5b'
b'EmRDqRn2nZuOwLC8fjsW4KPQbzZlXQW46Wqpwo9/cCu+fbB+VV22hBBUhqrQ0LLLns5GMhFC'
b'f1X7HrZECZAhFBTwg/v24OEjLfhD/zS+GprBtRtxJNOFN914nkfNzkZMjWcmuRCsIsRQMmwp'
b'AWSorpRw/FAjjh9qBAUwG1UQV3QkUhp0w/T+KwMCAhIPVTMwMZvCb/53HH0j5sASSQ4gUFGJ'
b'VCJuNQ39q4AtKQA3BMCOsIwd4fz3NNdU4Jbd2/DPP/0jpubN3r+KyjBSyaQrWuhPtowPsFYE'
b'nsNd3U54WQoELMP3ce6DCWBF3NzoFBM8L4DnOYBwvvYBmABWQMN27yQYXhBzYkR+gwlgBWRP'
b'ByM+t36ACWBFZAuA5zi7n8CvMAGsgLTmdfgoKAjHgfjYEWQCWAHJrIWlqEFzxgj6DSaAFZDI'
b'ihhSanYu+df+mQBWRE4JQA0QkDWtm7DZMAGsgLm4ayMMSkENa9iwj4sAJoAV4F5MypwlbY0d'
b'9NmEUDdMACvghksAuq5bw4JdQ8R8CBPACpiadyZ9GJrqDB5jPsDW4Ma8M+9P0zVrdLB/Mx9g'
b'AigYVTMwG3MmfRi6v8b/54MJoEAm5lKeqt7wlAD+LQWYAApkcta7nKymaeasYZ/DBFAg47NO'
b'/Z9pAmbmBxDWDCx/xmccAeha2o7/E8KxQNBWYMJVAui65popvImJWgeYAApkwuUDZNYDIISY'
b'A0J9LAImgAKYT6iePYZ0XXO8f0LMasCn+DflRWRixrvwk6HrpvUDLBS8FXAX/6AUNDMZlDPj'
b'AH4OBjIBFMDYjLMUrqZr1uJRHDLTwqiPnQAmgGXQDYqvhmbt40wE0Kr+wRHCxgSWM2e/nsSN'
b'eVc3sJq2HUAzBuDvDiEmgCUYiSTw899fc05Qc68h2JYPa40g4r7FV2z5yaGLkUrr+L+JGN4+'
b'ewVp15IvSjJuOoCEs2YEk5x1gVMrmGpeCjABwIzyDU3EMDgWxeB4DNcjCRhZTTtd15BMmdPB'
b'7UzPdAS6bmUCKHHSqoErkzEMjkftDM/sKZgPSikS0TkQak4EAcy5AFymJHAVArpBkVYNSKI/'
b'ateyF8D0goKB8SiGxqMYHItheCpubyNXKInYAnRNB8cRe21AcMRZRDKr5p9LpLGzevnd1EqB'
b'shSAblD88vx1/LZ3EjPR1S3daugaNFWFmlagplMQBBFyRQiqkoRhGNYK4tZCkVl6ml5gAthU'
b'3v7oCj6+OFn4A5RC01SklRR0zcx0Sik4zmnuSYEgREkGCIGSjFtxAM5qAnoV4KcVw8tOALOx'
b'NH7Xe2PJe3RdtyxbgaqkoGqqFcwhVjHPmYM8qDnYg3AcJNlcfVwURKgE9rpAiw0Jiyz4ZzPq'
b'shPAn4bnvR48BVQtDSWVhKooSCtJ6JoOswPP9OgzgRwuM7LHHd8nBIFA0LU6PAEvyDAM3aoB'
b'zBKAwtlTKDLPSoBN40/X5uzf0flZzM1OA5R6eu84Ylo3xxE4YV3OdurMPLWsnxBIAe8i07wo'
b'wFB06xmzinCPC3APHyt1/NFWKRAKc8PIDPFYFDCoE6q1V/iGPY4vM73bLsRtoZi/RUkG4bwL'
b'Qwi86IgHZingXjF8bDrpm4hgWQlgNJKwN2wwDANqWrGLaDM/XdYN2NE8JyM5OHF+87wcWGRz'
b'aELA8YIVFOKQ3ReQUnXM+MQRLCsB9A3P27+VZMLlmlkZaq3/7x7Fa8dxiFMiwLJugRPBC6J9'
b'785tTtOO50XnBUBOJ8CYT6qBshKAu/5PpRIwrdvZx49YxbpZX7uCOsQ1sINwtiBk1wYTVRUi'
b'/uHBDltUHMeBI7z1HJczLGzMJ9vHlY0ANJ3im+tR+1hJJOHZ6zVT22eW9iQu67fv4Oz4Ps/x'
b'EFwriR89WI+2nZXobKmybibgMlvSLRILGI2sbD/lzaJsBDA0HkVKNTtizG3gVbvbFrCKfUId'
b'67f+ta0fsB1AAh6SHLRdQ4Hn8O2D9QCAo7c429AQjgPhOPMbWX7ACBNAcekb8db/OdZPKZyF'
b'nV17/mTusJuDBBwxl4LNcGdHDaqtrWcO3bwD1RWi9VZP+8HDaCRublBR4pSPANwOYCrhtPcB'
b'8680C4DMMD6n/M8EgjJ1PwgEOeip092bTwk8wbes0sB8Ta71A2aVdH269EuBshBA0hrAAQCg'
b'FIp7u3gr4z3WzzmW6+7bzyz4JMmO9Xc0V+Gmeu/Gk8cPNSIUtGJoSwwH88M2smUhgMujC3YX'
b'r6qmoVvj9gG423mO9Vvn3Rs/ZQQiCCJ4wQmQ3n9rY873KmQBDx1uXjZdTABFom/Yaf4pSdP6'
b'7RgPdUf6MjN57SPrXs4uJaSgE/ipqZJx683bF/3mfbc0oKZq6f2Grk0xARSFvmEn/JtSEq5W'
b'mWPZdouAZpx/x+nL+AIcL0BwBX6+c0tD3v2FRIHDI3/WumS6hqfiOUPLSg3fC4BSZ+o2pRTp'
b'ZMqJ91v3ZOL9bqfPsX5iW78oy3adLos87j2wc8lv372vDvt35d+SNq0amI8vPdxss/G9AAgB'
b'qitNq9U0FaIkQpRkiFIAohyAKEsQJRmCLEMUZEiSDF6QwEsiRFECL4jgRfM/WXYif3d116Iy'
b'sHRnKQHww/v32s3CbCSBQ2iZd2w2pZ26Arn/UCNO/+4aRFFCXePSxXIhBCUef3nH8k4eAGwP'
b'Sfinh7vw4//og6J6RwR/62A9RKG0bay0U1cg372tCY/dswvh4OKWWCgEwJ6GEH70V/uwI1zY'
b'hpIAsLs+hB+d6Mbu+hAIgAqZx4OHm/H4PbvWlJ5iQABg9+7d9ZqmTbgvDA0Ngef5xZ8qUSiA'
b'REpb9r588DxBQFzb35xSdUgClzNhZLOIxWI4cOCA51wqlQpPTU3FgDKpAjIQYNl6e6NZq4CK'
b'TVlUAYzVwwSwxWEC2OIwAWxxmAC2OEwAW5y8baZDhw4VMx2MDYIu0xmVVwDz8/P5LjHKCA4A'
b'FEWJAijtbivGepGYmpqyh0xxADA2NpYghLwIwMj7GKMc0Akh/wLA7rXyBKybm5treJ7P38HN'
b'8DWGYcyNjo7ObHY6GAwGg8FgMBgMBoPBYGwO/w9m2y+b5y+5rgAAAABJRU5ErkJggg==')
#----------------------------------------------------------------------
ICON_EXPORT_IMAGE_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAC9SURBVDiN3ZIxDgFBFIb/J5s4wXZCxQk0OkfRaNxApZDoJHpH4AAKl1BqREO0oiT5NGuN'
b'WMnO7CbC173kf9+8NzPSXwDUACtL1gKOwLywFGgCB57MisgawJ53piGyOrDLkD0YferNvBOg'
b'I6mWlF1JA0lnSX0ntjKzS8i0vWSqU558xfuEnxdGOTIbSRNJ/g9QBum3AdaS4kDP0szG0uvK'
b'kaR2gCyWNHQlKWZ29bUBN7d2hVVg6ytMWAT2fYE7BjSG72txFD4AAAAASUVORK5CYII=')
#----------------------------------------------------------------------
ICON_NODE_IMAGE_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAEWSURBVDiNzZM/K4VRHMc/P/cWi00Go5QY7iZZMQuLP3kB3oHFC7BIWUQZbgazlWKyGWVT'
b'7qSbURaDj+Xcerqe514nhW+d+v3p9znfc04H/ruiE6g7wMIPWHsRcV0vFBrADHCZCRoCloAm'
b'QL2r+RQRazk0dSwBARjIdNNXfw9U19VDdbas332H/WAN4DylW+pIlkN1uKvUBl5S/BARH1lA'
b'oKnudpKIaAOTwBwwXzZQeWR1E1gBltXniDhNrUGgFRHvZXOlDtPdHHSMAcfqasr3gVt1PMfh'
b'ETBayGvAmToBbKRNbtRF4K0nMD3EVVpl2i7EU8BdT2BEvAInFbAvSl+vEjitPn4XllSrAl4A'
b'rUxYUfc/mP1FfQIELlqo3A3nVgAAAABJRU5ErkJggg==')
#----------------------------------------------------------------------
ICON_NODE_IMAGE_DARK = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAD3SURBVDiNzdGvSoRBFIbx3+qClm2LYaMIomGbiFXNohb/4AV4BxYvwCKCRRQMi8FsVdBk'
b'M4pN0CRiFItBDDvhY5jdYVyQfWHgPefMeebMDMOuWsXvYWkA1gFu65VEG3O4LgSNYwUdqEfF'
b'F2wUAlsBCEYKm7MaCuAmjjGfKsZvmFMbl8HvoBlvyE3YiOJ3fAT/hJ9SYAf7EXAaC1hMNfS7'
b'8jbWsIo3nIf8GF7xnWrqNWETR8HXcIr1EB/iHpMlE55gohKP4gJT2AqH3GEZXzlgAzdhpbRb'
b'8TN4yAE/cdYDllKrH3AWzwUwus+RBF7p/t5f9ThA7z/qF2jpIcxRRrEJAAAAAElFTkSuQmCC')
#----------------------------------------------------------------------
ICON_SHOW_DARK = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAEZSURBVDiN7dO/K8VhFMfxF1EmYsFkISlkYPDrLzCQSTLamGRjkM1oMLMqyqCk5B/Qpavu'
b'pUxKiZIYhHIZHFHut+9lMvjUMzzPec77fM7pefjXn1NZSrwNI2jGI/LYwNVPC7ViB69F1jNW'
b'UFsqbAIP4WIeXWiKNRiwR5yjOw02hQLWUY0OHIazArbCWQuOcI++JNhkJK2iPM7OkMMARgOw'
b'FrFaZHBXzGklLrzPpyHOaqLA4pd7+zj+sh8O95vFHPaGgwOfA9/DDWawhBcsRKwT1zhBfULX'
b'+gOaR0+Al2OfwSwqMI5bnKIxCfah9rhYwC6mMRTtzSEbbW6jLg32oapIvvT9HeYwlpSY9lMq'
b'vM+2GU8By5bq6l+/0xvVOkgmg9v1bQAAAABJRU5ErkJggg==')
#----------------------------------------------------------------------
ICON_SHOW_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAGGSURBVDiN7ZM9a5RBFIXvDO8uVoat1CpNVIQYLLSIH79BsRAJYmVnJ3ZaiJ2lRWpthQiC'
b'iIXiHxAVhV0FiyAIEsGIKeSd+1zfYzOB4G5WxcYiB6aZj3Ofc7ljtqP/TmnaYSnlUM75TEpp'
b'zszarutGvV5vJaW09ldV2rY9CDwGNGF5RCxLGvyRWURcAL4Da8B1dz8iaVbSLHAqIpaB1t0/'
b'uPvRqWbufhno3P2epN2llMPAy0rWAQ8kDdq23Q+8AjaA49uZXQK6iLgjKVfa98AQOBkRZ4EN'
b'd79rZiZpALwAvo2RSuoBHwGXtLfuzVSqm5v3gGfAmy3tOV3p748RAos1wvPNhgNPgS/AFeAW'
b'8AO4YWZWSlkAPgNvJe2ZGBs4UU1H7n6sxroNjGq8q5KaiFgCvgLvJO3b6jE2h6WU+Zzzipkd'
b'MLMnkh7mnFfNrJE0b2bnzGzBzB41TXMxpbQ+ke6Xnu4CrgGfJszhMCLOb/d26k+p8RYlzeWc'
b'S9d1w36///q3RDv6J/0EndSGOUdbS8cAAAAASUVORK5CYII=')
#----------------------------------------------------------------------
ICON_HIDE_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAFpSURBVDiN7ZM9S5xBFIXvhEWWVME2pLBfxCYgspgqIIQ0QcHCFJJIhPyP/AARUoQU6WIZ'
b'UiwpBIs0FlaCICJhBUG0EMKqu75zz5Mid+F1ff0IKWw81czlzjPnHmbM7nVnAp4DX4Havxx6'
b'IekHsC1p091XgAbwBiiADDy7EVQUxZSkI67XKfBy8OyDwYK7f6zVaq2U0jCw6u6zZtY0s0kz'
b'+1lqHTKzR9c6i5GQtAc8zjm/knQ64Kydc56Putz93VWwLwHb6Qct6RzoAe0+zd1/Rb4jwAkg'
b'4PUFGPAwAgYYjdqTUlYA34Bj4Kxk4kOY2L/kMOe8ELd14vbx2COpLWk91mvRPw24pB7QqBwb'
b'WAxIf1RJOgC6wImk70AdWI6+HjBWZqQK6Cczexvrjpm1gC0zG0opPTWzZkqpDhymlCZSSruV'
b'7gIwF3n9lrQhySveX9fdl66EDADr/P1Oo6X9e3f/HGPO3Ap0r//SH7cBxHpydqkgAAAAAElF'
b'TkSuQmCC')
#----------------------------------------------------------------------
ICON_HIDE_DARK = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAI8SURBVDiN7ZIxaBpxFMa//3kJ5ryYogRFqemdQcOllFooVolkUKFQXIqBDFdCzRWzKBfI'
b'pA4HFUeRUqiQpQWHlk6lQ6dCM3YoSIcMKUVSW1JaiCKJiWLu38VAaoxLhy754C0Pvu/93uMB'
b'l/qfigJ4CYA92ySjHBsbG/fq9Xq62WxeMxgMXYvFsu1yuR4XCgU/gHLfHwawNXL0+vr6Xb/f'
b'/xsAHSyDwUABUEJIG0Bs0MsONhKJxLNyubxmMpn0paWl9w6HY5PjuO+9Xo+pVCqFvb29BUII'
b'rFbruCzLV0ql0sVksiy/IoTQxcXFb9ls1qmq6v25ubn2WUKGYXYjkchDr9fb5nleT6VSyaFh'
b'KysrLwghNBqNftE0jQWAmZmZriAIHY7jdk/XDYVCNQDI5XKCJEmHk5OTuqqqD/4K0zSNs9vt'
b'vbGxMZrJZG4AQDabvUoIoRMTE6eEb3w+X8Pj8Ryd+pLJZAEADYVCP84RplKpRzzP6/Pz8we5'
b'XE4AcIdlWR0AdTqdu7FY7CPDMDQej38AAFVV41ar9UQUxU4mk7k+dO10Or3G87xus9m6hJAO'
b'AF0UxZ9ut/tYkqTD5eXlt5qmGRVFeTo1NaX3w26ezTj3h4IgbNZqNQUARFE8CAaD73ie36aU'
b'jjcajdvVanVhZ2fHGAgEfoXD4WA+n/86lK4vuX+vViAQ+DQ9PX2CgT+cnZ09VhTlyUUBg4RG'
b'AM8BFAB81jTNuL+/v9pqtW6xLHtkNpu3isXi61FEl/p3/QFBU779NNk9pQAAAABJRU5ErkJg'
b'gg==')
#----------------------------------------------------------------------
ICON_IMAGE_SLOTS_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAEBSURBVDiN7ZKtSoRBGIWf13XB4CUoWCyywb8gaNPqgmDQqzBs8Q4E70C8gkWbQZNBbeJi'
b'tCxoEw02Nz6GHXU+f2Dgq3tgmPMemDNn3nlhhLqIT6JuAkfAO7AbEbfqMrBW6HUWEf2vSr3z'
b'GydJ27ccWwDj2Q3XwGLiN2l/BnqFCd8qlRrqurpSaPAnxjI+CbSAltqsYwqA2s36cZC0jjoo'
b'XG2o9nAq49NpbwIThZkaUB2beeCQ4djsRcSTOgPMFhreR8Trz2c31PjnQBHyhDvAMTAAtiPi'
b'Sl0FNgq9uhHxkKfrZZ9ymrRag30OLAACF0l7BC4LE778UtQlda7QYIQhPgDE9v1/YKTjTQAA'
b'AABJRU5ErkJggg==')
#----------------------------------------------------------------------
ICON_IMAGE_SLOTS_DARK = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AADpSURBVDiN7dK9LoRBFMbx34tNFC6BRKMRha9CQkdLIlFwFQqNO5C4A3EFGzoFlQKd2Cg1'
b'Ejqh0FFSvEd2ljdxkm33SU7m5D8zz5ycOQzUr6oiX8cRPrCDWyxiJel1hscS3OEr4iTYfsH+'
b'i00YKQyvMR/5Tawv6CQrfP8NKqxiKWnQqKEiH8NMRKsf0x+1dftxEGwPn8nYoLeH40U+EWsL'
b'o8mChukdm1kcqsdmF8+YxFTS8B5vTa9Uf8/mVV7exrG6H1u4wjLWkl5tPJSgo/spp8H6Guxz'
b'zMXmRbAnXCYrfG2CC5hOGgxU6xvfNEQnUxceCwAAAABJRU5ErkJggg==')
#----------------------------------------------------------------------
ICON_SETTINGS_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAF4SURBVDiNzZM9S5xBFIWfdzUh4kdikTSCWAguCCIGqzQqmMJfIDZiCrEQbYLYWfsDJEhS'
b'5C/EQqw2abSwUVMkSEDBQEAloDYqok+K3IXJqrtZk8ILw8yc857znjvDwH0otfZ/mk2pn9Qn'
b'6qI6k/K5O3jWAJ3AEvDqX5INqxsxHgfWr26r82prtQkLwFegkGXZMUCWZR+BBqAfqCuXplvN'
b'xzqnvlWfqoPqploXXK96qD6o1N4b9Yvapc74u1rVXfW7+lldVn8GN17U3nb9O8AEsJVgk8AU'
b'MAAcAa+BFqAn9tdS5dVVdUE9iz9fqMf+Wetqjzpdqc169V0iXFGbgxtTrwJ/UdaoxLQpMcyX'
b'cIXA36vtt3nkEkFjnEuxLku+Le5HgW9qW6V0c+oP9SSSLKkNwQ2rl4EPqU0V202M+xLxqbqf'
b'HMOB+rCc/qaXshfzOfAIeAacAR+ARmD2r9NFwtG44Xr1ZSQbCS6vdlRlGMIsWa+pz6s2ubf1'
b'CxOqfOZ2Ar0KAAAAAElFTkSuQmCC')
#----------------------------------------------------------------------
ICON_SETTINGS_DARK = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAFNSURBVDiNzdK9SpxBFMbxn6sRxPhVWAkiISELQhDFKo0KsfAKgk1ICrEQ04hlAulyAUFE'
b'Cy9BthBB2KTRwiaJhZJGCyGQiBBtFCRq8Z6F0aDrrin2wDDzPofn/54zZ6iRaPifsGl8QTvm'
b'MZsmc1UA69GLAt7cp7KX+BqrLbRh/MBHdFdaYRE7sR+F9hkPA9x0m7kP+TjnsIBOvMC3xDyI'
b'AzwoV80ctvFMdukXspb2sI8trOAwchMl403j38UkvifalGzCI/iDGXShP77/iTzW8Qmn8ecz'
b'2X1dJGszIG/LtdmMxcS4io7IvcZ56M/LgdJoTYD5a7li6Et4fBdYCz4kwCfX8muutt5TDvge'
b'P3EchoLsjZE96r+hj0Und46hxHyCX0lVv9FYCQweBbA06RJ4OfZ3lQJfySbcjNEAjkcuj6eV'
b'AqEuOW9goBpIbcYlP91RvnE0nWMAAAAASUVORK5CYII=')
#----------------------------------------------------------------------
ICON_PANEL_ASSET_LIBRARY_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAE5SURBVDiNzZM9LwRRGIWfcyNBEIVmWyrJVhQSiVqpVvkFOoVo1RqFSkPrH0gkEg3xE3Si'
b'Ib5DloR5j8Ks3Ay7MtmCk9xMzpm8zz33Tgb+uwRgewLY6oFzLWkpB87b3pf0Yvu9JmwAeEsp'
b'DX0ltucjwrYX6lYrimI7Ip7avi9/GRGLRVFM1WROA6kKvAVIKd0AFzWBz8BDFfhcPg+AY2As'
b'GzgDmlmLu7LADHAUEbOSGlXglyJiRdJq20satX3K5+VjeyeltGH7UNJwdT5Vg171rWFKqWX7'
b'IYsMXAKDpX+sBZS0DqxX4vHc2G52An47su21iHhtL9uNiLiNCJdrr1bDMuvPS3cD/NqwV/3U'
b'cFPSbuavJE1mm78CLUnjQKsr0PZy3f9Z0txPwBfgBBiR1PELdtC97fOaM3+oD2jnihCfxJ1i'
b'AAAAAElFTkSuQmCC')
#----------------------------------------------------------------------
ICON_PANEL_ASSET_LIBRARY_DARK = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAEISURBVDiNzdO9SgNBFIbhJyGgItrYpNVGwUoLQbBOaW3lFXgD3oGthVWKYOsdCFY2egva'
b'iY3iPwYTUIjFzOKwrCGjon4wnD27O+85Zz6G/65ajHPY+wbnBpspsIVD9PCWCRvHKybTly0M'
b'sP6F7tp4LpJG6eMGljKBy6iXgXcx3uIyE9jFYxnYjfEIJ5hJNpxjMeniPjawgmOsolmuMu/j'
b'DHfic7GmBbOKvBMLDAQj2jgrQHU/rLIp8CI5k9jJFSZi/jQKOB15FGWNvI1+spqCCcUZHgyr'
b'VDVyA2NJXqv451P9iim72E/yaywkxfuCcbMxDgVuyb/Pa1XAHk4xJTiYowdcZO75Q70DpdM6'
b'pbj/xAgAAAAASUVORK5CYII=')
#----------------------------------------------------------------------
ICON_PANEL_NODE_REGISTRY_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAEqSURBVDiNzZQhS0RBFIXPuQjCgkZhtVkUFgSNBm0W8Q9sEn+A2V9g2GA02ATxJ5htFsMi'
b'WAxi8aE2QVSUd4/l7WNc543Ms3jSvcy53xwY5gL/XRwVkvoAOn9gnZJ8rYHu/glgonUycpnk'
b'MAS4pGMzG2SyNiXVMyGQZtYBMJMJnA6bEPghqSdpO4dGcr4q374duHtRluVBZjpI6ru7JM2O'
b'JxwZltx9LzZsZmckT6oATyR3xj2xV30xs+uGQA+/JY4BpyStxMwk79sAn0leNPjv2gC71a/5'
b'IZICcJ4LvCW53+C/ScZrAPYkHcXMkg4BXOUCL0luxMwkH4N6F8AQwFojkOQigK1EgAVJYb/u'
b'7qtkvWMQbpsCQDcBS4rkHMki3IeT4QUtgO9tZ5P6AnipZrpK8g3nAAAAAElFTkSuQmCC')
#----------------------------------------------------------------------
ICON_PANEL_NODE_REGISTRY_DARK = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAD5SURBVDiNzdShSkRBFMbx34IgCBoX3GhRWBDZaNBmEV/AJD6A2ScwGIwbbIL4CGabxSCC'
b'xSAWF7UJoqLgNdwVZi97Lsze4gfDnDPznT9nZmD472ol8TZmGrDO8J4ufKNoMFZgKgH+4ASH'
b'mZ1tpjUpsKU8cjsTOJcmKfALXexkAheG80d1Y4CjTBjlYxboMNrhn5axHxSf43QYv2C3ahgH'
b'fMNtAHyq6TQEzqIX+B8nAb7iMvA/TAKcV170OBW4yAXe4yDw39XBImAXx4G/j5tc4BU2Av9z'
b'Eu/hGmt1wCVs1TSwWMnXsRqZB5r9Nh1G/8PpSp6rzwa1sX4BxKhA1ClbQa4AAAAASUVORK5C'
b'YII=')
#----------------------------------------------------------------------
ICON_PANEL_IMAGE_VIEWPORT_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAE2SURBVDiNzZO9SgNBFIW/E0SU1d46rZV2gnUEFUErbQQ7JXbWIohaWNhZWfgENqYSfQAb'
b'H0JEGzURDAT84VjsrITNJmG00APLzD0z9+PenRn47xKA7TJw/AvOo6TVdmAFuABawEckbAh4'
b'l5R8O7YrTrUQW5rtE9uvWTyQW1+2PRHJnARKeeBzGJ+Au0hgE3jJA5thvJJ03ivb9iCApLcQ'
b'TwFj2XqpS1432DxQB+ph3qEoIDALJOGbK9qQP5T2akrAFnAk6TPY+8BwmO9FAYEqcAiUgQ0A'
b'SffAWo+c4pbDyzkI4brt3eDL9lk4iEJ1VBhaPQVG2uxt2w3S67UEzNhelHTZF0j6sxPgJuev'
b'kF6vzN+x/dAXKKkG1IraKZLtnsDNH7zn6SJgC7gGRoHxSGADuI3M+UN9AUk8dRLbNYW6AAAA'
b'AElFTkSuQmCC')
#----------------------------------------------------------------------
ICON_PANEL_IMAGE_VIEWPORT_DARK = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAEaSURBVDiNzdM9SwNBEMbxX4KIEu2t01hYaSdYRzBB0EobawutrEUQtbQTixR+AhutRD+A'
b'jR/AUgQbXyIYCPiCFreBY3MkuVjowDJzz879d+5mlv9uheDLOPoF5xFraWAFF2jhMydsBB8o'
b'pcUKvrE4QHV1vLUfhqLNFUznBM6gGAOfg3/CfU5gE68xsBn8Fc56AIaDfw9+FhPtzWJHener'
b'4SWsWlZCXuCCpJslVLMS4qbEh23hEF9B28doiPe6nTypc2w2g3bco+o6btNVZFkZByFex26I'
b'CziVNCLTsj65iBOMpbRtNCTjtYx5LOGyH2BV8tNvIn1VMl5tfQcP/QDPwxrIYuCG/Pd5LgvY'
b'wjXGMZUT2MBdznf+0H4A39svYzh8rVIAAAAASUVORK5CYII=')
#----------------------------------------------------------------------
ICON_PANEL_NODE_PROPERTY_PANEL_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAEZSURBVDiNzdQ9K4VxGMfxzyWZhLI5ZZOynM3gJTApizKYT1m9B+Q9eBkoBpMMKA9lF0ki'
b'g0wuw31O3d3OLecM+E3/6+nbdf2f+O+KziIzVzHcJ+cDOxHxVgY+YwzZZ1ONiLgbrAR3sYYl'
b'nEXEXma2MFIDO8RKuwZUgdrFTTy27RmM1wAv6trtjJy4qSmu0yQmdBn5HfvY6hE4j42OUQYm'
b'nnCP2W8ADxFxmplXWMdtOdhtD6ew/Q3wAKeKGzFUDX4BRsSx4iD60hdgZk5jsW1e4gitUsp5'
b'ROz+GIhRxbWBV8VYzVL8qacOI+IEyxV31a7VwE8TfwO4ieuqszryQmY2eoDOKV5KV+Ch4s3W'
b'fQR1elF8Ku891v2RPgHf4kEe/10hEAAAAABJRU5ErkJggg==')
#----------------------------------------------------------------------
ICON_PANEL_NODE_PROPERTY_PANEL_DARK = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAFqSURBVDiNzZQ/L0RBFMXPve/lzWxikehIdCLR6BQ+ApVEI1GoN9H6Dojv4GMsCYVKFEj8'
b'SfRCRIQo3jO7m3s0nqx9nvW24VRz7p35zT1TDPDfJfnCe79GcmhAjoUQdgGkn0Dn3LOIjALg'
b'IEOp6kSapndxd4dkE8C6qi6b2VkIYc8511DV4RLYIYBVkut5Ie7dEUXRsJnNisjjx80zJMe+'
b'o5G8UNXiuMBnZAK4+VXQHCAySXK8EFlE3sxsH8B2FWAURQsANnPfHZki8pQkyX2r1Zr7AfCQ'
b'pump9/6K5AaA2+5+4Q07nc6UiOyUAUkeADgFMKqqSW+/AMyy7BjATBmwnwpA59y0qi4BgJld'
b'hhCOarVaI++b2XkIoflroKqOkJz9sK/1ej1pt9u5h4g8VZowy7ITACu5DyGg2/eT9t9STQMD'
b'SW6Z2XVv/UtkEVn03k9U4M6TnCwDHgIYA1D2EZTpBUAzjuO3iuf+SO9+tYYYmSlw0QAAAABJ'
b'RU5ErkJggg==')
#----------------------------------------------------------------------
ICON_PANEL_NODE_GRAPH_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAD/SURBVDiNzZOxSoMxAIS/+9VVKE66iQgOQhWcHRzcXaXdBRdfwfoCPoGji4NOLn0FERfx'
b'LcSKSqnDOZhCTPPTP3bpQUg4Lh+XkMC8S+OF7QtgeQbWuaTXGDgClmYA7kp6qiLDwGWAloxu'
b'TF2M1gLawElhs7064BdQUX7shTB/5ICPwCGwkWw6k3Sfo9k+Bjph/x/gWOvAZuLtAFlgqmp6'
b'5Leh7dUmwVzDHtBKvBXgxvaBpFERUNJ1Lmh7C+jbvo3sIfA+rWGdroA+sB95b8BpHGp6hwDf'
b'TUIlDT+B58Qb/Bso6QHYTv3wDmuBR+HyS7RWB7xj8rk00QB4IfyU+dcPejs9g0qYLJgAAAAA'
b'SUVORK5CYII=')
#----------------------------------------------------------------------
ICON_PANEL_NODE_GRAPH_DARK = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AADzSURBVDiNzdMxSgNBFMbxX6JphWBlOhHBQkgKTyBibxu0t/QKah88hY2FVjZeQezEG1gq'
b'ERXRYi12hclmZ91hm3wwzOPjzX/ezLxh0dUJ4nOstGCd4iU0vpG1GCPoBsAMF+gljqOwquUg'
b'7mCI48Sj7sSAn0XFvUTgUjG/VwEfsI+N0qIT3EaAYxwW62eAf1rHZskb1QBn1P0/BXmFa00S'
b'qyo8Q7/kreIKu/L2SgJeRnK3cIfrwPvCWwz+jEnN5nvmm/lV/igZBjS/Q/hpklR15Jg+8Fjy'
b'pm2A99iu8Md1wAP55adoEAPemG+XJpriSfFTFl+/FyQ1xhQpUrcAAAAASUVORK5CYII=')
#----------------------------------------------------------------------
ICON_PANEL_OBJECT_EDITOR_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAFESURBVDiNzZSxSgNBEIa/EUVUsFPQdBaiiIWlARuxEwTBIkQD6bQUH8NCS2ubNCrWPoKd'
b'CIqN2B0BsVGIYtDf4vZgOO5izmv8m52Znflm9tg9+O+yxJBUB0ZLsFpm1vHALjBYArhkZjce'
b'8A2cAocFQeu+xgON+MiTBYHj3vHAT2ABaBYEzoT1PQ18Ba7M7CBdIcmAtQxYG1gEVoA3gIGs'
b'lpIqklYljYXQELADfAHnwG6wm7lzS4okHUlaltRRrPsEKmkirC+SjpOYpHrInc6bsAaMBHse'
b'qAKY2XM6MSuWBbx2dhdoSDqRtJ97vF5AM2sBW8Ad8bdrAHvAxp+AAXoB3KbjkjaJL3Fb0mxW'
b'baGnZmaXv+X0Ap4BD85/6qdpGjgnadv5j34ztZeo6h3/t4mAqX6myFHFzCIPHPYNisrMPkoM'
b'k68fHaqCUFbeCJ0AAAAASUVORK5CYII=')
#----------------------------------------------------------------------
ICON_PANEL_OBJECT_EDITOR_DARK = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAEkSURBVDiNzdQ9SwNBFIXhJxIRFey0iJ2FKCLYGrARO0EQLCQxkE5L8WdYaGltk0bF2p9g'
b'Kyg2YicBsVFQMfhR7BbjZhJd03hgmHvP3vvO7O4w/HcVgriCoR5YDTyHRgufPYw5KAbADxxi'
b'N+fOlsOeEFiQvPJYTuBImITAN8ygnhM4kc4vWeAjzrATaSpgKeI3MYsFPEFfh1XHsYjhNO/H'
b'Bt5xjM00rnfb+h32MC/5/Z+4CqCj6fyA/cCrpLWlTjtcx2AaT6OcxveR2jYvBjwP4hZqOMB2'
b'pLZNMWADa7iUfLsatrDyVyCc4CLir0oOcROTscZizOyi058KugGPcB3kt79ZMQucQjXIbzLP'
b'q9pVjnhIzmEvt02J7/fhQCbPq9ceejvrCxMUSpHwnMXkAAAAAElFTkSuQmCC')
#----------------------------------------------------------------------
ICON_MODE_SHAPES_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAACgAAAAoCAYAAACM/rhtAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAGFSURBVFiF7de/SsNgFIbx52g7urmoi6KTDg66KLgJooMODi5uzr0Cb8Ab0Kk3oJuD0mso'
b'QgVRwYI4CKWToigu8jpIof3a+CcnSTvk2UryHX5J+doEBjz764mSisAyMAWMAe9AA6ia2UMq'
b'uj/CJiWVJT0puktJe5KGs4SZpH1JHz/Awq4lzWWBK0g6/gesvRdJa2kDD2PiWr1Kmk8Lt+3E'
b'tbrT98ZKFFeUVE8IKEmlpIGbCeIkqe41DQWft7wDg2bk3NUhcNEzLKIFz+IQOO4ZFtGEZ3EI'
b'TOOfwLWTQ2DDMyyiR8/iEHjjGRaRa2YIPPUM61ETqHoGdDxuSRoB7oFRz9C2boGrGOtqZnbQ'
b'84ikUsI/1nE6b3nCrxjgCDiLcdWp1AU0s09gF6hlz+mu1x3EzJ6BFeAkW053PYEAZvZmZjvA'
b'OnCRHamzwm8nmFkFqEiaATaAaWAWWE3ZFj9JS/3cxQNVDvSWA73lQG850FsO9JYDvQ088Nfn'
b'wYiaQDlJSFCcN8H+9AV4AvHh+4oAUAAAAABJRU5ErkJggg==')
#----------------------------------------------------------------------
ICON_MODE_DRAW_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAACgAAAAoCAYAAACM/rhtAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAGySURBVFiF7dchaFVRGMDxc8WpT5GhoMJgsLAgCGIQtIgMbAuaLAabmsRuMVrEaFDQMgVh'
b'mCxWN1gwCCuCiiDiHAZFmGM49zPsXHmO53vH8h3F/csL78L78Z173zk3pc3+0TBc2/DbMIpX'
b'uINttT2/1IVrm8H+2q6UUk9c22uM/624tgUcqQmc6oNr+4yT0bB9+bOD6QLk/UjcCN7gFrag'
b'wbU+uGkMReH2YL7rxx+gk7+7jNWauJ2Y7TGhORzI15zBUg3cEB73WcZ38tOK47gdiWtwtw+u'
b'7QsmQ1AbgDcLcG33onFX/wD3CFsjceexVoh7gu2RuNP4Voibwa5I3ASWC3Fz2B2JO4xPhbjn'
b'2BuJG8eHQtwL+c85CtfuryW9xEgkbjgvV0lvMRaJ6+BpIW4RByNxg/bX7j7iUCSudH9l/UR8'
b'NAyXgTcKcUs4EY27WIj7iolo3A7rZ7dBrah0dLpSgFvF2Rq4kul9x7lwXAYOmt4aLtXCdfC+'
b'z/02hWNVcBnYa3qLuI7RarCM23jvPcMF+X22enl6K3iIU7U9KaX088UFTUppOaU01jTNQj3S'
b'Zv9ZPwCQRXoGQWMRDAAAAABJRU5ErkJggg==')
#----------------------------------------------------------------------
ICON_MODE_EDITTEXT_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAACgAAAAoCAYAAACM/rhtAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAEBSURBVFiF7ZgxSgNREIa/CQHBLqawtE6nFxHBxi4p7byBIFZWHsBGSGcbvEq6HEBNmYjd'
b'b5GIS9yQgVl4a5ivmp2dmffB8IpdSGLYTyDpDjgv6LLJrZlNupXECXBayqaGPkCntMUuulvy'
b'M+DF0T8ALhx1j8CXo+4GOKx9I+lZv7w6hiHpSj6OnPPeKj0j+AcrTsEorRes3uIHYLyO5wVc'
b'AC6Bg3U8DU9r+hbX0foVp2CUFIySglFSMEoKRknBKCkYJQWjpGCUFIzSesFtf7f+IKkHDDfS'
b'Z872a0mfleelmT15Gm13yQpJA5r4mF7xbmbHnsL9WTHwAdw3dO6ioTnl+QYYeZDpIUHevAAA'
b'AABJRU5ErkJggg==')
#----------------------------------------------------------------------
ICON_MODE_IMAGEINFO_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAACgAAAAoCAYAAACM/rhtAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAL2SURBVFiF7ZhNiFVlGMd/z5Rj0pgao+4i+xAiXeQURpSUSMO4aBXuRWjrSnHZLAcVoYXS'
b'KqpNQZgtMshFblKQNJkUNIpAyVGqMZOaJp35tbj3yD3X986cL5sW/uHC+55znv/zO88597wf'
b'cF/1FHUN1GXAM8ATwADQD/wF/Ax8HxE/1c1RBeopdVQ9o844t66oH6gj6gP3GmxI/UydnQeq'
b'l35U31IfbBpsQD1YoFpFdVZ9oSm4Z9ULBZLOqO+om9SN6ph6e47rp9WddeFeUicLVmVfIn5/'
b'gbgDavk/q/qceqMgnOqLCY/hgrFjvTj6esCtAj4HHilxT4sTx1YVjN2tbi+cST1aonKZPlX7'
b'OjwG1PES8X+qa7tZ7nr26jbg48J3k9dp4AvgIWAb8FjJ+GMR8XpPQFvfp4u0RoWF0nBEfJl1'
b'ut/BN1lYOIA9nZ1uwB0NJroOfFch7lV1Tda584jVQeAqUHa8vAVcaMOMt3/nIuKyur7dL6vd'
b'EbEXoHNM3FQB7hfgyYi42eP8ogpwAJuBvZB/xEMVjFYC19Rj6kjifH8FT4Dns0Yn4NMVzZYA'
b'W4DXEueqVnBQXQF5wJUVzTKlplBVKwgwCHnAJTXMIF2tOhPUhyEPOFXDDNIVrKMpyANO1jSs'
b'vb7p0m+QB/yh4QR19HtE/Ap5wLMLBJPSt1mjE/A44H+OktZXWeMOYERMACcXBOduHc4a3ZOF'
b'9xpONFsh5lREnM863YAfAlcqwtxOHJup4JNbn+QAI2IaGK1gCs0AfgMcmfMKtU89UWItcUv9'
b'Wt2c8HpU/cRiq8N/1A2FbkN9XL1ewPSSuryA3yL10Dxeu1KxPb/+7YocJb2czDQD3JjLp0P9'
b'tMfXhN4HtkdEuc+c+oY6VaCSdfSRWnVaBurL6sQ9AJu1tX+T3DwoC7laPdIg3CV1a22wBOiI'
b'eqoG2KT6ttrrXWwM9BX1XVu7p/NpytaaZYe6tGyuJvao1wLrgDXACloT1z+ACVq7FGci4u+6'
b'ef63+hetVCIbjKF85wAAAABJRU5ErkJggg==')
#----------------------------------------------------------------------
ICON_MODE_MAGNIFICATION_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAACgAAAAoCAYAAACM/rhtAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAK6SURBVFiFzZhLSBVRHId/R7HEJBEqqOzhRjO1goqCkCBaVYsosBblxmpvbYIoaFuLHsuo'
b'ZQ8qImgRpEVQlEiFES6EioRArehpSL6+FnObjscZ75254x0/uPC/h/n9z3fnzJ0590qzHBMn'
b'BKyQtE1StaRFkr5L6pf0TNIrY8x4YoYRxfYCz5meQeAEUFlIsSXAgyxiLp+BHfnOnXWJgQZJ'
b'9yVVWcOjkh5L6pS3tPMl1UjaLmm5HZd03BhzJl/RMLkq4KN1ViaAS8DSkOOLgWagz8kcmgk5'
b'Azy0JhoG9uSYrQQ6rOwfYFXSgvuca2p/xHw58NLKtyct2GU1vx6zxzpg1OrTmJRctXP26vPo'
b'ddvqE/nLUhQyvtWqe40xPfH0JEk3rLopajhMsMGqu6I2dei26jog0tMrTLDMqr9FVprMJ6uu'
b'kFQcJRwmaH/KeVGNHMqtesQYMxYlHCb41qpXR1aazBqrHowaDhN8Y9WbgKqQ43Jhp1U/zaPP'
b'f4A5mYf9P87G7LMQGLL6tCYimGl+zmo8AqyP0eOO1WMAKE1ScAHwxZmgLsesAS44N/u2xOSs'
b'iQ46k/wEWoHQWwWwEnjk5DqAsOt9WnLZD56SdNoZfi/pmrz94ICkUkm1knZlXiXWsUOSaowx'
b'/XEEcwI4hrdlikvyyxsguRF4kqPQmPN+BNg845IZ0S3ARaAbGLckPgD3gBagEfjhSL4DKgoi'
b'6QiXAGUB480BZ/ZmwQWnA7gSIHk4bS8foBR47QgOA2vTdvMB6oHfjmRP0GWRGsCRgKW+nLbX'
b'JICrAZIH0vbywfsZ2usI/gJq03bzATYw9Yn0ApibtpsPcDRgqc+n7eWDtxW76whOALvTdvPB'
b'+++mz5H8ivfH6OwAaGLqpqITKMmeLhDASUewF1iWtpcPUAS0Z+RukcZuJxvAYqAlbY/Y/AU3'
b'GCLQolAzYQAAAABJRU5ErkJggg==')
#----------------------------------------------------------------------
ICON_MODE_MOVE_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAACgAAAAoCAYAAACM/rhtAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAJeSURBVFiF7Zi9axRBGId/bwyCHyQqFopGJH6gEhIDJmlEsPGvEMROUFCx1SYIIoKIaCGE'
b'xF6wsFMRBEu1EAtREtD4kQtGTBQFUXgsdlfm9vZ2Z+Jt9oo81bDzzjvPzszNzK20TBsB7AUe'
b'AZurdmkglpsh4hWwpWqnfwADwGfqeQP0VO3WTC7hLbCjSrn9OXIJ08CuquTmCuQSZoC+dpVL'
b'mAX6Q/uyRcitkHRO0oaM6lOS1sTlq5J+x+Vfkn5KmpM0bmaE9tsSgJozYqtbkbOjFUnKpFAQ'
b'6ABOtrpjoA8YKYrLFQRM0g1Jx1ol5rBW0n0fyaYAV+L19NQz3nsNAiNx3PyiJIFLTmfPShAc'
b'dmLngeEQuYupPaxsQX9J4ELjHsvzEgSHMvr5Cgy5cZ2pRmckjWbkWw9MeDh2OeVbwJ+c2I0Z'
b'z9ZJegAcMbP6dQ8cz3ijhFpOXRl8ATZJ9dvMQ0mTHqO0FNw0s5qUOouBrZIeS0rf4V5LOu+R'
b'eExSd1w+qugMbkavpMsZz6+Z2dmmrYAeYDI15C885EJ/JIMZU3s9HddwkpjZe0mHJU25j30E'
b'A0nfaMYlnfZvHY3kVPxmLz3bhIzggBN7Gwi/uADbYskyBe8AnXmxPpL3ShDsB+7+l5yTrLs4'
b'KliwC1hZlNPL3swWfOJCMLNvPnFtf6MOnv94Wk5IWpVR/UPSrKJtaZTGs/iDolOi3D9NwCHg'
b'e8ZGm8c0sLNUsZTkQWAhQG7pP4EAB4huHu0n50gO0vz7TLVyjuQ+4FNK7l1byCUAe4CPjlxv'
b'1U4NALuBJ8D2ql2WSfgLxvNnV/V80n8AAAAASUVORK5CYII=')
#----------------------------------------------------------------------
ICON_MODE_NODEFRAMES_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAACgAAAAoCAYAAACM/rhtAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AALcSURBVFiF7Vg5ixRBGH2vj+3FxFuUNRAP0EBhjQy8Ml000NRfIYiJbCCIBgaKiSCoqaLg'
b'iQYmIpjJuguKJopnIq6CCuJYxzPQ3emu7pnpnR1wgnnQUHzf+773prqqu6eAAQb4v+DMQNIS'
b'AGmQnybp8oEWvJ8kfwS8YQALKzS/kLQBdzGAobY878xTb43yl/RrXdjdOzMV8py1F0KerD0U'
b'8rw10u/fW0s9rXnUihdV/MK+wsDgfJHMDAjc8uBkPklk38MCATcBTuRjEfC41Nn714qiy2GY'
b'aTJdihH3vPiqiseQXBeSVnrnTnRbDwBR7M6S2Yt2nHkYbGyUi152Ww8AFPYwTR+04/T9GuyV'
b'wSeMk+E6l4Rzc2k8u0mcMSdArSm4j9MjAL7Dub0VtYsAzYxFslFH0BnjwpicOealTYH2OMl3'
b'zV0cYR/E0WJp4ziQpYJulpQ839QxVAcS9hDcWQgacxbAu75fg80ZFMcBLC2ms0/IvfAlnI6k'
b'awCAyK0S4ru9MEHhJMiLhWCavC0aTJL7VcWSZg1GkT4wHpr4G29sRGk1dWmwzaOm72/xwOB8'
b'MbsGvTX3AWzOJxkn2wHYsKjX8NZcB7CtoO38GLPsedKMYCWE1cXSRgJknQ0Ki2XtwTpmBK2v'
b'CK8AAm1yCMjN4LxAbBB0oye9AjQNCj8AfCum1aMHSUeUtSUL5AxGSbqrqlLSSHXPoY+EO9Ba'
b'U4cFLIDwieSlSkoST/7T3t+qy5xusfdcJ2N2AACsBYCvLe0RowAWIsIU4+T2XHTy6PjBKmlE'
b'zn7sVgDEVBSno52J1agzgx5A6b9Jbfxd212jebJgzG6QiwrZOH5A8mdBr9HYgjheG/DukPQF'
b'nrVjALKSYhw/JFnYEDJmO8hlbXl1TxactefLPJWMeGumBycL/YC+N5j/oj4F+uXFdPY5LIik'
b'K4j0LIxLKh7JOXMUxHBJMU3fhyGCZ0B/tRNvgAH+B/4Am5+fzHc5K18AAAAASUVORK5CYII=')
#----------------------------------------------------------------------
ICON_MODE_PAINT_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAACgAAAAoCAYAAACM/rhtAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAHlSURBVFiF7djPi41RGAfw5yWbGbEwqGmaomRjMgsWkmTlH1BKKbOg7KwUC5SUoiSKWExK'
b'ip2Fwk5Zq0uTpV/JZCWZZlLmY/FeY2buve9775257xnluz6d8+ntPM95z4n4n86CU7iW2tE0'
b'OCPPo9SWhuCcvzmZ2rMguDAP9xn9qU1zwUULM5baNBdcWoSrYXVqVyDDdY05mNpWhHua2vYH'
b'd7MJ7hdGVwLuRhMcPFgJuFstcD+xLTXwSgsc3E6NO1uAm8ZQStwYZguAl1Pi9mGmAPcF61Lh'
b'tuBrAQ4Op8L14U0J7nkSXB04XoL7huFUuCMlODiWCrcDUyW4x6lw/ZgowX3AQCrgvRLcDHal'
b'wh1vY9+d6Hb+VUvEZRExGBEvCobdzbLszlLWWZZgD94u+nLPsKZqyCZcxSf5OftefvHpw0a8'
b'ruNqqj7KsBXvWuyzGgYwUq/Yapux/KfzZUkxPKmPXV8prr7ooTaqlWVuJ51U8ek2x+3vBtIq'
b'bQFxICJ2tznnhu45jSkFynvd+Q7mnOye00VwtM29R952tleJ24sfHQCruaFhUP4kNt0B7pVe'
b'N2aMyk+GTjKLh1jbU9w85E7cx2QJbEL+CDTSS09Wgh2OiKGI2BwRUxExG3mVfsyy7HsvYf9M'
b'fgOPtZYFSq8ckgAAAABJRU5ErkJggg==')
#----------------------------------------------------------------------
ICON_RENDER_IMAGE_LIGHT = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAFLSURBVDiNzdK/S5VhGMbxzy0HiVAUEkXcy8nBpSWooaFfuAlOtgnpkP+FYkvTmVsNair6'
b'A1rCP6AoUAppEhelwRNyO/gIp7f3vOeczQtuXm6um+9z3c/zct0VvYzMnML4gJxORPzuB3yD'
b'ZzjpAxvFn4i4UwdpZeaTUu8zc6NftMxczMzvV32r4o/hAz4ZfN1/1Cqn3MJL3MB5RDwtK9du'
b'gc+Yxb2qP1K+U9jE3wFCjJT5SZf3VwuEU7zqkWosM19ARHQwj+mIOGgCNmkb7cxcK/1MSfmf'
b'qo9Sl+4+1kvbzsxjLONuZj4cGojb2OnqF/Cz1CN8GQY4h13sNczMDwr8hSU87uHfxDnO8LUJ'
b'GJm5gm+lqppx+Uc8x25EtJsSdvC2JOulB9jHIX5UzW7gBLZw1AC7OvR1RLyrMwMycxKrfUDd'
b'+hgR+0PMXyNdAHQnbBx7Ro5nAAAAAElFTkSuQmCC')
#----------------------------------------------------------------------
ICON_RENDER_IMAGE_DARK = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAEmSURBVDiN1dRPK0RRHMbxz0ySNDJlMsmeWVnY2CgWFv5lp6zYKSx4F8TGata2FCvyAmzk'
b'BRBFJCvZkIWRsrhn6jZ/7lxWPPXrnl/POd/7nHtPh7+uTIJXQFdKTgVPrYB7mMVrC1g73jHY'
b'yGzDdKgjrKVIN4zrOCCuHI5xKv126xJBD9bRgS/MiLbcbM0Z+jBaa2bDs4ANfKYIkQ3z86Lv'
b'1xAIb9hpAslhJYwrKKEXd0nAJG2hjOXQF0PKOtX+lEYaw2oYl/GCeYxg4jfAAWzH+iHch5rE'
b'+U+A/djHRcKcUlrgA+Yw1cTvFB2xD1wmATNYwFWoWhVFJ2JJlL6clLCCg5CsmcZxi0fc1Jpx'
b'YDc28ZwAq750F4eNzOptk8diC1BcJyHlP9Q3XoMwQFZIQmgAAAAASUVORK5CYII=')
#----------------------------------------------------------------------
ICON_GIMELSTUDIO_LOGO = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAABHNCSVQICAgIfAhkiAAAAAlw'
b'SFlzAAAOxAAADsQBlSsOGwAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA'
b'AAEqSURBVDiN5dO/ahRhGIXx3xsWl6jBIv4pRLAIIVikMwYEtbCylMUqV5C7iBBEENIETJWb'
b'sLZIEwtbq2AsLKzSCGoXjkUmsobdnYkLNjnNfPPxzDPnZb7hwqWGb5L0sYGH2Ec6ehaqavDX'
b'TpI7SXaSLCTZOk+rJK9O1zPNxl28xUscDjdLUkm2k1ztIu8111l8wnNcx4ch5jHWMEhyhNWq'
b'+tEmPMZBVe2OYOaaSW6hjwd4P0440zZCVb3DJvZwuXnB2LQKG+nrqnqCz86cjH8SDmW+Degs'
b'THIJN9q4XhuQ5DYGeNEUuDaJ79JwF2+wjF+4P61wBT/xDetYmgS3joxnuOLknH5t/vdOwntJ'
b'nk5gF5Ms4ssI7ubpovjzBR91aDsu36vq4xTP/8f8BjkhVz0eggmvAAAAAElFTkSuQmCC')
| [
"correctsyntax@yahoo.com"
] | correctsyntax@yahoo.com |
a5373811bc314524609755af1eb7c1cf21b3e2e9 | b2644f42e5645f74d33e0c64ad1183172ce2be88 | /handpose/utils/image/__init__.py | 3e8695f9eb9756e8424bb94bdeb3284989271ed0 | [] | no_license | YanWanquan/handpose | e9282b2a8485f52d8b286c20a0f0d3f1e97b7b0b | a755ff80011007ba124ff5cd4c47f0c99ca28b8b | refs/heads/master | 2023-03-17T07:55:04.049356 | 2020-03-20T06:02:12 | 2020-03-20T06:02:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | from __future__ import absolute_import
from .image_utils import *
from .colors import *
from .draw_detection import draw_detection
| [
"tsai.tsunghua@gmail.com"
] | tsai.tsunghua@gmail.com |
0a157e41b41803be90eeb623662ffb1759f363f5 | d324b3d4ce953574c5945cda64e179f33c36c71b | /php/php-sky/grpc/src/python/grpcio_tests/tests/unit/_rpc_part_2_test.py | cfc11baddff39543138f63ae0e742d6a16e53802 | [
"Apache-2.0"
] | permissive | Denticle/docker-base | decc36cc8eb01be1157d0c0417958c2c80ac0d2f | 232115202594f4ea334d512dffb03f34451eb147 | refs/heads/main | 2023-04-21T10:08:29.582031 | 2021-05-13T07:27:52 | 2021-05-13T07:27:52 | 320,431,033 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 18,869 | py | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test of RPCs made against gRPC Python's application-layer API."""
import itertools
import threading
import unittest
import logging
from concurrent import futures
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit._rpc_test_helpers import (
TIMEOUT_SHORT, Callback, unary_unary_multi_callable,
unary_stream_multi_callable, unary_stream_non_blocking_multi_callable,
stream_unary_multi_callable, stream_stream_multi_callable,
stream_stream_non_blocking_multi_callable, BaseRPCTest)
from tests.unit.framework.common import test_constants
class RPCPart2Test(BaseRPCTest, unittest.TestCase):
    """Second half of the application-layer RPC tests.

    Exercises all four RPC cardinalities (unary/stream request x
    unary/stream response) through the channel and handler fixtures set up
    by ``BaseRPCTest``, covering success, sequential and concurrent
    invocation, cancellation, and deadline expiration.
    """
    # --- thread-pool selection for response consumption ---
    def testDefaultThreadPoolIsUsed(self):
        self._consume_one_stream_response_unary_request(
            unary_stream_multi_callable(self._channel))
        self.assertFalse(self._thread_pool.was_used())
    def testExperimentalThreadPoolIsUsed(self):
        # The non-blocking multi-callable routes work through the custom pool.
        self._consume_one_stream_response_unary_request(
            unary_stream_non_blocking_multi_callable(self._channel))
        self.assertTrue(self._thread_pool.was_used())
    def testUnrecognizedMethod(self):
        # Calling a method the server never registered must surface
        # UNIMPLEMENTED rather than hanging or crashing.
        request = b'abc'
        with self.assertRaises(grpc.RpcError) as exception_context:
            self._channel.unary_unary('NoSuchMethod')(request)
        self.assertEqual(grpc.StatusCode.UNIMPLEMENTED,
                         exception_context.exception.code())
    # --- successful invocations, all cardinalities ---
    def testSuccessfulUnaryRequestBlockingUnaryResponse(self):
        request = b'\x07\x08'
        # The handler computes the expected response deterministically.
        expected_response = self._handler.handle_unary_unary(request, None)
        multi_callable = unary_unary_multi_callable(self._channel)
        response = multi_callable(
            request,
            metadata=(('test', 'SuccessfulUnaryRequestBlockingUnaryResponse'),))
        self.assertEqual(expected_response, response)
    def testSuccessfulUnaryRequestBlockingUnaryResponseWithCall(self):
        request = b'\x07\x08'
        expected_response = self._handler.handle_unary_unary(request, None)
        multi_callable = unary_unary_multi_callable(self._channel)
        response, call = multi_callable.with_call(
            request,
            metadata=(('test',
                       'SuccessfulUnaryRequestBlockingUnaryResponseWithCall'),))
        self.assertEqual(expected_response, response)
        self.assertIs(grpc.StatusCode.OK, call.code())
        # On OK there is no error, so the debug error string is empty.
        self.assertEqual('', call.debug_error_string())
    def testSuccessfulUnaryRequestFutureUnaryResponse(self):
        request = b'\x07\x08'
        expected_response = self._handler.handle_unary_unary(request, None)
        multi_callable = unary_unary_multi_callable(self._channel)
        response_future = multi_callable.future(
            request,
            metadata=(('test', 'SuccessfulUnaryRequestFutureUnaryResponse'),))
        response = response_future.result()
        # The returned object doubles as both a Future and a Call.
        self.assertIsInstance(response_future, grpc.Future)
        self.assertIsInstance(response_future, grpc.Call)
        self.assertEqual(expected_response, response)
        self.assertIsNone(response_future.exception())
        self.assertIsNone(response_future.traceback())
    def testSuccessfulUnaryRequestStreamResponse(self):
        request = b'\x37\x58'
        expected_responses = tuple(
            self._handler.handle_unary_stream(request, None))
        multi_callable = unary_stream_multi_callable(self._channel)
        response_iterator = multi_callable(
            request,
            metadata=(('test', 'SuccessfulUnaryRequestStreamResponse'),))
        responses = tuple(response_iterator)
        self.assertSequenceEqual(expected_responses, responses)
    def testSuccessfulStreamRequestBlockingUnaryResponse(self):
        requests = tuple(
            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        expected_response = self._handler.handle_stream_unary(
            iter(requests), None)
        request_iterator = iter(requests)
        multi_callable = stream_unary_multi_callable(self._channel)
        response = multi_callable(
            request_iterator,
            metadata=(('test',
                       'SuccessfulStreamRequestBlockingUnaryResponse'),))
        self.assertEqual(expected_response, response)
    def testSuccessfulStreamRequestBlockingUnaryResponseWithCall(self):
        requests = tuple(
            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        expected_response = self._handler.handle_stream_unary(
            iter(requests), None)
        request_iterator = iter(requests)
        multi_callable = stream_unary_multi_callable(self._channel)
        response, call = multi_callable.with_call(
            request_iterator,
            metadata=(
                ('test',
                 'SuccessfulStreamRequestBlockingUnaryResponseWithCall'),))
        self.assertEqual(expected_response, response)
        self.assertIs(grpc.StatusCode.OK, call.code())
    def testSuccessfulStreamRequestFutureUnaryResponse(self):
        requests = tuple(
            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        expected_response = self._handler.handle_stream_unary(
            iter(requests), None)
        request_iterator = iter(requests)
        multi_callable = stream_unary_multi_callable(self._channel)
        response_future = multi_callable.future(
            request_iterator,
            metadata=(('test', 'SuccessfulStreamRequestFutureUnaryResponse'),))
        response = response_future.result()
        self.assertEqual(expected_response, response)
        self.assertIsNone(response_future.exception())
        self.assertIsNone(response_future.traceback())
    def testSuccessfulStreamRequestStreamResponse(self):
        requests = tuple(
            b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH))
        expected_responses = tuple(
            self._handler.handle_stream_stream(iter(requests), None))
        request_iterator = iter(requests)
        multi_callable = stream_stream_multi_callable(self._channel)
        response_iterator = multi_callable(
            request_iterator,
            metadata=(('test', 'SuccessfulStreamRequestStreamResponse'),))
        responses = tuple(response_iterator)
        self.assertSequenceEqual(expected_responses, responses)
    # --- repeated and concurrent invocations ---
    def testSequentialInvocations(self):
        first_request = b'\x07\x08'
        # NOTE(review): b'\x0809' parses as the single escape b'\x08'
        # followed by the literal ASCII characters "09" (3 bytes total).
        second_request = b'\x0809'
        expected_first_response = self._handler.handle_unary_unary(
            first_request, None)
        expected_second_response = self._handler.handle_unary_unary(
            second_request, None)
        multi_callable = unary_unary_multi_callable(self._channel)
        first_response = multi_callable(first_request,
                                        metadata=(('test',
                                                   'SequentialInvocations'),))
        second_response = multi_callable(second_request,
                                         metadata=(('test',
                                                    'SequentialInvocations'),))
        self.assertEqual(expected_first_response, first_response)
        self.assertEqual(expected_second_response, second_response)
    def testConcurrentBlockingInvocations(self):
        # Fan blocking calls out over a thread pool and collect the results.
        pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
        requests = tuple(
            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        expected_response = self._handler.handle_stream_unary(
            iter(requests), None)
        expected_responses = [expected_response
                             ] * test_constants.THREAD_CONCURRENCY
        response_futures = [None] * test_constants.THREAD_CONCURRENCY
        multi_callable = stream_unary_multi_callable(self._channel)
        for index in range(test_constants.THREAD_CONCURRENCY):
            # Each call needs its own iterator; iterators are single-use.
            request_iterator = iter(requests)
            response_future = pool.submit(
                multi_callable,
                request_iterator,
                metadata=(('test', 'ConcurrentBlockingInvocations'),))
            response_futures[index] = response_future
        responses = tuple(
            response_future.result() for response_future in response_futures)
        pool.shutdown(wait=True)
        self.assertSequenceEqual(expected_responses, responses)
    def testConcurrentFutureInvocations(self):
        # Same as above but using gRPC's own future-returning API, so no
        # external thread pool is needed for the in-flight calls.
        requests = tuple(
            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        expected_response = self._handler.handle_stream_unary(
            iter(requests), None)
        expected_responses = [expected_response
                             ] * test_constants.THREAD_CONCURRENCY
        response_futures = [None] * test_constants.THREAD_CONCURRENCY
        multi_callable = stream_unary_multi_callable(self._channel)
        for index in range(test_constants.THREAD_CONCURRENCY):
            request_iterator = iter(requests)
            response_future = multi_callable.future(
                request_iterator,
                metadata=(('test', 'ConcurrentFutureInvocations'),))
            response_futures[index] = response_future
        responses = tuple(
            response_future.result() for response_future in response_futures)
        self.assertSequenceEqual(expected_responses, responses)
    def testWaitingForSomeButNotAllConcurrentFutureInvocations(self):
        # Verifies that waiting on only half of the outstanding futures is
        # safe: the remaining RPCs are torn down on channel close, and the
        # wrapper below suppresses the resulting RpcError once the flag is
        # cleared (guarded by the lock to avoid a race with teardown).
        pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
        request = b'\x67\x68'
        expected_response = self._handler.handle_unary_unary(request, None)
        response_futures = [None] * test_constants.THREAD_CONCURRENCY
        lock = threading.Lock()
        test_is_running_cell = [True]
        def wrap_future(future):
            def wrap():
                try:
                    return future.result()
                except grpc.RpcError:
                    with lock:
                        if test_is_running_cell[0]:
                            raise
                    return None
            return wrap
        multi_callable = unary_unary_multi_callable(self._channel)
        for index in range(test_constants.THREAD_CONCURRENCY):
            inner_response_future = multi_callable.future(
                request,
                metadata=(
                    ('test',
                     'WaitingForSomeButNotAllConcurrentFutureInvocations'),))
            outer_response_future = pool.submit(
                wrap_future(inner_response_future))
            response_futures[index] = outer_response_future
        some_completed_response_futures_iterator = itertools.islice(
            futures.as_completed(response_futures),
            test_constants.THREAD_CONCURRENCY // 2)
        for response_future in some_completed_response_futures_iterator:
            self.assertEqual(expected_response, response_future.result())
        with lock:
            test_is_running_cell[0] = False
    # --- partial consumption of response streams (blocking and
    # non-blocking variants share the helpers from BaseRPCTest) ---
    def testConsumingOneStreamResponseUnaryRequest(self):
        self._consume_one_stream_response_unary_request(
            unary_stream_multi_callable(self._channel))
    def testConsumingOneStreamResponseUnaryRequestNonBlocking(self):
        self._consume_one_stream_response_unary_request(
            unary_stream_non_blocking_multi_callable(self._channel))
    def testConsumingSomeButNotAllStreamResponsesUnaryRequest(self):
        self._consume_some_but_not_all_stream_responses_unary_request(
            unary_stream_multi_callable(self._channel))
    def testConsumingSomeButNotAllStreamResponsesUnaryRequestNonBlocking(self):
        self._consume_some_but_not_all_stream_responses_unary_request(
            unary_stream_non_blocking_multi_callable(self._channel))
    def testConsumingSomeButNotAllStreamResponsesStreamRequest(self):
        self._consume_some_but_not_all_stream_responses_stream_request(
            stream_stream_multi_callable(self._channel))
    def testConsumingSomeButNotAllStreamResponsesStreamRequestNonBlocking(self):
        self._consume_some_but_not_all_stream_responses_stream_request(
            stream_stream_non_blocking_multi_callable(self._channel))
    def testConsumingTooManyStreamResponsesStreamRequest(self):
        self._consume_too_many_stream_responses_stream_request(
            stream_stream_multi_callable(self._channel))
    def testConsumingTooManyStreamResponsesStreamRequestNonBlocking(self):
        self._consume_too_many_stream_responses_stream_request(
            stream_stream_non_blocking_multi_callable(self._channel))
    # --- cancellation ---
    def testCancelledUnaryRequestUnaryResponse(self):
        request = b'\x07\x17'
        multi_callable = unary_unary_multi_callable(self._channel)
        # _control.pause() presumably holds the server-side handler (see
        # _rpc_test_helpers) so the RPC is still pending when cancelled —
        # TODO confirm against the helper's implementation.
        with self._control.pause():
            response_future = multi_callable.future(
                request,
                metadata=(('test', 'CancelledUnaryRequestUnaryResponse'),))
            response_future.cancel()
        self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
        self.assertTrue(response_future.cancelled())
        # All three accessors must raise once the future is cancelled.
        with self.assertRaises(grpc.FutureCancelledError):
            response_future.result()
        with self.assertRaises(grpc.FutureCancelledError):
            response_future.exception()
        with self.assertRaises(grpc.FutureCancelledError):
            response_future.traceback()
    def testCancelledUnaryRequestStreamResponse(self):
        self._cancelled_unary_request_stream_response(
            unary_stream_multi_callable(self._channel))
    def testCancelledUnaryRequestStreamResponseNonBlocking(self):
        self._cancelled_unary_request_stream_response(
            unary_stream_non_blocking_multi_callable(self._channel))
    def testCancelledStreamRequestUnaryResponse(self):
        requests = tuple(
            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        request_iterator = iter(requests)
        multi_callable = stream_unary_multi_callable(self._channel)
        with self._control.pause():
            response_future = multi_callable.future(
                request_iterator,
                metadata=(('test', 'CancelledStreamRequestUnaryResponse'),))
            # Wait until the server side is actually paused before
            # cancelling, so the cancellation races nothing.
            self._control.block_until_paused()
            response_future.cancel()
        self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
        self.assertTrue(response_future.cancelled())
        with self.assertRaises(grpc.FutureCancelledError):
            response_future.result()
        with self.assertRaises(grpc.FutureCancelledError):
            response_future.exception()
        with self.assertRaises(grpc.FutureCancelledError):
            response_future.traceback()
        # Metadata and details remain available even after cancellation.
        self.assertIsNotNone(response_future.initial_metadata())
        self.assertIsNotNone(response_future.details())
        self.assertIsNotNone(response_future.trailing_metadata())
    def testCancelledStreamRequestStreamResponse(self):
        self._cancelled_stream_request_stream_response(
            stream_stream_multi_callable(self._channel))
    def testCancelledStreamRequestStreamResponseNonBlocking(self):
        self._cancelled_stream_request_stream_response(
            stream_stream_non_blocking_multi_callable(self._channel))
    # --- deadline expiration ---
    def testExpiredUnaryRequestBlockingUnaryResponse(self):
        request = b'\x07\x17'
        multi_callable = unary_unary_multi_callable(self._channel)
        # While the handler is paused the short timeout must elapse,
        # producing DEADLINE_EXCEEDED on the blocking call.
        with self._control.pause():
            with self.assertRaises(grpc.RpcError) as exception_context:
                multi_callable.with_call(
                    request,
                    timeout=TIMEOUT_SHORT,
                    metadata=(('test',
                               'ExpiredUnaryRequestBlockingUnaryResponse'),))
        self.assertIsInstance(exception_context.exception, grpc.Call)
        self.assertIsNotNone(exception_context.exception.initial_metadata())
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
                      exception_context.exception.code())
        self.assertIsNotNone(exception_context.exception.details())
        self.assertIsNotNone(exception_context.exception.trailing_metadata())
    def testExpiredUnaryRequestFutureUnaryResponse(self):
        request = b'\x07\x17'
        callback = Callback()
        multi_callable = unary_unary_multi_callable(self._channel)
        with self._control.pause():
            response_future = multi_callable.future(
                request,
                timeout=TIMEOUT_SHORT,
                metadata=(('test', 'ExpiredUnaryRequestFutureUnaryResponse'),))
            response_future.add_done_callback(callback)
            # callback.value() blocks until the done-callback fires, i.e.
            # until the deadline expires.
            value_passed_to_callback = callback.value()
        # The done-callback receives the future itself.
        self.assertIs(response_future, value_passed_to_callback)
        self.assertIsNotNone(response_future.initial_metadata())
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
        self.assertIsNotNone(response_future.details())
        self.assertIsNotNone(response_future.trailing_metadata())
        with self.assertRaises(grpc.RpcError) as exception_context:
            response_future.result()
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
                      exception_context.exception.code())
        self.assertIsInstance(response_future.exception(), grpc.RpcError)
        self.assertIsNotNone(response_future.traceback())
        self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
                      response_future.exception().code())
    def testExpiredUnaryRequestStreamResponse(self):
        self._expired_unary_request_stream_response(
            unary_stream_multi_callable(self._channel))
    def testExpiredUnaryRequestStreamResponseNonBlocking(self):
        self._expired_unary_request_stream_response(
            unary_stream_non_blocking_multi_callable(self._channel))
if __name__ == '__main__':
    # Configure root logging so gRPC warnings are visible, then run the
    # suite with per-test output.
    logging.basicConfig()
    unittest.main(verbosity=2)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
78cf9c6623be1565196efc57f24c7d06f933e7ac | 91a41c5ce70819a69dfc6f6f2b94f862244267da | /commands/Translate.py | 30280c6d98fd5c567a8bc61273ba085bd72bd7c0 | [] | no_license | mions1/silvia | 9e489a5537d125acdffc3089c50ffa766b739738 | f2e5bf7d0c8cf1a90fcb8a663380aff91a6f63a3 | refs/heads/master | 2023-03-18T22:43:53.610163 | 2021-03-09T15:36:11 | 2021-03-09T15:36:11 | 346,031,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,207 | py | from . import Command
from google.cloud import translate_v2 as translate
from commands import builtin as bin
class Translate():
    """Handle a spoken "come si dice <phrase> in <language>" request.

    The transcribed Italian sentence is parsed into a phrase and a target
    language, the phrase is translated through the Google Cloud
    Translation API (``translate.Client``), and an Italian answer
    sentence is produced by :meth:`risposta`.
    """

    # Language tag -> name the user is expected to say.
    # NOTE(review): the table mixes Italian and English names; get_tag()
    # matches by exact name, so only the Italian entries are reachable
    # from Italian speech — confirm whether the English entries should be
    # translated as well.
    # Fixed here: 'nl' previously carried 'tedesco' (German) — 'nl' is
    # Dutch ('olandese') — and 'de' carried the English 'German', so an
    # Italian user saying "tedesco" was wrongly routed to Dutch.
    LANGUAGES = {
        'af': 'africano',
        'sq': 'albanese',
        'ar': 'arabo',
        'hy': 'armeno',
        'bn': 'Bengali',
        'ca': 'catalano',
        'zh': 'cinese',
        'zh-cn': 'Chinese (Mandarin/China)',
        'zh-tw': 'Chinese (Mandarin/Taiwan)',
        'zh-yue': 'Chinese (Cantonese)',
        'hr': 'croato',
        'cs': 'Czech',
        'da': 'Danish',
        'nl': 'olandese',
        'en': 'inglese',
        'en-au': 'English (Australia)',
        'en-uk': 'English (United Kingdom)',
        'en-us': 'English (United States)',
        'eo': 'Esperanto',
        'fi': 'Finnish',
        'fr': 'French',
        'de': 'tedesco',
        'el': 'Greek',
        'hi': 'Hindi',
        'hu': 'Hungarian',
        'is': 'Icelandic',
        'id': 'Indonesian',
        'it': 'italiano',
        'ja': 'giapponese',
        'ko': 'Korean',
        'la': 'Latin',
        'lv': 'Latvian',
        'mk': 'Macedonian',
        'no': 'Norwegian',
        'pl': 'Polish',
        'pt': 'portoghese',
        'pt-br': 'Portuguese (Brazil)',
        'ro': 'Romanian',
        'ru': 'Russian',
        'sr': 'Serbian',
        'sk': 'Slovak',
        'es': 'spagnolo',
        'es-es': 'Spanish (Spain)',
        'es-us': 'Spanish (United States)',
        'sw': 'Swahili',
        'sv': 'Swedish',
        'ta': 'Tamil',
        'th': 'Thai',
        'tr': 'Turkish',
        'vi': 'Vietnamese',
        'cy': 'Welsh'
    }

    def __init__(self, text):
        # Raw transcribed sentence, e.g. "come si dice ciao in inglese".
        self.text = text

    def elaborazione(self):
        """Parse ``self.text`` into ``phrase``, ``language`` and ``lang_tag``.

        Expects the sentence to contain the marker "come si dice"
        followed by the phrase and ending with "in <language>".
        Raises ValueError (from ``str.index``) if the marker is absent.
        ``lang_tag`` is None when the spoken language name is unknown.
        """
        words = self.text.split()
        marker = "come si dice".split()
        # Number of words before the marker, so the phrase starts right
        # after "come si dice" and stops before the trailing
        # "in <language>".
        prefix_len = len(self.text[:self.text.index(" ".join(marker))].split())
        self.phrase = " ".join(words[prefix_len + len(marker):-2])
        self.language = words[-1]
        self.lang_tag = self.get_tag(self.language)

    def esecuzione(self):
        """Translate ``self.phrase`` via the Cloud Translation API.

        Stores the detected source-language tag in ``from_lang_tag`` and
        the translated text in ``translation``.
        """
        translate_client = translate.Client()
        translation = translate_client.translate(
            self.phrase, target_language=self.lang_tag)
        self.from_lang_tag = translation["detectedSourceLanguage"]
        from_language = self.get_lang(self.from_lang_tag)
        print(u'Language from: {}'.format(from_language + "=" + self.from_lang_tag))
        print(u'Language to: {}'.format(self.language + "=" + self.lang_tag))
        print(u'Text: {}'.format(self.phrase))
        print(u'Translation: {}'.format(translation['translatedText']))
        self.translation = translation['translatedText']

    def risposta(self):
        """Return the Italian answer sentence for the translation."""
        return self.phrase + " in " + self.language + " si dice " + self.translation

    def run(self):
        """Parse, translate, and return the answer sentence."""
        self.elaborazione()
        self.esecuzione()
        return self.risposta()

    def get_tag(self, language="inglese"):
        """Return the tag for the given language name, or None if unknown."""
        return next(
            (tag for tag, name in Translate.LANGUAGES.items() if name == language),
            None)

    def get_lang(self, tag="en"):
        """Return the language name for *tag* (raises KeyError if unknown)."""
        return Translate.LANGUAGES[tag]
"simone.mione1@gmail.com"
] | simone.mione1@gmail.com |
a74be285641ea899eccb1367db3ce61e01d8b919 | caa175a933aca08a475c6277e22cdde1654aca7b | /tests/models/product/product_relation_types/test_relations.py | 745370f316f860b8d7729382fd9163a74d6c8c2a | [
"MIT"
] | permissive | simonsobs/acondbs | 01d68ae40866461b85a6c9fcabdfbea46ef5f920 | d18c7b06474b0dacb1dcf1c6dbd1e743407645e2 | refs/heads/main | 2023-07-07T04:33:40.561273 | 2023-06-28T22:08:00 | 2023-06-28T22:08:00 | 239,022,783 | 0 | 1 | MIT | 2023-06-26T20:36:39 | 2020-02-07T21:07:46 | Python | UTF-8 | Python | false | false | 1,920 | py | import pytest
from flask import Flask
from acondbs.db.sa import sa
from acondbs.models import ProductRelationType
@pytest.fixture
def app(app_empty: Flask) -> Flask:
    """App fixture seeded with three ProductRelationType rows.

    'parent' and 'child' are each other's reverse; 'sibling' is its own
    (self-referential) reverse.
    """
    y = app_empty
    #
    # +--------+                 +-------+
    # |        | --(reverse)->   |       |
    # | parent |                 | child |
    # |        | <-(reverse)--   |       |
    # +--------+                 +-------+
    #
    #
    # +------- -+
    # |         |  --(reverse)-+
    # | sibling |              |
    # |         | <-----------+
    # +------ --+
    #
    parent = ProductRelationType(name='parent')
    child = ProductRelationType(name='child')
    parent.reverse = child
    # Setting one side is expected to populate the other side automatically.
    assert child.reverse == parent
    sibling = ProductRelationType(name='sibling')
    sibling.reverse = sibling
    # Persist both graphs; 'child' is reachable (and saved) through 'parent'.
    with y.app_context():
        sa.session.add(parent)
        sa.session.add(sibling)
        sa.session.commit()
    return y
def test_reverse(app: Flask) -> None:
    """'parent' and 'child' must point at each other through .reverse."""
    with app.app_context():
        rel_parent = ProductRelationType.query.filter_by(name='parent').one()
        rel_child = ProductRelationType.query.filter_by(name='child').one()
        assert rel_parent.reverse is rel_child
        assert rel_child.reverse is rel_parent
def test_self_reverse(app: Flask) -> None:
    """A relation type may legitimately be its own reverse."""
    with app.app_context():
        rel = ProductRelationType.query.filter_by(name='sibling').one()
        assert rel.reverse is rel
def test_cascade(app: Flask) -> None:
    """Deleting 'parent' must cascade and delete its reverse, 'child'."""
    # Delete the parent row in its own session.
    with app.app_context():
        doomed = ProductRelationType.query.filter_by(name='parent').one()
        sa.session.delete(doomed)
        sa.session.commit()
    # Neither row should survive the cascade.
    with app.app_context():
        survivors = [
            ProductRelationType.query.filter_by(name=name).one_or_none()
            for name in ('parent', 'child')
        ]
        assert survivors == [None, None]
| [
"tai.sakuma@gmail.com"
] | tai.sakuma@gmail.com |
7f61e83b66c2db625d75c713ddfe56f43b63fe62 | 73d5c11866f739ea0f4cbdb86662e9c11d9c081a | /if_test.py | f8054c582bf14fbed0b1f4ba322f88c347869fd1 | [] | no_license | qorud02/changwonai | d9de4adbc3604be2706bfe0df8184cd60f89c0f4 | dcf9996ccc51a674681d3639c2b57a85444146ac | refs/heads/main | 2023-01-25T03:14:06.616372 | 2020-11-22T09:39:16 | 2020-11-22T09:39:16 | 314,939,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # print(type(3>5))
# Practice snippets kept commented out (translated from Korean):
# print(6>5)
# if 6>5:
#     print("Do 1")
#     print("Do 2")
# else:
#     print("Not doing")
# age = 15
# if age < 20:
#     print("youth discount")
# steps = int(input("please enter your steps"))  # original had `intinput(...)`, a typo
# if steps >= 1000:
#     print("goal reached")
# hour = 13
# if hour < 12:
#     print("it is the morning")
# else:
#     print("it is the afternoon")
# Live code: import the local `word` module, whose print_word() is invoked
# right after this line.
import word
word.print_word() | [
"you@example.com"
] | you@example.com |
3eb1f20a7f22613daa82fc565db909c67c1988bc | 105212e4d2d2175d5105e05552e29b300375e039 | /DL/RL/deepmind/pysc2-examples/demo.py | 29cd7855d8c76d513357636f8fded269594a67e6 | [
"Apache-2.0"
] | permissive | Asher-1/AI | 84f0c42651c0b07e6b7e41ebb354258db64dd0d1 | a70f63ebab3163f299f7f9d860a98695c0a3f7d5 | refs/heads/master | 2022-11-26T07:24:37.910301 | 2019-05-30T13:04:31 | 2019-05-30T13:04:31 | 160,031,310 | 7 | 1 | null | 2022-11-21T22:02:53 | 2018-12-02T09:19:03 | Jupyter Notebook | UTF-8 | Python | false | false | 2,038 | py | import sys
import os
import pdb
from absl import flags
from pysc2.env import sc2_env
from pysc2.lib import actions as sc2_actions
import os
import datetime
from pysc2.env import environment
import numpy as np
from common.vec_env.subproc_vec_env import SubprocVecEnv
import random
import time
FLAGS = flags.FLAGS
def construct_action(marine_num, x, y):
  """Build the two-step order: select a control group, then move.

  Function id 4 is select_control_group; 331 is the Move action
  (equivalent of a right click at screen coordinates).
  """
  select = sc2_actions.FunctionCall(4, [[0], [marine_num]])
  move = sc2_actions.FunctionCall(331, [[0], [int(x), int(y)]])
  return [select, move]
def get_position(env, marine_num):
  """Locate a marine by selecting it.

  Selection advances the environment (a side effect), so the rewards and
  done flags produced by that step are returned with the coordinates.
  """
  probe = construct_action(marine_num, -1, -1)
  _, rewards, dones, _, _, _, selected, _ = env.step([probe])
  coords = []
  for layer in selected:
    nonzero = layer.nonzero()
    coords.append((nonzero[1][0], nonzero[2][0]))
  return coords, rewards, dones
def main():
  """Random-agent demo on CollectMineralShards.

  Issues 1000 uniformly random move orders (no learning) and prints the
  rewards and marine positions as it goes.
  """
  FLAGS(sys.argv)  # absl flags must be parsed before the env is created
  env = SubprocVecEnv(1, 'CollectMineralShards')
  env.reset()
  # NOTE(review): env.step appears to return per-env sequences, so `rs`
  # (and therefore total_reward) may be array-like rather than a scalar -
  # confirm against SubprocVecEnv before trusting the printed totals.
  total_reward = 0
  for _ in range(1000):
    marine = random.randrange(2)
    x = random.randrange(32)
    y = random.randrange(32)
    print('Move %d to (%d, %d)' % (marine, x, y))
    move_action = construct_action(marine, x, y)
    # Repeat the same order for 7 frames; this controls the effective APM.
    for _ in range(7):
      obs, rs, dones, _, _, _, selected, screens = env.step([move_action])
      total_reward += rs
    # Querying the position (each query also steps the environment).
    m_pos = {}
    m_pos['0'], rs, dones = get_position(env, 0)
    total_reward += rs
    m_pos['1'], rs, dones = get_position(env, 1)
    total_reward += rs
    print(rs)
    print(dones)
    print('Total reward: ', total_reward)
    print(m_pos)
  env.close()
# Run the demo when this file is executed as a script.
if __name__ == '__main__':
  main()
| [
"ludahai19@163.com"
] | ludahai19@163.com |
e1b6bb2e250e6b34c211768511bd921692120eaf | 2837519560abb55b83ed9a0ff19fe468568057a8 | /flood_fill.py | 63c6e40f4a5d2e58d7c475dd9e75a0b37e3a94bb | [] | no_license | shahakshay11/DFS-1 | 47df064f2326805444647399c0f07bb1c56ffd53 | 9d06719ab8432b837dc6361b381a34c2663f8117 | refs/heads/master | 2022-04-18T12:21:07.249034 | 2020-04-17T05:09:41 | 2020-04-17T05:09:41 | 256,349,677 | 0 | 0 | null | 2020-04-16T23:06:32 | 2020-04-16T23:06:31 | null | UTF-8 | Python | false | false | 1,432 | py | """
// Time Complexity : O(m*n)
// Space Complexity : O(m*n)
// Did this code successfully run on Leetcode : No
// Any problem you faced while coding this : Ending recursion
// Your code here along with comments explaining your approach
Algorithm explanation
DFS
- Idea is to run DFS on sr,sc in the matrix and update the value of all
the values that have 1
"""
class Solution(object):
    def floodFill(self, image, sr, sc, newColor):
        """Flood-fill `image` from (sr, sc) with `newColor`, in place.

        Recursive DFS over the 4-connected region sharing the start pixel's
        original color; the mutated image is returned.

        :type image: List[List[int]]
        :type sr: int
        :type sc: int
        :type newColor: int
        :rtype: List[List[int]]
        """
        def dfs(i,j,matrix,m,n,origColor):
            # Recolor first, then visit 4-neighbours still holding origColor.
            matrix[i][j] = newColor
            directions = [(1,0),(0,1),(-1,0),(0,-1)]
            for x,y in directions:
                valx = x + i
                valy = y + j
                # Only recurse into in-bounds cells still carrying the
                # original color; because origColor != newColor here, a
                # recolored cell is never revisited (no endless recursion).
                if valx >=0 and valx < m and valy >=0 and valy < n and matrix[valx][valy] == origColor:
                    dfs(valx,valy,matrix,m,n,origColor)
        # Early exit: empty image, or start pixel already has newColor
        # (recursing would never terminate since recoloring is a no-op).
        if not image or not image[0] or image[sr][sc] == newColor:
            return image
        m = len(image)
        n = len(image[0])
        dfs(sr,sc,image,m,n,image[sr][sc])
return image | [
"akshay.vjti11@gmail.com"
] | akshay.vjti11@gmail.com |
2c6e6b8017bdb7a5aa4d63abd9ca3ec0a3126253 | fca120e66c06b1e3637c9b7463ee5769afc9af70 | /galaxy/tools/cwl/util.py | 4ece8d450fe43b7b05fc738f96c37f30da74cfc0 | [
"AFL-3.0",
"CC-BY-2.5",
"AFL-2.1",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jerowe/galaxy-lib | fb49cf08538b9702b1b1bb349b4a9757950bd18c | ea29d1d3ee4c28eb3e76daecca9465a1c67b571c | refs/heads/master | 2021-01-23T02:16:13.467881 | 2017-09-01T01:46:38 | 2017-09-01T01:46:38 | 102,438,112 | 0 | 0 | null | 2017-09-05T05:31:28 | 2017-09-05T05:31:28 | null | UTF-8 | Python | false | false | 6,924 | py | """Client-centric CWL-related utilities.
Used to share code between the Galaxy test framework
and other Galaxy CWL clients (e.g. Planemo)."""
import hashlib
import json
import os
from collections import namedtuple
from six import iteritems, StringIO
def output_properties(path=None, content=None):
    """Build a CWL ``File`` object (class/path/checksum/size) for local data.

    Exactly one of *path* (a file on disk, read as bytes) or *content*
    (an in-memory string) should be supplied.

    Bug fixes vs. the original:
      * the read loop compared against ``""`` - on Python 3 a binary read
        returns ``b""`` at EOF and ``b"" != ""`` is always true, so the loop
        never terminated; test truthiness instead;
      * text chunks are encoded before hashing, since sha1 requires bytes.
    """
    checksum = hashlib.sha1()
    properties = {
        "class": "File",
    }
    if path is not None:
        properties["path"] = path
        f = open(path, "rb")
    else:
        f = StringIO(content)
    try:
        filesize = 0
        contents = f.read(1024 * 1024)
        while contents:
            chunk = contents if isinstance(contents, bytes) else contents.encode("utf-8")
            checksum.update(chunk)
            # Note: for text input this counts characters, as the original did.
            filesize += len(contents)
            contents = f.read(1024 * 1024)
    finally:
        f.close()
    properties["checksum"] = "sha1$%s" % checksum.hexdigest()
    properties["size"] = filesize
    return properties
def galactic_job_json(job, test_data_directory, upload_func, collection_create_func):
    """Adapt a CWL job object to the Galaxy API.

    CWL derived tools in Galaxy can consume a job description sort of like
    CWL job objects via the API but paths need to be replaced with datasets
    and records and arrays with collection references. This function will
    stage files and modify the job description to adapt to these changes
    for Galaxy.

    :param job: CWL job dict; mutated in place and also returned.
    :param test_data_directory: base directory for relative file paths.
    :param upload_func: callback staging a FileUploadTarget/ObjectUploadTarget;
        must return a dict with an "outputs" list of dataset dicts.
    :param collection_create_func: callback building an HDCA from element
        identifiers plus a collection type ("list" or "record").
    :returns: (job, datasets) where datasets pairs each uploaded dataset
        with its source path/object.
    """
    datasets = []
    dataset_collections = []
    # Stage a single file (resolving relative paths) and return an hda ref.
    def upload_file(file_path):
        if not os.path.isabs(file_path):
            file_path = os.path.join(test_data_directory, file_path)
        _ensure_file_exists(file_path)
        upload_response = upload_func(FileUploadTarget(file_path))
        dataset = upload_response["outputs"][0]
        datasets.append((dataset, file_path))
        dataset_id = dataset["id"]
        return {"src": "hda", "id": dataset_id}
    # Stage a non-file value as a dataset and return an hda ref.
    def upload_object(the_object):
        upload_response = upload_func(ObjectUploadTarget(the_object))
        dataset = upload_response["outputs"][0]
        datasets.append((dataset, the_object))
        dataset_id = dataset["id"]
        return {"src": "hda", "id": dataset_id}
    # Dispatch on value shape: File dict, list, record dict, or plain value.
    def replacement_item(value, force_to_file=False):
        is_dict = isinstance(value, dict)
        is_file = is_dict and value.get("class", None) == "File"
        if force_to_file:
            if is_file:
                return replacement_file(value)
            else:
                return upload_object(value)
        if isinstance(value, list):
            return replacement_list(value)
        elif not isinstance(value, dict):
            return upload_object(value)
        if is_file:
            return replacement_file(value)
        else:
            return replacement_record(value)
    # CWL File objects carry either "location" or "path".
    def replacement_file(value):
        file_path = value.get("location", None) or value.get("path", None)
        if file_path is None:
            return value
        return upload_file(file_path)
    # A CWL array becomes a Galaxy "list" collection (elements named 0..n-1).
    def replacement_list(value):
        collection_element_identifiers = []
        for i, item in enumerate(value):
            dataset = replacement_item(item, force_to_file=True)
            collection_element = dataset.copy()
            collection_element["name"] = str(i)
            collection_element_identifiers.append(collection_element)
        collection = collection_create_func(collection_element_identifiers, "list")
        dataset_collections.append(collection)
        hdca_id = collection["id"]
        return {"src": "hdca", "id": hdca_id}
    # A CWL record becomes a Galaxy "record" collection keyed by field name.
    def replacement_record(value):
        collection_element_identifiers = []
        for record_key, record_value in value.items():
            if record_value.get("class") != "File":
                dataset = replacement_item(record_value, force_to_file=True)
                collection_element = dataset.copy()
            else:
                dataset = upload_file(record_value["location"])
                collection_element = dataset.copy()
            collection_element["name"] = record_key
            collection_element_identifiers.append(collection_element)
        collection = collection_create_func(collection_element_identifiers, "record")
        dataset_collections.append(collection)
        hdca_id = collection["id"]
        return {"src": "hdca", "id": hdca_id}
    replace_keys = {}
    for key, value in iteritems(job):
        replace_keys[key] = replacement_item(value)
    job.update(replace_keys)
    return job, datasets
def _ensure_file_exists(file_path):
    """Raise with a diagnostic message when *file_path* does not exist."""
    if os.path.exists(file_path):
        return
    parent_directory = os.path.dirname(file_path)
    parent_state = "" if os.path.exists(parent_directory) else "not "
    raise Exception(
        "File [%s] does not exist - parent directory [%s] does %sexist, cwd is [%s]"
        % (file_path, parent_directory, parent_state, os.getcwd())
    )
class FileUploadTarget(object):
    """Marker object telling upload_func to stage the file at *path*."""
    def __init__(self, path):
        self.path = path
class ObjectUploadTarget(object):
    """Marker object telling upload_func to stage the given in-memory value."""
    def __init__(self, the_object):
        self.object = the_object
# Address of one item in a Galaxy history: (history id, content type, content id).
GalaxyOutput = namedtuple("GalaxyOutput", ["history_id", "history_content_type", "history_content_id"])
def output_to_cwl_json(galaxy_output, get_metadata, get_dataset):
    """Convert objects in a Galaxy history into a CWL object.

    Useful in running conformance tests and implementing the cwl-runner
    interface via Galaxy.

    :param galaxy_output: GalaxyOutput naming the history item to convert.
    :param get_metadata: callback(content_type, content_id) -> metadata dict.
    :param get_dataset: callback(metadata) -> dict with "content" or "path".
    """
    # Recurse into one collection element by building its GalaxyOutput.
    def element_to_cwl_json(element):
        element_output = GalaxyOutput(
            galaxy_output.history_id,
            element["object"]["history_content_type"],
            element["object"]["id"],
        )
        return output_to_cwl_json(element_output, get_metadata, get_dataset)
    output_metadata = get_metadata(galaxy_output.history_content_type, galaxy_output.history_content_id)
    if output_metadata["history_content_type"] == "dataset":
        ext = output_metadata["file_ext"]
        assert output_metadata["state"] == "ok"
        dataset_dict = get_dataset(output_metadata)
        # expression.json datasets hold a literal CWL value; everything else
        # is described as a CWL File object.
        if ext == "expression.json":
            if "content" in dataset_dict:
                return json.loads(dataset_dict["content"])
            else:
                with open(dataset_dict["path"]) as f:
                    return json.load(f)
        else:
            return output_properties(**dataset_dict)
    elif output_metadata["history_content_type"] == "dataset_collection":
        # NOTE(review): if collection_type is neither "list" nor "record",
        # `rval` is unbound and the return below raises NameError rather
        # than a descriptive error - confirm whether other types can occur.
        if output_metadata["collection_type"] == "list":
            rval = []
            for element in output_metadata["elements"]:
                rval.append(element_to_cwl_json(element))
        elif output_metadata["collection_type"] == "record":
            rval = {}
            for element in output_metadata["elements"]:
                rval[element["element_identifier"]] = element_to_cwl_json(element)
        return rval
    else:
        raise NotImplementedError("Unknown history content type encountered")
| [
"jmchilton@gmail.com"
] | jmchilton@gmail.com |
017790c669c4f1bf70f58eb6cfd4c9089c70646d | 380372bbec9b77df14bb96fc32aca7061cca0635 | /astro/sat/tle2.py | 86c3031403c10cd04cc9e670bf7e423ce77da94d | [] | no_license | IchiroYoshida/python_public | d3c42dc31b3206db3a520a007ea4fb4ce6c1a6fd | 37ccadb1d3d42a38561c7708391f4c11836f5360 | refs/heads/master | 2023-08-16T17:19:07.278554 | 2023-08-13T21:29:51 | 2023-08-13T21:29:51 | 77,261,682 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,742 | py | #! /usr/local/bin/python3.6
"""
直近 TLE データ取得 (from NASA)
: 過去の直近の TLE データ1件を取得する
(過去データが存在しなければ、未来の直近データ)
date name version
2018.06.12 mk-mode.com 1.00 新規作成
Copyright(C) 2018 mk-mode.com All Rights Reserved.
---
引数 : [YYYYMMDD[HHMMSS]]
(JST を指定。無指定なら現在時刻とみなす)
"""
from datetime import datetime
from datetime import timedelta
import re
import requests
import sys
import traceback
FILE ='./iss_tle.py'
class TleIssNasa:
    """Fetch the ISS TLE set nearest at-or-before a given JST time from NASA.

    The target time comes from argv[1] as YYYYMMDD[HHMMSS] (JST), defaulting
    to "now".  The chosen TLE is printed and written to FILE as a small
    pyephem loader script.  (Docstrings translated from Japanese.)
    """
    URL = (
        "https://spaceflight.nasa.gov/realdata/sightings/"
        "SSapplications/Post/JavaSSOP/orbit/ISS/SVPOST.html"
    )
    UA = (
        "mk-mode Bot (by Python/{}.{}.{}, "
        "Administrator: postmaster@mk-mode.com)"
    ).format(
        sys.version_info.major,
        sys.version_info.minor,
        sys.version_info.micro
    )
    MSG_ERR = (
        "Invalid date!\n"
        "[USAGE] ./tle_iss_nasa.rb [YYYYMMDD[HHMMSS]]"
    )
    def __init__(self):
        """Parse the optional JST timestamp argument and derive UTC."""
        if len(sys.argv) < 2:
            self.jst = datetime.now()
        else:
            if re.search(r"^(\d{8}|\d{14})$", sys.argv[1]) is not None:
                dt = sys.argv[1].ljust(14, "0")
                try:
                    self.jst = datetime.strptime(dt, "%Y%m%d%H%M%S")
                except ValueError:
                    print(self.MSG_ERR)
                    sys.exit(1)
            else:
                print(self.MSG_ERR)
                # Bug fix: this invalid-input branch exited with status 0
                # while the equivalent branch above exits with 1.
                sys.exit(1)
        # JST is UTC+9 with no DST.
        self.utc = self.jst - timedelta(hours=9)
    def exec(self):
        """Pick the TLE nearest at-or-before self.utc, print it, write FILE."""
        tle = ""
        utc_tle = None
        print(self.jst.strftime("%Y-%m-%d %H:%M:%S.%f JST"))
        print(self.utc.strftime("%Y-%m-%d %H:%M:%S.%f UTC"))
        print("---")
        tles = self.__get_tle()
        # Scan from the newest entry backwards for the first epoch <= target;
        # if every epoch is in the future, the oldest entry is used.
        for new in reversed(tles):
            tle = new
            item_utc = re.split(" +", tle[0])[3]
            y = 2000 + int(item_utc[0:2])
            d = float(item_utc[2:])
            # NOTE(review): TLE epochs use a 1-based day of year, so
            # `days=d` skews the computed epoch by one day - confirm.
            utc_tle = datetime(y, 1, 1) + timedelta(days=d)
            if utc_tle <= self.utc:
                break
        print("\n".join(tle))
        print(utc_tle.strftime("(%Y-%m-%d %H:%M:%S.%f UTC)"))
        # Emit a tiny pyephem loader script for the chosen TLE.
        with open(FILE, 'w') as fh:
            fh.write("import ephem\n\n")
            fh.write("line1 = "+'"'+'ISS(SARYA)"\n')
            fh.write("line2 = "+'"'+tle[0]+'"\n')
            fh.write("line3 = "+'"'+tle[1]+'"\n')
            fh.write("iss = ephem.readtle(line1, line2, line3)")
    def __get_tle(self):
        """Scrape every ISS TLE line pair from the NASA sighting page."""
        html, status, reason = self.__get_html()
        if status != 200 or reason != "OK":
            print((
                "STATUS: {} ({})"
                "[ERROR] Could not retreive html."
            ).format(status, reason))
            sys.exit(1)
        res = []
        for tle in re.findall(r"ISS\n +(1.+?)\n +(2.+?)\n", html):
            res.append([tle[0], tle[1]])
        return res
    def __get_html(self):
        """GET the NASA page; returns [body, status_code, reason]."""
        headers = {'User-Agent': self.UA}
        # Bug fix: requests.get(self.URL, headers) passed the dict as the
        # positional `params` argument, so the User-Agent was never sent.
        res = requests.get(self.URL, headers=headers)
        return [res.text, res.status_code, res.reason]
if __name__ == '__main__':
    # Entry point: any unhandled failure is logged and reported via exit 1.
    try:
        TleIssNasa().exec()
    except Exception:
        traceback.print_exc()
        sys.exit(1)
| [
"yoshida.ichi@gmail.com"
] | yoshida.ichi@gmail.com |
1c7c6152f130673e43ea5c7a7a14923df429ae30 | 044facb13bff7414439db8706ed322ea505698fa | /old/python/sandbox/old_stuff/multi_sampling/conditionals.py | 3f0db46bbddcf90a69ec3dfa653fa07f655d73bc | [] | no_license | paglenn/WLC | 2c42944bfe707018b5dbfb6ec471518aff2642c7 | e57544eba1380da6a260532b5b72b39c0d1fa433 | refs/heads/master | 2020-12-24T14:35:54.944179 | 2015-11-09T14:43:35 | 2015-11-09T14:43:35 | 23,404,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,480 | py | import numpy as np
import os
from parameters import *
def calculate_TP():
    """Read the sampled TP values from ``tp_file``, one float per line.

    Exits the program if the file is missing.  Fixes vs. the original:
    a context manager closes the file even on a parse error, and
    ``float(line)`` replaces ``float(line[:-1])`` so a final line without a
    trailing newline is no longer truncated by one character.
    """
    if not os.path.isfile(tp_file):
        print("data file ",tp_file,"missing!")
        exit()
    with open(tp_file, 'r') as tp_data:
        return [float(line) for line in tp_data]
def calculate_RP():
    """Read the sampled RP values from ``rp_file``, one float per line.

    Exits the program if the file is missing.  Fixes vs. the original:
    a context manager closes the file even on a parse error, and
    ``float(line)`` avoids truncating a final line lacking a newline.
    """
    if not os.path.isfile(rp_file):
        print("data file ",rp_file,"missing!")
        exit()
    with open(rp_file, 'r') as rp_data:
        return [float(line) for line in rp_data]
def calculate_Z():
    """Read the sampled Z values from ``z_file``, one float per line.

    Exits the program if the file is missing.  Fixes vs. the original:
    a context manager closes the file even on a parse error, and
    ``float(line)`` avoids truncating a final line lacking a newline.
    """
    if not os.path.isfile(z_file):
        print("data file ",z_file,"missing!")
        exit()
    with open(z_file, 'r') as z_data:
        return [float(line) for line in z_data]
def calculate_RPTP(RP,TP):
    """Read the precomputed RP*TP products from ``rptp_file``.

    The RP and TP arguments are kept for call-site compatibility but are
    unused: the products are read from the file rather than recomputed.
    Fixes vs. the original: a context manager closes the file even on a
    parse error, and ``float(line)`` avoids truncating a final line that
    lacks a newline.
    """
    if not os.path.isfile(rptp_file):
        print("data file ",rptp_file,"missing!")
        exit()
    with open(rptp_file, 'r') as rptp_data:
        return [float(line) for line in rptp_data]
# Load the sampled observables and build normalized joint histograms.
# Dividing z_X_joint by X_joint (commented out below) would estimate the
# conditional density of Z given (TP, RP, RPTP).
TP = calculate_TP()
RP = calculate_RP()
Z = calculate_Z()
RPTP = calculate_RPTP(RP,TP)
# NOTE(review): `normed=True` was deprecated and later removed from numpy's
# histogramdd in favour of `density=True` - confirm the numpy version used.
z_X_joint = np.histogramdd(np.vstack((Z,TP,RP,RPTP)).T,normed=True)[0]
X_joint = np.histogramdd(np.vstack((TP,RP,RPTP)).T,normed=True)[0]
print(z_X_joint.shape,X_joint.shape)
#print(z_X_joint,X_joint)
#print(z_X_joint)
#z_cond = z_X_joint/X_joint
| [
"nls.pglenn@gmail.com"
] | nls.pglenn@gmail.com |
87d92a7c92ca5581209374f6943207e4fa0b7946 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/list_concat_2-80.py | d2c18826d5b43a8c92424e6a7782377f8745fd04 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | z:[int] = None
i:int = 0
z = [1,2,3] + [4,5,6] + [7,8,9]
while i < len(z):
$ID(z[i])
i = i + 1
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
6d60af458900694275feaf31879721e43da59186 | cacb92c6dba32dfb7f2a4a2a02269f40ab0413dd | /mmdet/datasets/pipelines/test_time_aug.py | f3a04e7311af9a306703ced843e4cdfe6f1edb66 | [
"Apache-2.0"
] | permissive | dereyly/mmdet_sota | 697eab302faf28d5bce4092ecf6c4fd9ffd48b91 | fc14933ca0ec2eebb8e7b3ec0ed67cae0da3f236 | refs/heads/master | 2022-11-26T14:52:13.665272 | 2020-08-04T00:26:46 | 2020-08-04T00:26:46 | 272,046,903 | 15 | 5 | Apache-2.0 | 2020-07-16T06:22:39 | 2020-06-13T16:37:26 | Python | UTF-8 | Python | false | false | 2,741 | py | import warnings
import mmcv
from ..builder import PIPELINES
from .compose import Compose
@PIPELINES.register_module()
class MultiScaleFlipAug(object):
    """Test-time augmentation with multiple scales and flipping.

    Args:
        transforms (list[dict]): Transforms to apply in each augmentation.
        img_scale (tuple | list[tuple]): Images scales for resizing.
        flip (bool): Whether apply flip augmentation. Default: False.
        flip_direction (str | list[str]): Flip augmentation directions,
            options are "horizontal" and "vertical". If flip_direction is list,
            multiple flip augmentations will be applied.
            It has no effect when flip == False. Default: "horizontal".
    """
    def __init__(self,
                 transforms,
                 img_scale,
                 flip=False,
                 flip_direction='horizontal'):
        self.transforms = Compose(transforms)
        # Normalize scalar inputs to lists so __call__ can iterate uniformly.
        self.img_scale = img_scale if isinstance(img_scale,
                                                 list) else [img_scale]
        assert mmcv.is_list_of(self.img_scale, tuple)
        self.flip = flip
        self.flip_direction = flip_direction if isinstance(
            flip_direction, list) else [flip_direction]
        assert mmcv.is_list_of(self.flip_direction, str)
        # Warn about configurations that silently do nothing.
        if not self.flip and self.flip_direction != ['horizontal']:
            warnings.warn(
                'flip_direction has no effect when flip is set to False')
        if (self.flip
                and not any([t['type'] == 'RandomFlip' for t in transforms])):
            warnings.warn(
                'flip has no effect when RandomFlip is not in transforms')
    def __call__(self, results):
        # One augmented copy per (scale, flip, direction) combination; the
        # unflipped variant is produced once per direction as well.
        aug_data = []
        flip_aug = [False, True] if self.flip else [False]
        for scale in self.img_scale:
            for flip in flip_aug:
                for direction in self.flip_direction:
                    _results = results.copy()
                    _results['scale'] = scale
                    _results['flip'] = flip
                    _results['flip_direction'] = direction
                    data = self.transforms(_results)
                    aug_data.append(data)
        # list of dict to dict of list
        aug_data_dict = {key: [] for key in aug_data[0]}
        for data in aug_data:
            for key, val in data.items():
                aug_data_dict[key].append(val)
        return aug_data_dict
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(transforms={self.transforms}, '
        repr_str += f'img_scale={self.img_scale}, flip={self.flip})'
        repr_str += f'flip_direction={self.flip_direction}'
        return repr_str
| [
"nikolay@xix.ai"
] | nikolay@xix.ai |
94cf91731423bf8731c68e6c7bd64038ada51c7e | 265d0477b43dd6391b939d08577bb82c57184cdf | /official/utils/registry_test.py | 47e3722993c79e96706d56757e6e3997f072ffde | [
"Apache-2.0"
] | permissive | EthanGeek/models | 02c9fca96f5b3be7503bb0e75172e2f683bf4b73 | a9fcda17153e4f36d431174934abef4151f1f687 | refs/heads/master | 2022-12-11T08:58:19.241487 | 2020-09-04T00:38:22 | 2020-09-04T00:38:22 | 292,791,920 | 1 | 0 | Apache-2.0 | 2020-09-04T08:21:02 | 2020-09-04T08:21:01 | null | UTF-8 | Python | false | false | 2,560 | py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for registry."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from official.utils import registry
class RegistryTest(tf.test.TestCase):
  def test_register(self):
    """Functions and classes register under string or object keys."""
    collection = {}
    @registry.register(collection, 'functions/func_0')
    def func_test():
      pass
    self.assertEqual(registry.lookup(collection, 'functions/func_0'), func_test)
    @registry.register(collection, 'classes/cls_0')
    class ClassRegistryKey:
      pass
    self.assertEqual(
        registry.lookup(collection, 'classes/cls_0'), ClassRegistryKey)
    # A registered object (not just a string) may itself serve as a key.
    @registry.register(collection, ClassRegistryKey)
    class ClassRegistryValue:
      pass
    self.assertEqual(
        registry.lookup(collection, ClassRegistryKey), ClassRegistryValue)
  def test_register_hierarchy(self):
    """'/'-separated keys build a nested dict; object keys stay flat."""
    collection = {}
    @registry.register(collection, 'functions/func_0')
    def func_test0():
      pass
    @registry.register(collection, 'func_1')
    def func_test1():
      pass
    @registry.register(collection, func_test1)
    def func_test2():
      pass
    expected_collection = {
        'functions': {
            'func_0': func_test0,
        },
        'func_1': func_test1,
        func_test1: func_test2,
    }
    self.assertEqual(collection, expected_collection)
  def test_register_error(self):
    """Registering under an occupied leaf raises KeyError; missing keys raise LookupError."""
    collection = {}
    @registry.register(collection, 'functions/func_0')
    def func_test0():  # pylint: disable=unused-variable
      pass
    with self.assertRaises(KeyError):
      @registry.register(collection, 'functions/func_0/sub_func')
      def func_test1():  # pylint: disable=unused-variable
        pass
    with self.assertRaises(LookupError):
      registry.lookup(collection, 'non-exist')
# Run the registry tests when this file is executed directly.
if __name__ == '__main__':
  tf.test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
c440828efe9f129ee90f410999d2d0d211b37e61 | 54a5f5ec2c5edf924b7dc7730ee7cb2a38ac4a39 | /MergingDataFrameswithpandas/E27_Using_merge_asof.py | e556c2a5d480cbe0125360e461af6d06c17a0cc1 | [] | no_license | dajofischer/Datacamp | fac413ec178375cedceababaf84f6b47a61fc821 | a03d16b8f342412f1ee077f2f196ee8404e2e21c | refs/heads/master | 2020-04-05T08:38:25.361746 | 2019-03-27T20:55:57 | 2019-03-27T20:55:57 | 156,722,561 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | # Merge auto and oil: merged
# As-of merge: each auto row gets the latest oil price at or before its year.
# (`auto`, `oil` and `pd` are provided by the exercise environment.)
merged = pd.merge_asof(auto,oil,left_on='yr' , right_on='Date')
# Print the tail of merged
print(merged.tail())
# Resample to year-end ('A') means of mpg and Price.
yearly = merged.resample('A',on='Date')[['mpg','Price']].mean()
# Print yearly
print(yearly)
# Correlation between yearly mpg and oil price.
print(yearly.corr())
| [
"dajofischer@gmail.com"
] | dajofischer@gmail.com |
40891161a6e4cade4b43b30586594b4651c6ee2a | d3efc82dfa61fb82e47c82d52c838b38b076084c | /crossmarketetf/crossmarket_creation_HA/YW_CETFSS_SHSG_048.py | b948171ffe226a7b5fdd317eb7b46970b408751f | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,698 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test")
from crossmarketetf.cetfservice.cetf_main_service import *
from crossmarketetf.cetfservice.cetf_get_components_asset import *
from crossmarketetf.cetfservice.cetf_utils import *
from mysql.QueryOrderErrorMsg import queryOrderErrorMsg
from service.mainService import *
from mysql.getUpOrDownPrice import getUpPrice
class YW_CETFSS_SHSG_048(xtp_test_case):
    def test_YW_CETFSS_SHSG_048(self):
        """Shanghai cross-market ETF creation regression case 048.

        Three stages, each expected to produce a rejected order: (1) primary
        market ETF creation, (2) secondary market sale of the ETF, (3)
        secondary market sale of the Shanghai component stocks.
        (Comments translated from Chinese; runtime strings left unchanged.)
        """
        # ----------- ETF creation (primary market) -------------
        title = '上海ETF申购--可深市股票退补现金替代:T-1日无成分股&资金不足&计算现金比例<最大现金比例→T日申购ETF'
        # Expected values for this case.
        # Possible states: initial, not traded, fully traded, rejected,
        # cancel-of-reject, internally cancelled.
        # xtp_ID and cancel_xtpID default to 0 and need no change.
        case_goal = {
            '期望状态': '废单',
            'errorID': 11010120,
            'errorMSG': queryOrderErrorMsg(11010120),
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        unit_info = {
            'ticker': '550630',  # ETF code
            'etf_unit': 1.0,  # number of ETF creation units
            'etf_unit_sell': 1.0,  # number of ETF units to sell
            'component_unit_sell': 1.0  # component-stock sell units
        }
        # ----------- query component holdings before the creation -------------
        component_stk_info = cetf_get_all_component_stk(Api,unit_info['ticker'])
        # Minimum creation/redemption unit of the ETF.
        unit_number = query_creation_redem_unit(unit_info['ticker'])
        # ETF creation quantity.
        quantity = int(unit_info['etf_unit'] * unit_number)
        # Order request parameters ------------------------------------------
        wt_reqs = {
            'business_type':
                Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_ETF'],
            'order_client_id':
                2,
            'market':
                Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
            'ticker':
                unit_info['ticker'],
            'side':
                Api.const.XTP_SIDE_TYPE['XTP_SIDE_PURCHASE'],
            'price_type':
                Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
            'quantity':
                quantity,
            'position_effect':
                Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
        }
        g_func.cetf_parm_init(case_goal['期望状态'])
        rs1 = cetf_service_test(Api, case_goal, wt_reqs,component_stk_info)
        etf_creation_log(case_goal, rs1)
        self.assertEqual(rs1['用例测试结果'], True)
        # -------- secondary market: sell the ETF -----------
        case_goal['期望状态'] = '废单'
        case_goal['errorID'] = 11010121
        case_goal['errorMSG'] = queryOrderErrorMsg(11010121)
        # ETF quantity to sell on the secondary market.
        quantity = int(unit_info['etf_unit_sell'] * unit_number)
        quantity_list = split_etf_quantity(quantity)
        # Query the limit-up price.
        limitup_px = getUpPrice(unit_info['ticker'])
        rs2 = {}
        for etf_quantity in quantity_list:
            wt_reqs_etf = {
                'business_type':
                    Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':
                    2,
                'market':
                    Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
                'ticker':
                    unit_info['ticker'],
                'side':
                    Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type':
                    Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
                'price':
                    limitup_px,
                'quantity':
                    etf_quantity,
                'position_effect':
                    Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            rs2 = serviceTest(Api, case_goal, wt_reqs_etf)
            if rs2['用例测试结果'] is False:
                etf_sell_log(case_goal, rs2)
                self.assertEqual(rs2['用例测试结果'], True)
                return
        etf_sell_log(case_goal, rs2)
        # ------------ secondary market: sell the component stocks -----------
        case_goal['期望状态'] = '废单'
        case_goal['errorID'] = 11010121
        case_goal['errorMSG'] = queryOrderErrorMsg(11010121)
        # Query component codes and share counts of the ETF.
        etf_components = query_cetf_component_share(unit_info['ticker'])
        # A sell unit >= 100 means a share quantity; < 100 means a fraction
        # of the holding.
        rs3 = {}
        for stk_code in etf_components:
            # Creation cases 1-43 carry one Shanghai and one Shenzhen
            # component each; the Shenzhen one is '008000' - only the
            # Shanghai components are sold here.
            if stk_code != '008000':
                components_share = etf_components[stk_code]
                quantity = (int(unit_info['component_unit_sell'])
                            if unit_info['component_unit_sell'] >= 100
                            else int(components_share * unit_info['component_unit_sell']))
                limitup_px = getUpPrice(stk_code)
                wt_reqs = {
                    'business_type':
                        Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                    'order_client_id':
                        2,
                    'market':
                        Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                    'ticker':
                        stk_code,
                    'side':
                        Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                    'price_type':
                        Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
                    'price':
                        limitup_px,
                    'quantity':
                        quantity,
                    'position_effect':
                        Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
                }
                ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
                rs3 = serviceTest(Api, case_goal, wt_reqs)
                if rs3['用例测试结果'] is False:
                    etf_components_sell_log(case_goal, rs3)
                    self.assertEqual(rs3['用例测试结果'], True)
        etf_components_sell_log(case_goal, rs3)
        self.assertEqual(rs3['用例测试结果'], True)
# Allow running this regression case directly.
if __name__ == '__main__':
    unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
5ce5c41d4a76c971071d193991947217158f28a7 | 5e1074afdb17eeb3a78eaf16093c8c491d9f3c0e | /pyramid_blogr/services/blog_record.py | 98266b411230078f2501547f4a5ab7f7e57ce5ac | [] | no_license | andyk1278/pyramid_blogr | 52255cf1a09c3ed3a43f6c2503a722db30df9f19 | 1e8b53e754b5315c08a2e982c9be08e76c331748 | refs/heads/master | 2021-01-01T05:54:28.668964 | 2017-07-15T23:12:53 | 2017-07-15T23:12:53 | 97,301,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 958 | py | import sqlalchemy as sa
from paginate_sqlalchemy import SqlalchemyOrmPage
from ..models.blog_record import BlogRecord
class BlogRecordService(object):
    """Read-only query helpers for BlogRecord, all classmethods."""
    @classmethod
    def all(cls, request):
        """All blog entries, newest first."""
        query = request.dbsession.query(BlogRecord)
        return query.order_by(sa.desc(BlogRecord.created))
    @classmethod
    def by_id(cls, _id, request):
        """One entry by primary key, or None when absent."""
        query = request.dbsession.query(BlogRecord)
        return query.get(_id)
    @classmethod
    def get_paginator(cls, request, page=1):
        """A 5-per-page paginator over entries, newest first."""
        query = request.dbsession.query(BlogRecord)
        query = query.order_by(sa.desc(BlogRecord.created))
        # Keep the caller's query string, only swapping the page number.
        query_params = request.GET.mixed()
        def url_maker(link_page):
            # replace page param with values generated by paginator
            query_params['page'] = link_page
            return request.current_route_url(_query=query_params)
return SqlalchemyOrmPage(query, page, items_per_page=5, url_maker=url_maker) | [
"andyk1278@gmail.com"
] | andyk1278@gmail.com |
5c510e80b59c7982d33d9b43ae200d9f186a4b9d | 04c7295ce65a623dc62454aa46ae4ae4ce51ca36 | /Assignment/ass1/q2/ass1q2.py | 25306781782d97e627e5d64efb3a34b5b2ddc616 | [] | no_license | hty-unsw/COMP9021-Python | 38373378162a314a82bf14453d026e641963e1b9 | 97be6dfa730247b59e608ec6d464ac16b4cf1968 | refs/heads/master | 2020-07-03T00:51:23.540099 | 2018-10-30T14:23:15 | 2018-10-30T14:23:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,995 | py | import sys
import os.path
from itertools import groupby
# Read a file of strictly increasing positive integers (pillar positions);
# reject anything malformed before analysing the "ride".
try:
    N = input('Please enter the name of the file you want to get data from: ')
    if not os.path.exists(N):
        raise EOFError
    else:
        f = open(N)
        s = f.read()
        f.close()
        origin_list = s.split()
        L = [int(x) for x in origin_list]
        # Valid data: at least two positive, strictly increasing values.
        if len(L) <= 1 or L[0] <= 0:
            raise ValueError
        for i in range(len(L)-1):
            if L[i+1] - L[i] <=0:
                raise ValueError
except ValueError:
    print('Sorry, input file does not store valid data.')
    sys.exit()
except EOFError:
    print('Sorry, there is no such file.')
    sys.exit()
#============================
# NOTE(review): the file is read and parsed a second time here even though
# L is already populated above - the re-read is redundant.
f = open(N)
s = f.read()
f.close()
origin_list = s.split()
L = [int(x) for x in origin_list]
# Consecutive gaps; a "perfect" ride has a single constant gap.
List2 = [L[n] - L[n - 1] for n in range(1, len(L))]
if len(set(List2)) == 1:
    print('The ride is perfect!\n'
          'The longest good ride has a length of: {}\n'
          'The minimal number of pillars to remove to build a perfect ride from the rest is: 0'.format(len(List2)))
else:
    # Longest run of equal consecutive gaps = longest "good ride".
    List3 = []
    for key,group in groupby(List2):
        List3.append(len(list(group)))
    length = max(List3)
    #=============================================
    # Brute force over every start pillar and gap: count how many pillars
    # of the arithmetic progression are present.  O(n^2) pairs with an
    # O(n) scan each - cubic overall, fine for assignment-sized inputs.
    Lset = set(L)
    ride_len = 0
    for i in range(0,len(L)-1):
        for n in range(i+1,len(L)):
            diff = L[n] - L[i]
            L2 = range(L[i],L[-1]+1,diff)
            current_len = 0
            for m in range(0,len(L2)):
                if L2[m] in Lset:
                    current_len += 1
                else:
                    break
            if current_len > ride_len:
                ride_len = current_len
    value = len(L) - ride_len
    print('The ride could be better...\n'
          'The longest good ride has a length of: {}\n'
          'The minimal number of pillars to remove to build a perfect ride from the rest is: {}'.format(length, value))
| [
"grey1991ss@gmail.com"
] | grey1991ss@gmail.com |
f9b03003acb6e4f7f8f5bea29ff4743e4c9a8b23 | 7c5e9dd27939492a5f75650e02804f2a84e982ec | /apps/shares/models.py | e47e0b3b474e210421a039b24fbfcaffa1259e91 | [] | no_license | karol-gruszczyk/janusze-biznesu | da431f7b31a7c368c07444ef5e82a25d95fcfc90 | 4d5c18a1d407704a39c7c50a4fdde0391b87d116 | refs/heads/master | 2021-01-17T20:04:06.799391 | 2015-12-04T12:13:56 | 2015-12-04T12:13:56 | 37,723,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | from django.db import models
class ShareManager(models.Manager):
    """Manager with lookup helpers that require a concrete Share instance."""

    @classmethod
    def get_records(cls, share):
        """Return all ShareRecord rows belonging to *share*."""
        # Deliberately an exact type check (subclasses are rejected too).
        if type(share) is not Share:
            raise TypeError("type 'Share' expected")
        return ShareRecord.objects.filter(share__pk=share.pk)

    @classmethod
    def get_groups(cls, share):
        """Return all ShareGroup rows that *share* is a member of."""
        if type(share) is not Share:
            raise TypeError("type 'Share' expected")
        return share.sharegroup_set.all()
class Share(models.Model):
name = models.CharField(max_length=32, db_index=True, unique=True)
verbose_name = models.CharField(max_length=64, null=True, blank=True)
updated_daily = models.BooleanField(default=False)
last_updated = models.DateTimeField(null=True)
first_record = models.DateField(null=True)
last_record = models.DateField(null=True)
num_records = models.PositiveIntegerField(null=True)
objects = ShareManager()
class Meta:
get_latest_by = 'last_updated'
def __str__(self):
return self.verbose_name if self.verbose_name else self.name
class ShareSet(models.Model):
shares = models.ManyToManyField(Share)
class ShareRecord(models.Model):
share = models.ForeignKey(Share, null=False, db_index=True, related_name='records')
date = models.DateField(null=False, db_index=True)
open = models.FloatField(null=False)
close = models.FloatField(null=False)
high = models.FloatField(null=False)
low = models.FloatField(null=False)
volume = models.FloatField(null=False)
class Meta:
unique_together = ('share', 'date',)
index_together = ['share', 'date']
class ShareGroup(models.Model):
name = models.CharField(max_length=32, db_index=True, unique=True)
verbose_name = models.CharField(max_length=64, null=True)
shares = models.ManyToManyField(Share, blank=True)
def __str__(self):
return self.verbose_name if self.verbose_name else self.name
| [
"karol.gruszczyk@gmail.com"
] | karol.gruszczyk@gmail.com |
419549b51fa99e60ad2ad5f2e91cdbbcba075227 | fb03477012a435f3ccad5f8bcab5320ccb4f1ada | /src/gateway_proxy_core/apps.py | 59285bbeab77d220bde4e47ff558b79db697f4d7 | [] | no_license | x007007007/gateway-proxy | 5e0ecb17feba3aadfb05afe93485017a87ee8117 | 61046223a39c6f107fb99d70b81f5809b207e11d | refs/heads/master | 2023-02-12T19:08:42.317267 | 2020-12-22T16:07:09 | 2020-12-22T16:07:09 | 318,915,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | from django.apps import AppConfig
class GatewayProxyCoreConfig(AppConfig):
    """Django AppConfig for the ``gateway_proxy_core`` application."""
    name = 'gateway_proxy_core'
| [
"x007007007@hotmail.com"
] | x007007007@hotmail.com |
e535b1ca14677486264c1141216705096921d269 | ae3f23efcdc4b7fdd1c224043d0ece002955956e | /host/host/containers/lxc/utils.py | 8071ab5980742ef6789095528ee0e01719973559 | [] | no_license | alexeysofin/xplace | 4466682fe76c808288d69f2808ddbca38a583bc4 | 9f12f066a62fae4e789bee94e5e554cc6de26d90 | refs/heads/master | 2023-01-12T01:02:40.137609 | 2021-02-14T20:41:30 | 2021-02-14T20:41:30 | 208,021,139 | 0 | 0 | null | 2023-01-04T10:18:46 | 2019-09-12T10:07:17 | Python | UTF-8 | Python | false | false | 375 | py | import os
from host.utils.sub_process import run_command
from .const import LXC_BIN_PATH
from .exceptions import ContainerException
def run_container_command(command, *args, command_input=None):
    """Run an LXC binary by name, mapping failures to ContainerException.

    ``command`` is the bare executable name; it is resolved against
    LXC_BIN_PATH before being handed to the generic runner.
    """
    executable = os.path.join(LXC_BIN_PATH, command)
    return run_command(
        executable,
        *args,
        command_input=command_input,
        exception_class=ContainerException,
    )
| [
"sofin.moffin"
] | sofin.moffin |
5d96058fd7a39afd318a44140707be93e5029f7e | e7022b8eb4179e87007bc184a43cfb470c8637a5 | /code/dbengine.py | 6b4a0f13b3a9004009a5ea357a8e947203f254f8 | [] | no_license | yscoder-github/nl2sql-tianchi | a683c96ce5f545e38c7eb4afbb655ff537c73339 | 2d4463c5098d7533ebf879d874a8c14e61b8f269 | refs/heads/master | 2021-07-07T10:25:47.939612 | 2020-11-29T13:01:34 | 2020-11-29T13:01:34 | 207,695,804 | 74 | 18 | null | null | null | null | UTF-8 | Python | false | false | 2,398 | py | # -*- coding:utf-8 -*-
import json
import records
import re
from config import *
class DBEngine:
    """Executes structured NL2SQL queries against a SQLite file via `records`."""

    def __init__(self, fdb):
        # fdb: path to the SQLite database file.
        self.db = records.Database('sqlite:///{}'.format(fdb))
        self.conn = self.db.get_connection()

    def execute(self, table_id, select_index, aggregation_index, conditions, condition_relation):
        """
        table_id: id of the queried table.
        select_index: list of selected column index, like [0,1,2]
        aggregation_index: list of aggregation function corresponding to selected column, like [0,0,0], length is equal to select_index
        conditions: [[condition column, condition operator, condition value], ...]
        condition_relation: 0 or 1 or 2

        Returns a list of result tuples (sorted per row), or one of the
        sentinel strings 'Error1'/'Error2'/'Error3' for malformed input
        or a failing query.
        """
        table_id = 'Table_{}'.format(table_id)
        # Error1: more than one condition but the relation is '' (none).
        if condition_relation == 0 and len(conditions) > 1:
            return 'Error1'
        # Error2: no select columns, no conditions, or no aggregations given.
        if len(select_index) == 0 or len(conditions) == 0 or len(aggregation_index) == 0:
            return 'Error2'
        condition_relation = rela_dict[condition_relation]
        # Build the SELECT list; columns are 1-based ("col_1", ...).
        select_part = ""
        for sel, agg in zip(select_index, aggregation_index):
            select_str = 'col_{}'.format(sel+1)
            agg_str = agg_dict[agg]
            if agg:
                select_part += '{}({}),'.format(agg_str, select_str)
            else:
                select_part += '({}),'.format(select_str)
        select_part = select_part[:-1]  # drop trailing comma
        # Build the WHERE clause from the condition triples.
        # NOTE(review): values are interpolated directly into the SQL string,
        # so this is not injection-safe — acceptable only for trusted data.
        where_part = []
        for col_index, op, val in conditions:
            if PY3:
                where_part.append('col_{} {} "{}"'.format(col_index+1, cond_op_dict[op], val))
            else:
                where_part.append('col_{} {} "{}"'.format(col_index+1, cond_op_dict[op], val.encode('utf-8')))
        where_part = 'WHERE ' + condition_relation.join(where_part)
        query = 'SELECT {} FROM {} {}'.format(select_part, table_id, where_part)
        if PY2:
            query = query.decode('utf-8')
        # Error3: any failure while running the query (bare except kept to
        # preserve the original best-effort behavior).
        try:
            out = self.conn.query(query).as_dict()
        except:
            return 'Error3'
        # result_set = [tuple(set(i.values())) for i in out]
        # Sort each row's values so comparison is order-independent; under
        # Python 3 mixed types are compared via str() to avoid TypeError.
        if PY2:
            result_set = [tuple(sorted(i.values())) for i in out]
        else:
            result_set = [tuple(sorted(i.values(), key=lambda x:str(x))) for i in out]
        return result_set
"yscoder@foxmail.com"
] | yscoder@foxmail.com |
ca458ad14d97e536e9aa50c93af18dc6a1bae1b3 | 7a42d40a351824464a3c78dc0c3e78bbd8e0a92f | /bigdog_blog/blog/models.py | accbdf0df9a4805d5522a5c121acb6c6c2048712 | [] | no_license | AhMay/DerekBlogLearn | 6595063eafbc237b932e187b5cb3ad8ff32637fc | fdd5ea2fc5732cdc82ad006f7be0a2a1f30d0ba9 | refs/heads/master | 2020-07-09T05:20:33.283672 | 2019-09-29T10:10:23 | 2019-09-29T10:10:23 | 203,891,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,136 | py | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from unidecode import unidecode
from django.template.defaultfilters import slugify
from ckeditor_uploader.fields import RichTextUploadingField
from datetime import datetime
# Create your models here.
class Article(models.Model):
    '''Blog article model (draft/published workflow, slug-based URLs).'''
    STATUS_CHOICES =(
        ('d', '草稿'),
        ('p','发表'),
    )
    title = models.CharField('标题', max_length=200 )
    slug = models.SlugField('slug', max_length=60, blank=True) # slug lets readers and search engines infer the article's content from the URL
    # body = models.TextField('正文')
    body = RichTextUploadingField('正文')
    pub_date = models.DateTimeField('发布时间', null=True, blank=True)
    create_date = models.DateTimeField('创建时间', auto_now_add=True)
    mod_date = models.DateTimeField('修改时间', auto_now=True)
    status = models.CharField('文章状态', max_length=1, choices=STATUS_CHOICES, default='d')
    views = models.PositiveIntegerField('浏览量', default=0)
    author = models.ForeignKey(User, verbose_name='作者', on_delete=models.CASCADE)
    users_like = models.ManyToManyField(User,related_name='articles_liked', blank=True)
    # NOTE(review): null=True has no effect on ManyToManyField — Django ignores it.
    category = models.ManyToManyField('Category',verbose_name='分类', blank=True, null=True) # many-to-many
    tags = models.ManyToManyField('Tag', verbose_name='标签', blank=True, null=True) # many-to-many
    def __str__(self):
        return self.title
    def save(self, *args, **kwargs):
        # Generate the slug on first save (or whenever it is still empty);
        # unidecode transliterates non-ASCII (e.g. Chinese) titles first.
        if not self.id or not self.slug: # not saved yet, slug not generated
            self.slug = slugify(unidecode(self.title)) # title may be Chinese
        super().save(*args,**kwargs)
    # Custom clean(): used when the model's fields are not independent of
    # each other — keeps status and pub_date mutually consistent.
    def clean(self):
        if self.status == 'd' and self.pub_date is not None:
            self.pub_date = None
        if self.status == 'p' and self.pub_date is None:
            self.pub_date = datetime.now()
    def get_absolute_url(self):
        """Canonical URL for this article (pk + slug)."""
        return reverse('blog:article_detail', args=[self.pk, self.slug])
    def viewed(self):
        """Increment the view counter and persist only that field."""
        self.views +=1
        self.save(update_fields=['views'])
    def published(self):
        """Mark the article as published now and persist status/pub_date."""
        self.status = 'p'
        self.pub_date = datetime.now()
        self.save(update_fields=['status','pub_date'])
    class Meta:
        ordering = ['-pub_date']
        verbose_name ='文章'
        verbose_name_plural = verbose_name
class Category(models.Model):
    '''Article category; categories may be nested via parent_category.'''
    name = models.CharField('分类名', max_length=30)
    slug = models.SlugField('slug', max_length=40,blank=True)
    parent_category = models.ForeignKey('self', verbose_name='父级分类', blank=True, null=True, on_delete=models.CASCADE)

    def get_absolute_url(self):
        """Canonical URL for this category (pk + slug)."""
        return reverse('blog:category_detail', args=[self.pk, self.slug])

    def save(self, *args, **kwargs):
        # Generate the slug on first save (or whenever it is still empty).
        if not self.id or not self.slug:
            self.slug = slugify(unidecode(self.name))
        # BUG FIX: the original called super().save(args, kwargs), passing the
        # tuple/dict positionally (force_insert=args, ...), which breaks saves.
        super().save(*args, **kwargs)

    def has_child(self):
        """True if any category has this one as its parent, else False."""
        # exists() is cheaper than count() and still truthy/falsy-compatible
        # with the original (which returned True or None).
        return self.category_set.exists()

    def __str__(self):
        return self.name

    class Meta:
        ordering = ['name']
        verbose_name = '分类'
        verbose_name_plural = verbose_name
class Tag(models.Model):
    '''Article tag (unique name, auto-generated slug).'''
    name = models.CharField('标签名', max_length=30, unique=True)
    slug = models.SlugField('slug', max_length=40,blank=True)
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # NOTE(review): this URL uses the tag *name*, while Article/Category
        # use pk + slug — confirm the 'blog:tag_detail' route expects a name.
        return reverse('blog:tag_detail', args=[self.name])
    def get_article_count(self):
        """Number of articles carrying this tag (matched by slug)."""
        return Article.objects.filter(tags__slug=self.slug).count() # slug is expected to be non-empty
    def save(self,*args, **kwargs):
        # Generate the slug on first save (or whenever it is still empty);
        # unidecode transliterates non-ASCII names before slugifying.
        if not self.id or not self.slug:
            self.slug = slugify(unidecode(self.name))
        super().save(*args,**kwargs)
    class Meta:
        ordering =['name']
        verbose_name = '标签'
        verbose_name_plural = verbose_name
"meizi111082@hotmail.com"
] | meizi111082@hotmail.com |
791c91c8bad3f89f23330677eec8ef411ee77ec5 | 532ca0c5361b54970bc435232e2a6d079c49aecd | /02_Strings and Console Output/01__Strings and Console Output/02_Practice.py | 3d197e67ca3925a0945cf843aae122c22ef018ea | [] | no_license | haveano/codeacademy-python_v1 | dc5484e8df73b9a15ffce835dde625b6454c8302 | 10e6fb2974e1c47f380bb6a33c50b171ecfbf50f | refs/heads/master | 2021-01-11T16:45:57.337493 | 2017-05-30T10:04:08 | 2017-05-30T10:04:08 | 79,660,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
Practice
Excellent! Let's get a little practice in with strings.
Instructions
Set the following variables to their respective phrases:
Set caesar to "Graham"
Set praline to "John"
Set viking to "Teresa"
"""
# Assign your variables below, each on its own line!
caesar = "Graham"
praline = "John"
viking = "Teresa"
# Put your variables above this line
print caesar
print praline
print viking
| [
"noreply@github.com"
] | haveano.noreply@github.com |
844f5b18c6467cd856b107372a08f137d28c5bbf | 07564c75c1f37f2e0304720d1c01f23a27ef3469 | /543.DiameterofBinaryTree/solution.py | 9d5a9edc240760c0c6c070bffacb98e6e92bb25b | [] | no_license | ynXiang/LeetCode | 5e468db560be7f171d7cb24bcd489aa81471349c | 763372587b9ca3f8be4c843427e4760c3e472d6b | refs/heads/master | 2020-05-21T18:27:16.941981 | 2018-01-09T22:17:42 | 2018-01-09T22:17:42 | 84,642,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def diameterOfBinaryTree(self, root):
        """
        :type root: TreeNode
        :rtype: int

        The diameter is the number of *edges* on the longest path between
        any two nodes; ``cal`` counts nodes on that path, hence the -1.
        """
        if not root:
            return 0
        longest_node_path, _ = self.cal(root)
        return longest_node_path - 1

    def cal(self, root):
        """Return (longest path in node count, depth in node count)."""
        if root is None:
            return 0, 0
        left_path, left_depth = self.cal(root.left)
        right_path, right_depth = self.cal(root.right)
        best_path = max(left_path, right_path, left_depth + 1 + right_depth)
        return best_path, max(left_depth, right_depth) + 1
| [
"yinan_xiang@163.com"
] | yinan_xiang@163.com |
6e6933c8437c6d5ee41f9b5347467037f3a952c0 | 209a7a4023a9a79693ec1f6e8045646496d1ea71 | /COMP0016_2020_21_Team12-datasetsExperimentsAna/pwa/FADapp/pythonScripts/venv/Lib/site-packages/pandas/tests/indexing/multiindex/test_indexing_slow.py | a6d678af44ae14e714169ade419968e4d2a623e4 | [
"MIT"
] | permissive | anzhao920/MicrosoftProject15_Invictus | 5e2347015411bbffbdf0ceb059df854661fb240c | 15f44eebb09561acbbe7b6730dfadf141e4c166d | refs/heads/main | 2023-04-16T13:24:39.332492 | 2021-04-27T00:47:13 | 2021-04-27T00:47:13 | 361,913,170 | 0 | 0 | MIT | 2021-04-26T22:41:56 | 2021-04-26T22:41:55 | null | UTF-8 | Python | false | false | 3,087 | py | import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
m = 50
n = 1000
cols = ["jim", "joe", "jolie", "joline", "jolia"]
vals = [
np.random.randint(0, 10, n),
np.random.choice(list("abcdefghij"), n),
np.random.choice(pd.date_range("20141009", periods=10).tolist(), n),
np.random.choice(list("ZYXWVUTSRQ"), n),
np.random.randn(n),
]
vals = list(map(tuple, zip(*vals)))
# bunch of keys for testing
keys = [
np.random.randint(0, 11, m),
np.random.choice(list("abcdefghijk"), m),
np.random.choice(pd.date_range("20141009", periods=11).tolist(), m),
np.random.choice(list("ZYXWVUTSRQP"), m),
]
keys = list(map(tuple, zip(*keys)))
keys += list(map(lambda t: t[:-1], vals[:: n // m]))
# covers both unique index and non-unique index
df = DataFrame(vals, columns=cols)
a = pd.concat([df, df])
b = df.drop_duplicates(subset=cols[:-1])
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
@pytest.mark.parametrize("lexsort_depth", list(range(5)))
@pytest.mark.parametrize("key", keys)
@pytest.mark.parametrize("frame", [a, b])
def test_multiindex_get_loc(lexsort_depth, key, frame):
# GH7724, GH2646
with warnings.catch_warnings(record=True):
# test indexing into a multi-index before & past the lexsort depth
def validate(mi, df, key):
mask = np.ones(len(df)).astype("bool")
# test for all partials of this key
for i, k in enumerate(key):
mask &= df.iloc[:, i] == k
if not mask.any():
assert key[: i + 1] not in mi.index
continue
assert key[: i + 1] in mi.index
right = df[mask].copy()
if i + 1 != len(key): # partial key
return_value = right.drop(cols[: i + 1], axis=1, inplace=True)
assert return_value is None
return_value = right.set_index(cols[i + 1 : -1], inplace=True)
assert return_value is None
tm.assert_frame_equal(mi.loc[key[: i + 1]], right)
else: # full key
return_value = right.set_index(cols[:-1], inplace=True)
assert return_value is None
if len(right) == 1: # single hit
right = Series(
right["jolia"].values, name=right.index[0], index=["jolia"]
)
tm.assert_series_equal(mi.loc[key[: i + 1]], right)
else: # multi hit
tm.assert_frame_equal(mi.loc[key[: i + 1]], right)
if lexsort_depth == 0:
df = frame.copy()
else:
df = frame.sort_values(by=cols[:lexsort_depth])
mi = df.set_index(cols[:-1])
assert not mi.index.lexsort_depth < lexsort_depth
validate(mi, df, key)
| [
"ana.kapros@yahoo.ro"
] | ana.kapros@yahoo.ro |
2f66e6e8846a2c3a80d4c7e9ad2324bdf9096648 | f0257428fed2a5f10950ee52e88a0f6811120323 | /study_oldboy/Day11/01.11_rabbitmq_topic_subscriber.py | 6b8487dfa3db86e47d7023e531f079912e4cb67b | [] | no_license | tata-LY/python | 454d42cc8f6db9a1450966aba4af6894e1b59b78 | 55d13b7f61cbb87ff3f272f596cd5b8c53b807c5 | refs/heads/main | 2023-04-08T19:31:57.945506 | 2021-04-19T05:39:17 | 2021-04-19T05:39:17 | 328,880,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2021-3-9 8:47
# @Author : liuyang
# @File : 01.11_rabbitmq_topic_subscriber.py
# @Software: PyCharm
import pika
import sys
hostname = '192.168.113.11'
connection = pika.BlockingConnection(pika.ConnectionParameters(host=hostname))
channel = connection.channel()
channel.exchange_declare(exchange='topic_logs',
exchange_type='topic')
result = channel.queue_declare('', exclusive=True)
queue_name = result.method.queue
binding_keys = sys.argv[1:]
if not binding_keys:
sys.stderr.write("Usage: %s [binding_key]...\n" % sys.argv[0])
sys.exit(1)
for binding_key in binding_keys:
channel.queue_bind(exchange='topic_logs',
queue=queue_name,
routing_key=binding_key)
print(' [*] Waiting for logs. To exit press CTRL+C')
def callback(ch, method, properties, body):
print(" [X] %r:%r" % (method.routing_key, body))
channel.basic_consume(queue_name,
callback,
True)
channel.start_consuming() | [
"ainiyang20@qq.com"
] | ainiyang20@qq.com |
4c1b7e82cf084943af5406747c7b74ab3e20a6fe | ad6fe640e0074f08961a55d727bc204dcdcf8848 | /src/simplessl/ca.py | ee0d4faa639c23be7fb011b6de3c953378009ab5 | [] | no_license | andrewcooke/simple-ssl | 50a8764e340d5f78f0c17c273d0445a533db5f8c | 87d619bcd0f3c3326e9177c64252783f559ba9eb | refs/heads/master | 2021-01-01T16:13:21.228950 | 2013-12-10T01:43:17 | 2013-12-10T01:43:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py |
from script.argp import ArgP, ArgPRoot, ArgPRun
from script.attr import StrAttr
class CaCore(ArgPRoot):
    """Root command-line entry point for the CA (certificate authority) tool."""
    # Working directory for the CA's data; defaults to the current directory.
    dir = ArgP(StrAttr, value='.',
               description='Where a CA stores the data it needs to do its work.')
    def __call__(self):
        # Invoked after argument parsing; dumps the resolved directory value
        # both via the descriptor protocol and via plain attribute access.
        print(self.dir.__get__(self, type(self)))
        print("dir is " + str(self.dir))
if __name__ == '__main__':
ArgPRun(CaCore)
| [
"andrew@acooke.org"
] | andrew@acooke.org |
2725f826472f8bc43b3068109792eecdae0fc910 | 5c4515960dcbfd3861d06d90b8c9bde0bdf3ecf5 | /Iserlab/migrations/0130_mytempvm.py | f24ae4cf5f65d103faabb44569c96b7a2c006487 | [] | no_license | Mathilda1992/mcysite | 66bb2f51de622b7f7c450664c798eb11ce195cae | def82e43474ecc734c6cbb26842bd87f698b2b88 | refs/heads/master | 2021-01-11T19:58:23.611196 | 2017-06-26T08:58:11 | 2017-06-26T08:58:11 | 79,434,975 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-10 07:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Iserlab', '0129_auto_20170507_0516'),
]
operations = [
migrations.CreateModel(
name='MyTempVM',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('createtime', models.DateTimeField(auto_now_add=True)),
('teacher', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Iserlab.User')),
('vm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Iserlab.VM')),
],
options={
'ordering': ['-createtime'],
},
),
]
| [
"machenyi2011@163.com"
] | machenyi2011@163.com |
8da88a7f51735fe571ed0f617232307ac1baf9f6 | c361a25acecd016677bbd0c6d9fc56de79cf03ed | /TSM/TestCase.py | 23412a68d4d65560b1b1074bef9519e4955fd92f | [] | no_license | danielmellado/zephyr | f8931633045959e7e9a974de8b700a287a1ae94e | dc6f85b78b50e599504966154b927fe198d7402d | refs/heads/master | 2021-01-12T22:31:24.479814 | 2015-10-14T05:39:04 | 2015-10-14T06:24:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,459 | py | __author__ = 'micucci'
# Copyright 2015 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import importlib
import logging
import datetime
from common.Exceptions import *
from TestScenario import TestScenario
from VTM.VirtualTopologyManager import VirtualTopologyManager
from PTM.PhysicalTopologyManager import PhysicalTopologyManager
class TestCase(unittest.TestCase):
class_scenario = None
""" :type: TestScenario"""
vtm = None
""" :type: VirtualTopologyManager"""
ptm = None
""" :type: PhysicalTopologyManager"""
setup_logger = None
""" :type: logging.Logger"""
@staticmethod
def supported_scenarios():
"""
Subclasses should override to return a set of supported scenario classes
:return: set[class]
"""
return set()
@staticmethod
def get_class(fqn):
"""
Return the class from the fully-qualified package/module/class name
:type fqn: str
:return:
"""
class_name = fqn.split('.')[-1]
module_name = '.'.join(fqn.split('.')[0:-1])
module = importlib.import_module(module_name if module_name != '' else class_name)
impl_class = getattr(module, class_name)
if not issubclass(impl_class, TestCase):
raise ArgMismatchException('Class: ' + fqn + ' is not a subclass of TSM.TestCase')
return impl_class
@classmethod
def _get_name(cls):
return cls.__name__
@classmethod
def _prepare_class(cls, current_scenario, tsm_logger=logging.getLogger()):
cls.class_scenario = current_scenario
cls.ptm = current_scenario.ptm
cls.vtm = current_scenario.vtm
cls.setup_logger = tsm_logger
def __init__(self, methodName='runTest'):
super(TestCase, self).__init__(methodName)
self.LOG = logging.getLogger('test-case-null-logger')
""" :type: logging.Logger"""
self.CONSOLE = logging.getLogger('test-case-null-logger')
""" :type: logging.Logger"""
self.start_time = None
""" :type: datetime.datetime"""
self.stop_time = None
""" :type: datetime.datetime"""
self.run_time = None
""" :type: datetime.datetime"""
self.current_scenario = self.class_scenario
""" :type: TestScenario"""
self.LOG.addHandler(logging.NullHandler())
def run(self, result=None):
self.start_time = datetime.datetime.utcnow()
self.LOG.info('Running test case: ' + self._get_name() + ' - ' + self._testMethodName)
super(TestCase, self).run(result)
self.LOG.info('Test case finished: ' + self._get_name() + ' - ' + self._testMethodName)
self.stop_time = datetime.datetime.utcnow()
self.run_time = (self.stop_time - self.start_time)
def set_logger(self, log, console=None):
self.LOG = log
self.CONSOLE = console
def runTest(self):
pass
| [
"micucci@midokura.com"
] | micucci@midokura.com |
66ab1c7847a102afd4b9a6c0d36a0c17bebc5b7c | 51aee975a3d2889fb0f3d72f96da5654978daf3d | /processBar/bag/special.py | fab12369bd40f9e4033d67d1d5e3ad8c423fdabc | [] | no_license | zizle/PyQt5_Demo | 5bcca6bcae2b94aafff25069ffe41f9def411348 | 13985343d30bf282486e9af1d0e51233a354b666 | refs/heads/master | 2020-07-27T20:29:05.732915 | 2019-11-07T03:37:06 | 2019-11-07T03:37:06 | 209,207,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,830 | py | # _*_ coding:utf-8 _*_
# __Author__: zizle
from PyQt5.QtCore import QSize, pyqtProperty, QTimer, Qt, QRectF
from PyQt5.QtGui import QColor, QPainter, QFont
from PyQt5.QtWidgets import QWidget
class CircleProgressBar(QWidget):
Color = QColor(24, 189, 155) # 圆圈颜色
Clockwise = True # 顺时针还是逆时针
Delta = 36
def __init__(self, *args, color=None, clockwise=True, **kwargs):
super(CircleProgressBar, self).__init__(*args, **kwargs)
self.angle = 0
self.Clockwise = clockwise
if color:
self.Color = color
self._timer = QTimer(self, timeout=self.update)
self._timer.start(100)
def paintEvent(self, event):
super(CircleProgressBar, self).paintEvent(event)
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing)
painter.translate(self.width() / 2, self.height() / 2)
side = min(self.width(), self.height())
painter.scale(side / 100.0, side / 100.0)
painter.rotate(self.angle)
painter.save()
painter.setPen(Qt.NoPen)
color = self.Color.toRgb()
for i in range(11):
color.setAlphaF(1.0 * i / 10)
painter.setBrush(color)
painter.drawEllipse(30, -10, 20, 20)
painter.rotate(36)
painter.restore()
self.angle += self.Delta if self.Clockwise else -self.Delta
self.angle %= 360
@pyqtProperty(QColor)
def color(self) -> QColor:
return self.Color
@color.setter
def color(self, color: QColor):
if self.Color != color:
self.Color = color
self.update()
@pyqtProperty(bool)
def clockwise(self) -> bool:
return self.Clockwise
@clockwise.setter
def clockwise(self, clockwise: bool):
if self.Clockwise != clockwise:
self.Clockwise = clockwise
self.update()
@pyqtProperty(int)
def delta(self) -> int:
return self.Delta
@delta.setter
def delta(self, delta: int):
if self.delta != delta:
self.delta = delta
self.update()
def sizeHint(self) -> QSize:
return QSize(100, 100)
class PercentProgressBar(QWidget):
MinValue = 0
MaxValue = 100
Value = 0
BorderWidth = 8
Clockwise = True # 顺时针还是逆时针
ShowPercent = True # 是否显示百分比
ShowFreeArea = False # 显示背后剩余
ShowSmallCircle = False # 显示带头的小圆圈
TextColor = QColor(255, 255, 255) # 文字颜色
BorderColor = QColor(24, 189, 155) # 边框圆圈颜色
BackgroundColor = QColor(70, 70, 70) # 背景颜色
def __init__(self, *args, value=0, minValue=0, maxValue=100,
borderWidth=8, clockwise=True, showPercent=True,
showFreeArea=False, showSmallCircle=False,
textColor=QColor(255, 255, 255),
borderColor=QColor(24, 189, 155),
backgroundColor=QColor(70, 70, 70), **kwargs):
super(PercentProgressBar, self).__init__(*args, **kwargs)
self.Value = value
self.MinValue = minValue
self.MaxValue = maxValue
self.BorderWidth = borderWidth
self.Clockwise = clockwise
self.ShowPercent = showPercent
self.ShowFreeArea = showFreeArea
self.ShowSmallCircle = showSmallCircle
self.TextColor = textColor
self.BorderColor = borderColor
self.BackgroundColor = backgroundColor
def setRange(self, minValue: int, maxValue: int):
if minValue >= maxValue: # 最小值>=最大值
return
self.MinValue = minValue
self.MaxValue = maxValue
self.update()
def paintEvent(self, event):
super(PercentProgressBar, self).paintEvent(event)
width = self.width()
height = self.height()
side = min(width, height)
painter = QPainter(self)
# 反锯齿
painter.setRenderHints(QPainter.Antialiasing |
QPainter.TextAntialiasing)
# 坐标中心为中间点
painter.translate(width / 2, height / 2)
# 按照100x100缩放
painter.scale(side / 100.0, side / 100.0)
# 绘制中心园
self._drawCircle(painter, 50)
# 绘制圆弧
self._drawArc(painter, 50 - self.BorderWidth / 2)
# 绘制文字
self._drawText(painter, 50)
def _drawCircle(self, painter: QPainter, radius: int):
# 绘制中心园
radius = radius - self.BorderWidth
painter.save()
painter.setPen(Qt.NoPen)
painter.setBrush(self.BackgroundColor)
painter.drawEllipse(QRectF(-radius, -radius, radius * 2, radius * 2))
painter.restore()
def _drawArc(self, painter: QPainter, radius: int):
# 绘制圆弧
painter.save()
painter.setBrush(Qt.NoBrush)
# 修改画笔
pen = painter.pen()
pen.setWidthF(self.BorderWidth)
pen.setCapStyle(Qt.RoundCap)
arcLength = 360.0 / (self.MaxValue - self.MinValue) * self.Value
rect = QRectF(-radius, -radius, radius * 2, radius * 2)
if not self.Clockwise:
# 逆时针
arcLength = -arcLength
# 绘制剩余进度圆弧
if self.ShowFreeArea:
acolor = self.BorderColor.toRgb()
acolor.setAlphaF(0.2)
pen.setColor(acolor)
painter.setPen(pen)
painter.drawArc(rect, (0 - arcLength) *
16, -(360 - arcLength) * 16)
# 绘制当前进度圆弧
pen.setColor(self.BorderColor)
painter.setPen(pen)
painter.drawArc(rect, 0, -arcLength * 16)
# 绘制进度圆弧前面的小圆
if self.ShowSmallCircle:
offset = radius - self.BorderWidth + 1
radius = self.BorderWidth / 2 - 1
painter.rotate(-90)
circleRect = QRectF(-radius, radius + offset,
radius * 2, radius * 2)
painter.rotate(arcLength)
painter.drawEllipse(circleRect)
painter.restore()
def _drawText(self, painter: QPainter, radius: int):
# 绘制文字
painter.save()
painter.setPen(self.TextColor)
painter.setFont(QFont('Arial', 25))
strValue = '{}%'.format(int(self.Value / (self.MaxValue - self.MinValue)
* 100)) if self.ShowPercent else str(self.Value)
painter.drawText(QRectF(-radius, -radius, radius * 2,
radius * 2), Qt.AlignCenter, strValue)
painter.restore()
@pyqtProperty(int)
def minValue(self) -> int:
return self.MinValue
@minValue.setter
def minValue(self, minValue: int):
if self.MinValue != minValue:
self.MinValue = minValue
self.update()
@pyqtProperty(int)
def maxValue(self) -> int:
return self.MaxValue
@maxValue.setter
def maxValue(self, maxValue: int):
if self.MaxValue != maxValue:
self.MaxValue = maxValue
self.update()
@pyqtProperty(int)
def value(self) -> int:
return self.Value
@value.setter
def value(self, value: int):
if self.Value != value:
self.Value = value
self.update()
@pyqtProperty(float)
def borderWidth(self) -> float:
return self.BorderWidth
@borderWidth.setter
def borderWidth(self, borderWidth: float):
if self.BorderWidth != borderWidth:
self.BorderWidth = borderWidth
self.update()
@pyqtProperty(bool)
def clockwise(self) -> bool:
return self.Clockwise
@clockwise.setter
def clockwise(self, clockwise: bool):
if self.Clockwise != clockwise:
self.Clockwise = clockwise
self.update()
@pyqtProperty(bool)
def showPercent(self) -> bool:
return self.ShowPercent
@showPercent.setter
def showPercent(self, showPercent: bool):
if self.ShowPercent != showPercent:
self.ShowPercent = showPercent
self.update()
@pyqtProperty(bool)
def showFreeArea(self) -> bool:
return self.ShowFreeArea
@showFreeArea.setter
def showFreeArea(self, showFreeArea: bool):
if self.ShowFreeArea != showFreeArea:
self.ShowFreeArea = showFreeArea
self.update()
@pyqtProperty(bool)
def showSmallCircle(self) -> bool:
return self.ShowSmallCircle
@showSmallCircle.setter
def showSmallCircle(self, showSmallCircle: bool):
if self.ShowSmallCircle != showSmallCircle:
self.ShowSmallCircle = showSmallCircle
self.update()
@pyqtProperty(QColor)
def textColor(self) -> QColor:
return self.TextColor
@textColor.setter
def textColor(self, textColor: QColor):
if self.TextColor != textColor:
self.TextColor = textColor
self.update()
@pyqtProperty(QColor)
def borderColor(self) -> QColor:
return self.BorderColor
@borderColor.setter
def borderColor(self, borderColor: QColor):
if self.BorderColor != borderColor:
self.BorderColor = borderColor
self.update()
@pyqtProperty(QColor)
def backgroundColor(self) -> QColor:
return self.BackgroundColor
@backgroundColor.setter
def backgroundColor(self, backgroundColor: QColor):
if self.BackgroundColor != backgroundColor:
self.BackgroundColor = backgroundColor
self.update()
def setValue(self, value):
self.value = value
def sizeHint(self) -> QSize:
return QSize(100, 100)
| [
"zizle_lin@163.com"
] | zizle_lin@163.com |
340cced1c55c9499682cd5949e558809a69cf8be | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /GP8Tywnn2gucEfSMf_17.py | ec8e7521de8652ea402b3016afa29c871da03a89 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py |
def error(n):
    """Return the diagnostic message for error code *n*.

    Known codes map to a human-readable message string; any unknown code
    yields the numeric fallback 101.
    """
    messages = {
        1: 'Check the fan: e1',
        2: 'Emergency stop: e2',
        3: 'Pump Error: e3',
        4: 'c: e4',
        5: 'Temperature Sensor Error: e5',
    }
    # dict.get replaces the original "not in adict.keys()" membership test
    # followed by a second lookup — one lookup, same behavior.
    return messages.get(n, 101)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
0c82b427b1e416b6454f6c7d36f356c26adc1fd2 | 2a45af8ec8a4c87d544f461d27795a283f8f5f67 | /python/input_complete.py | 45e2c6077dd6187d1fcb5bd81dc805136a70f19f | [] | no_license | fengidri/python-script | 2199a16a2d0cc76e6055aec31aaced4638a8c86d | 28fb8e6dbf9e6ba5a1f9c4c3d7b635212bfc5b66 | refs/heads/master | 2020-04-05T14:04:55.103302 | 2017-04-27T10:32:27 | 2017-04-27T10:32:27 | 8,678,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,186 | py | import os
import re
import readline
# Matches a line whose last word is followed by trailing whitespace, i.e. the
# cursor sits after a completed argument.
# NOTE(review): '\s' in a non-raw string triggers DeprecationWarning on modern
# Pythons; consider r'.*\s+$'.
RE_SPACE = re.compile('.*\s+$', re.M)
class Completer(object):
    """readline tab-completion helper that completes filesystem paths."""

    def _listdir(self, root):
        "List directory 'root' appending the path separator to subdirs."
        res = []
        for name in os.listdir(root):
            path = os.path.join(root, name)
            if os.path.isdir(path):
                name += os.sep
            res.append(name)
        return res

    def _complete_path(self, path=None):
        "Perform completion of filesystem path."
        if not path:
            return self._listdir('.')
        dirname, rest = os.path.split(path)
        tmp = dirname if dirname else '.'
        res = [os.path.join(dirname, p)
               for p in self._listdir(tmp) if p.startswith(rest)]
        # more than one match, or single match which does not exist (typo)
        if len(res) > 1 or not os.path.exists(path):
            return res
        # resolved to a single directory, so return list of files below it
        if os.path.isdir(path):
            return [os.path.join(path, p) for p in self._listdir(path)]
        # exact file match terminates this completion
        return [path + ' ']

    def complete_extra(self, args):
        "Completions for the 'extra' command."
        if not args:
            return self._complete_path('.')
        # treat the last arg as a path and complete it
        return self._complete_path(args[-1])

    def path(self, text, state):
        "Generic readline completion entry point."
        # Fetch the buffer once (the original called get_line_buffer() twice)
        # and avoid shadowing the builtin `buffer`.
        line_buffer = readline.get_line_buffer()
        line = line_buffer.split()
        # account for last argument ending in a space
        if RE_SPACE.match(line_buffer):
            line.append('')
        # Fix: the original had an unreachable second `return` after this one
        # that referenced an undefined name `cmd`; it has been removed.
        return (self.complete_extra(line) + [None])[state]
def input_path():
    """Install filesystem tab-completion on the current readline session."""
    completer = Completer()
    # Treat '/' as part of a word by overriding readline's default delimiters.
    readline.set_completer_delims(' \t\n;')
    readline.parse_and_bind("tab: complete")
    readline.set_completer(completer.path)
if __name__ == "__main__":
    # NOTE(review): this demo only blocks on raw_input() (Python 2 builtin);
    # input_path() is never called here, so completion is not actually
    # installed — confirm whether that is intentional.
    raw_input( )
| [
"fengidri@gmail.com"
] | fengidri@gmail.com |
8fab41a7003e035af2fca6201f82467c6caa256e | 7f3c0c7cb3987356171e91b2e888e2bfbe2f5077 | /group_discussion/migrations/0011_topicuser_group_centrality.py | c300d7e28d073aa8945c43f8deb7baa6ca68aa0b | [] | no_license | jscott1989/newscircle | 7d329673ed58dd2309ac6182fae3452bd50a8d54 | 373eba2f9aaa747272092521581d78524585df55 | refs/heads/master | 2020-12-24T11:53:12.865783 | 2016-11-07T17:50:42 | 2016-11-07T17:50:42 | 73,105,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the nullable integer column ``group_centrality`` to ``topicuser``."""
    dependencies = [
        ('group_discussion', '0010_comment_created_at'),
    ]
    operations = [
        migrations.AddField(
            model_name='topicuser',
            name='group_centrality',
            field=models.IntegerField(null=True),
            preserve_default=True,
        ),
    ]
| [
"jonathan@jscott.me"
] | jonathan@jscott.me |
72663f4b331865732b2fab003dfd7cda950f5dea | 374dea7d7d1a424d91f369cc75b11b16e1a489cd | /XDG_CACHE_HOME/Microsoft/Python Language Server/stubs.v1/bPN_2o1RXRZaK7Vxgp3oTysbcxQmJr9XStOWBh0VWNo=/_multiprocessing.cpython-37m-x86_64-linux-gnu.pyi | ab4051ca67e5ced0fe3ca7d109b3f9b4d0cd4e04 | [] | no_license | tkoon107/text-generation-LSTM-neural-net | ed0e6a0fb906f4b4fd649eadfe36c254144be016 | 6b98ee355a30da128462bfac531509539d6533ae | refs/heads/master | 2020-05-27T16:46:44.128875 | 2019-06-10T18:26:54 | 2019-06-10T18:26:54 | 188,708,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,181 | pyi | import builtins as _mod_builtins
# Auto-generated IntelliSense stub (Microsoft Python Language Server) for the
# CPython 3.7 C-extension module `_multiprocessing`.  Method bodies are
# placeholders; only the signatures and docstrings carry information.
class SemLock(_mod_builtins.object):
    'Semaphore/Mutex type'
    SEM_VALUE_MAX = 2147483647
    __class__ = SemLock
    def __enter__(self):
        'enter the semaphore/lock'
        return self
    def __exit__(self):
        'exit the semaphore/lock'
        pass
    def __init__(self, *args, **kwargs):
        'Semaphore/Mutex type'
        pass
    @classmethod
    def __init_subclass__(cls):
        'This method is called when a class is subclassed.\n\nThe default implementation does nothing. It may be\noverridden to extend subclasses.\n'
        return None
    @classmethod
    def __subclasshook__(cls, subclass):
        'Abstract classes can override this to customize issubclass().\n\nThis is invoked early on by abc.ABCMeta.__subclasscheck__().\nIt should return True, False or NotImplemented. If it returns\nNotImplemented, the normal algorithm is used. Otherwise, it\noverrides the normal algorithm (and the outcome is cached).\n'
        return False
    def _after_fork(self):
        'rezero the net acquisition count after fork()'
        pass
    def _count(self):
        'num of `acquire()`s minus num of `release()`s for this process'
        pass
    def _get_value(self):
        'get the value of the semaphore'
        pass
    def _is_mine(self):
        'whether the lock is owned by this thread'
        pass
    def _is_zero(self):
        'returns whether semaphore has value zero'
        pass
    @classmethod
    def _rebuild(cls):
        pass
    def acquire(self):
        'acquire the semaphore/lock'
        pass
    @property
    def handle(self):
        pass
    @property
    def kind(self):
        pass
    @property
    def maxvalue(self):
        pass
    @property
    def name(self):
        pass
    def release(self):
        'release the semaphore/lock'
        pass
# Module-level metadata captured from the real extension module at stub
# generation time (the file path is machine-specific).
__doc__ = None
__file__ = '/home/trevor/anaconda3/lib/python3.7/lib-dynload/_multiprocessing.cpython-37m-x86_64-linux-gnu.so'
__name__ = '_multiprocessing'
__package__ = ''
flags = _mod_builtins.dict()
def sem_unlink():
    # Stub for _multiprocessing.sem_unlink; no signature info was generated.
    pass
| [
"trevorlang@langdatascience.org"
] | trevorlang@langdatascience.org |
8beeca84710f4f56f3bbffee44da80f9a7637cbc | b0f41ef2af5309fc172b05232dbde501a01d1234 | /fyt/webauth/tests/get_pgt.py | 82e40defcf0403075e2e661dfa9b8d323024d423 | [] | no_license | rlmv/doc-trips | c4dfec9b80cf531b69b17ac2caaef509fa048cd3 | 59c1ffc0bff1adb4f86f1dcfaa66d8970ff55b72 | refs/heads/master | 2023-05-27T01:48:49.251830 | 2021-08-07T04:02:26 | 2021-08-07T04:02:26 | 21,745,373 | 10 | 3 | null | 2023-05-23T00:51:26 | 2014-07-11T17:36:35 | Python | UTF-8 | Python | false | false | 429 | py | # Run via bin/django shell --plain < get_pgt.py
# to pick up all the django environment
# Allows main test class to be independent of CAS implementation platform
# TODO: pass in iou - if cant take args write to file and read here
import atexit
from django_cas.models import PgtIOU
@atexit.register
def lookup_pgt():
    # Runs at interpreter exit: print the TGT of the most recent PgtIOU record
    # so the calling test harness can capture it from stdout.
    pgt = PgtIOU.objects.latest('created')
    # NOTE(review): Django's .latest() raises DoesNotExist instead of
    # returning None on an empty table, so the 'FAIL' branch may be
    # unreachable — confirm.
    if pgt:
        print(pgt.tgt)
    else:
        print('FAIL')
| [
"bo.marchman@gmail.com"
] | bo.marchman@gmail.com |
7173d52df0b048d98533fa53729620a11ea3b6f5 | c4fa1ebcdd413c4ab3f0979ee3beead8a8809870 | /providers/gov/clinicaltrials/apps.py | 8ff0b4b5fc7d7930735d5f96d0039eb757546874 | [] | no_license | terroni/SHARE | e47f291db7cf100d29a7904fe820e75d29db1472 | a5631f441da1288722c68785b86128c854cbe7c1 | refs/heads/develop | 2020-12-03T02:29:47.381341 | 2016-07-11T19:40:27 | 2016-07-11T19:40:27 | 63,097,148 | 1 | 0 | null | 2016-07-11T19:45:51 | 2016-07-11T19:45:50 | null | UTF-8 | Python | false | false | 347 | py | from share.provider import ProviderAppConfig
from .harvester import ClinicalTrialsHarvester
class AppConfig(ProviderAppConfig):
    """SHARE provider registration for the ClinicalTrials.gov harvester."""
    name = 'providers.gov.clinicaltrials'
    version = '0.0.1'
    title = 'clinicaltrials'
    long_title = 'ClinicalTrials.gov'
    home_page = 'https://clinicaltrials.gov/'
    harvester = ClinicalTrialsHarvester
| [
"icereval@gmail.com"
] | icereval@gmail.com |
b38eb2a7ce9fb308e375c9cdbe8bc50c31984eb7 | 694d57c3e512ce916269411b51adef23532420cd | /python/hardway/ex6.py | a24f3aa7aba6c3bbf44dd4427988a8f522bce255 | [] | no_license | clovery410/mycode | 5541c3a99962d7949832a0859f18819f118edfba | e12025e754547d18d5bb50a9dbe5e725fd03fd9c | refs/heads/master | 2021-05-16T02:46:47.996748 | 2017-05-10T23:43:50 | 2017-05-10T23:43:50 | 39,235,141 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | x = "There are %d types of people." % 10 # make definition of variable x
binary = "binary" # make definition of variable binary
do_not = "don't" # make definition of variable do_not
y = "Those who know %s and those who %s." % (binary, do_not) # make definition of variable y
print x # print x
print y # print y
print "I said: %r." % x # print a string with variable x
print "I also said: '%s'." % y # print a string with variable y
hilarious = False # assign variable hilarious with the value of False
joke_evaluation = "Isn't that joke so funny?! %r" # assign variable joke_evaluation a string
print joke_evaluation % hilarious # print string joke_evaluation
w = "This is the left side of..." # assign variable w with a string value
e = "a string with a right side." # assign variable e with a string value
print w + e # print string
| [
"admin@admins-MacBook-Air.local"
] | admin@admins-MacBook-Air.local |
106644e566ea536280b420280431985f0893666e | 0cef1ca8b0fd54095d263d41c22b5b72bdd297db | /ace-zero-rl/d2dsql_train.py | 2a0a577c36b7030811a1bb7d3ff74b4fc72a0ee8 | [] | no_license | budi-kurniawan/phd | 06c1b622f3ed5e518f3ce69ca0f113d411b3f01f | 3ce071462db1ee4a9b590c952a750dd9c99ca9d2 | refs/heads/main | 2023-08-22T02:59:24.228607 | 2021-10-07T12:02:34 | 2021-10-07T12:02:34 | 414,012,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,308 | py | #!/usr/bin/env python3
import os
import csv
import pickle
from datetime import datetime
import numpy as np
import ace_zero_core
import torch
import rl
from ace_zero_core import acezero
from rl import rl_utils
from rl.env.ace_zero_env import AceZeroEnvironment
from cartpole.util.dqn_util import BootstrappableDoubleDQNAgent
from dqn_train import main, get_env_dim, normalise
import dqn_train as base
# Loss threshold below which NN bootstrapping stops early; the __main__ block
# at the bottom of this file overrides it to 0.01 before training.
INITIAL_NN_MIN_LOSS = 0.001
def bootstrap1(self, init_data, trial, out_path):
    """Pre-train (bootstrap) the agent's Q-network on a D2D-SPL training set.

    Monkey-patched onto the agent class as ``bootstrap`` (see the patch block
    below).  If a saved bootstrap model for *trial* already exists it is
    loaded and the method returns immediately.  Otherwise the network is
    fitted to the recorded action preferences until the minimum observed loss
    drops below INITIAL_NN_MIN_LOSS (or the iteration cap is reached); the
    model and loss statistics are then written to *out_path*.
    """
    print('init min loss:', INITIAL_NN_MIN_LOSS)
    bootstrap_model_path = out_path + '/bootstrap-model-0' + str(trial) + '.p'
    print('bootstrap. trial ', trial, ', bootstrap_model_path:', bootstrap_model_path)
    if os.path.exists(bootstrap_model_path):
        #file = open(bootstrap_model_path, 'rb')
        #model = pickle.load(file)
        #file.close()
        # problem with load_state_dict for different Pytorch versions
        #self.dqn.load_state_dict(model.state_dict()) # copy weights from dqn1 to dqn2
        #self.dqn = model
        self.load_model(bootstrap_model_path)
        print('bootstrapped file found and model loaded')
        return
    # if this class is derived from DoubleDQNAgent, copy weights to dqn1 and dqn2
    #self.dqn1.load_state_dict(model.state_dict()) # copy weights from dqn1 to dqn2
    #self.dqn2.load_state_dict(model.state_dict()) # copy weights from dqn1 to dqn2
    #self.dqn = self.dqn1
    print("==== bootstraping agent with len(init_data):", len(init_data))
    init_data_len = len(init_data)
    # Fill replay memory with the (normalised) bootstrap transitions.
    for i in range(init_data_len):
        # s and s2 in init_data are NOT normalised
        s, a, r, s2, done, _ = init_data[i]
        s = base.normalise(s)
        s2 = base.normalise(s2)
        self.add_sample(s, a, r, s2, done)
    start_time = datetime.now()
    memory = self.memory.memory
    print('memory length:', len(memory))
    max_accuracy = 0
    min_loss = float('inf')
    stats_path = out_path + '/stats-0' + str(trial) + '.txt'
    stats_file = open(stats_path, 'w')
    for i in range(1, 1_000_000 + 1):
        #minibatch = memory #memory[count : count + batch_size]
        minibatch = memory #memory[count : count + size]
        # next lines are copied from train() of the parent
        states = np.vstack([x.state for x in minibatch])
        actions = np.array([x.action for x in minibatch])
        Q_predict = self.get_Q(states)
        Q_target = Q_predict.clone().data.numpy() # Q_target is not a second network, most of its values are the same as the reward at the current timestep
        for j in range(init_data_len):
            s_not_normalised, a, r, s2_not_normalised, done, action_prefs = init_data[j]
            Q_target[j] = action_prefs # we use non-normalised action_prefs and see if it works
        Q_target = torch.Tensor(Q_target)
        self._train(Q_predict, Q_target)
        loss = self.loss.item()
        if loss < min_loss:
            min_loss = loss
        # Every 1000 iterations, measure how often the greedy action matches
        # the recorded action (classification accuracy of the network).
        if i % 1000 == 0:
            # measure accuracy
            Q_predict = self.get_Q(states)
            correct_prediction = init_data_len
            for j in range(init_data_len):
                argmax = np.argmax(Q_predict[j].data.numpy())
                if argmax != actions[j]:
                    correct_prediction -= 1
            accuracy = correct_prediction / init_data_len
            if accuracy > max_accuracy:
                max_accuracy = accuracy
            print('iteration ', i, "accuracy:", accuracy, "max:", max_accuracy, ", loss:", loss, ', min Loss:', min_loss)
        # if i == 50000 or i % 100_000 == 0:
        #     end_time = datetime.now()
        #     delta = end_time - start_time
        #     msg = 'iteration' + str(i) + ', min loss:' + str(min_loss) + ', loss:' + str(loss) + ', bootstrap time:' + str(delta.total_seconds()) + ' seconds'
        #     stats_file.write(msg + '\n')
        #     intermediate_bootstrap_model_path = out_path + '/bootstrap-model-0' + str(trial) + '-' + str(i).zfill(7) + '.pt'
        #     self.save_model(intermediate_bootstrap_model_path)
        #     print(msg)
        if min_loss < INITIAL_NN_MIN_LOSS:
            print('loss ' + str(min_loss) + '. Break at iteration' + str(i))
            break
    self.save_model(bootstrap_model_path)
    #file = open(bootstrap_model_path,'wb')
    #pickle.dump(self.dqn, file)
    #file.close()
    stats_file.write('min loss: ' + str(min_loss) + ', max score: ' + str(max_accuracy))
    stats_file.close()
def fixed_epsilon(epsiode: int, max_episode: int, min_eps: float) -> float:
    """Constant exploration schedule: always return epsilon = 0.05.

    The arguments are accepted (and ignored) only so this function can be
    swapped in for the annealing schedules via the `base.epsilon_annealing`
    patch below.
    """
    return 0.05
def epsilon_annealing2(epsiode: int, max_episode: int, min_eps: float) -> float:
    """Linear epsilon decay from 0.2 down to a hard floor of 0.01.

    The incoming *min_eps* is overwritten (effectively ignored); both the
    floor and the decay slope use the fixed value 0.01.
    """
    floor = 0.01
    decayed = (floor - 1.0) / max_episode * epsiode + 0.2
    return decayed if decayed > floor else floor
def not_normalise(state):
    """Identity stand-in for base.normalise: log once and pass *state* through."""
    print('not normalise')
    return state
def get_bootstrap_data(trial):
    """Load the D2D-SPL bootstrap training set for *trial*.

    Each line has the format
    ``episode,[state],[action preferences],[next state],reward`` and is parsed
    into a replay-memory style tuple
    ``(state, argmax_action, reward, next_state, done=False, action_prefs)``.

    Relies on the module-level ``bootstrap_training_set_path`` being assigned
    (done in the ``__main__`` block).
    """
    bootstrap_file = bootstrap_training_set_path + '/trainingset0' + str(trial) + '.txt'
    print('get bootstrap_data for trial', trial, bootstrap_file)
    data = []
    # 'with' guarantees the handle is closed (the original leaked it), and
    # iterating the handle avoids loading the whole file into memory.
    with open(bootstrap_file, 'r') as f:
        for line in f:
            # format episode name,[state],[actions preferences],[next state],reward. Example: 1,[1,2,3,4],[1,2,3,4,5,],[1,2,3,4],1
            index1 = line.index(',')
            # ep (episode id) is parsed for format validation only; unused.
            ep = int(line[0 : index1])
            index1 = line.index('[', index1 + 1)
            index2 = line.index(']', index1 + 1)
            state = line[index1+1 : index2]
            # data from trainingset0x.txt has NOT been normalised, see create_classifier() in aircombat_classifier.py
            state = [float(s) for s in state.split(',')]
            index1 = line.index('[', index1 + 1)
            index2 = line.index(']', index1 + 1)
            action_prefs = line[index1+1 : index2]
            action_prefs = [float(s) for s in action_prefs.split(',')]
            index1 = line.index('[', index1 + 1)
            index2 = line.index(']', index1 + 1)
            next_state = line[index1+1 : index2]
            next_state = [float(s) for s in next_state.split(',')]
            reward = float(line[index2 + 2 : ])
            action = np.argmax(action_prefs)
            data.append((np.array(state), action, reward, np.array(next_state), False, action_prefs))
    # we can sort data on reward and trim rows here
    return data
# Wire this module's pieces into the generic dqn_train driver (`base`):
# the driver's hooks are replaced at import time, before main() runs.
base.DQNAgent = BootstrappableDoubleDQNAgent
base.get_bootstrap_data = get_bootstrap_data
base.hidden_dim = 300
base.DQNAgent.bootstrap = bootstrap1
#base.epsilon_annealing = epsilon_annealing2
base.epsilon_annealing = fixed_epsilon
#base.normalise = not_normalise
if __name__ == '__main__':
    #os.environ['OMP_NUM_THREADS'] = '1' --> does not work
    #torch.set_num_threads(1) --> does not work
    scenario_name = 'standard-001.json'
    bootstrap_training_set_path = 'rl_results/ql-d2dspl-001.json'
    out_path = 'rl_results/bootstrapped-dqn-002j.json'
    if not os.path.exists(out_path):
        os.mkdir(out_path)
    print('start:', datetime.now().strftime('%d/%m/%y %H:%M:%S'))
    env = AceZeroEnvironment(scenario_name)
    # Loosen the bootstrap stopping threshold for this run (module default is 0.001).
    INITIAL_NN_MIN_LOSS = 0.01 #0.001
    input_dim, output_dim = get_env_dim(env)
    base.NUM_EPISODES = 10_000
    base.START_TRIAL = 0
    base.NUM_TRIALS = 1 + base.START_TRIAL
    main(env, input_dim, output_dim, out_path)
| [
"budi2020@gmail.com"
] | budi2020@gmail.com |
a2f6e90ddafd79c840f3f6b3b0b229486b9cb41b | 06a045819cf99c7059afde40dca12cf9d3eb5f81 | /pandas/tests/apply/test_invalid_arg.py | d75b784302676082b81f3ea18338f9914f89b780 | [
"BSD-3-Clause"
] | permissive | MarcoGorelli/pandas | b9882c6ac1e4bc753819b7bc7c8b567964efd275 | 86a4ee01c7899ef454d35b95cde11e9593921c9d | refs/heads/main | 2023-08-22T12:35:45.122152 | 2023-05-04T22:11:07 | 2023-05-04T22:11:07 | 164,618,359 | 4 | 1 | BSD-3-Clause | 2023-05-05T09:02:23 | 2019-01-08T09:55:54 | Python | UTF-8 | Python | false | false | 10,386 | py | # Tests specifically aimed at detecting bad arguments.
# This file is organized by reason for exception.
# 1. always invalid argument values
# 2. missing column(s)
# 3. incompatible ops/dtype/args/kwargs
# 4. invalid result shape/type
# If your test does not fit into one of these categories, add to this list.
from itertools import chain
import re
import numpy as np
import pytest
from pandas.errors import SpecificationError
from pandas import (
DataFrame,
Series,
date_range,
notna,
)
import pandas._testing as tm
# --- 1. always invalid argument values --------------------------------------
@pytest.mark.parametrize("result_type", ["foo", 1])
def test_result_type_error(result_type, int_frame_const_col):
    # allowed result_type
    df = int_frame_const_col
    msg = (
        "invalid value for result_type, must be one of "
        "{None, 'reduce', 'broadcast', 'expand'}"
    )
    with pytest.raises(ValueError, match=msg):
        df.apply(lambda x: [1, 2, 3], axis=1, result_type=result_type)
def test_apply_invalid_axis_value():
    # Only axis 0/1 (or their names) are legal for DataFrame.apply.
    df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["a", "a", "c"])
    msg = "No axis named 2 for object type DataFrame"
    with pytest.raises(ValueError, match=msg):
        df.apply(lambda x: x, 2)
def test_agg_raises():
    # GH 26513
    df = DataFrame({"A": [0, 1], "B": [1, 2]})
    msg = "Must provide"
    with pytest.raises(TypeError, match=msg):
        df.agg()
def test_map_with_invalid_na_action_raises():
    # https://github.com/pandas-dev/pandas/issues/32815
    s = Series([1, 2, 3])
    msg = "na_action must either be 'ignore' or None"
    with pytest.raises(ValueError, match=msg):
        s.map(lambda x: x, na_action="____")
@pytest.mark.parametrize("input_na_action", ["____", True])
def test_map_arg_is_dict_with_invalid_na_action_raises(input_na_action):
    # https://github.com/pandas-dev/pandas/issues/46588
    s = Series([1, 2, 3])
    msg = f"na_action must either be 'ignore' or None, {input_na_action} was passed"
    with pytest.raises(ValueError, match=msg):
        s.map({1: 2}, na_action=input_na_action)
@pytest.mark.parametrize("method", ["apply", "agg", "transform"])
@pytest.mark.parametrize("func", [{"A": {"B": "sum"}}, {"A": {"B": ["sum"]}}])
def test_nested_renamer(frame_or_series, method, func):
    # GH 35964
    obj = frame_or_series({"A": [1]})
    match = "nested renamer is not supported"
    with pytest.raises(SpecificationError, match=match):
        getattr(obj, method)(func)
@pytest.mark.parametrize(
    "renamer",
    [{"foo": ["min", "max"]}, {"foo": ["min", "max"], "bar": ["sum", "mean"]}],
)
def test_series_nested_renamer(renamer):
    # Dict-of-lists renamers raise SpecificationError for Series.agg too.
    s = Series(range(6), dtype="int64", name="series")
    msg = "nested renamer is not supported"
    with pytest.raises(SpecificationError, match=msg):
        s.agg(renamer)
def test_apply_dict_depr():
    # Renaming via a dict passed to Series.agg is unsupported.
    tsdf = DataFrame(
        np.random.randn(10, 3),
        columns=["A", "B", "C"],
        index=date_range("1/1/2000", periods=10),
    )
    msg = "nested renamer is not supported"
    with pytest.raises(SpecificationError, match=msg):
        tsdf.A.agg({"foo": ["sum", "mean"]})
@pytest.mark.parametrize("method", ["agg", "transform"])
def test_dict_nested_renaming_depr(method):
    df = DataFrame({"A": range(5), "B": 5})
    # nested renaming
    msg = r"nested renamer is not supported"
    with pytest.raises(SpecificationError, match=msg):
        getattr(df, method)({"A": {"foo": "min"}, "B": {"bar": "max"}})
# --- 2. missing column(s) ----------------------------------------------------
@pytest.mark.parametrize("method", ["apply", "agg", "transform"])
@pytest.mark.parametrize("func", [{"B": "sum"}, {"B": ["sum"]}])
def test_missing_column(method, func):
    # GH 40004
    obj = DataFrame({"A": [1]})
    match = re.escape("Column(s) ['B'] do not exist")
    with pytest.raises(KeyError, match=match):
        getattr(obj, method)(func)
def test_transform_mixed_column_name_dtypes():
    # GH39025
    df = DataFrame({"a": ["1"]})
    msg = r"Column\(s\) \[1, 'b'\] do not exist"
    with pytest.raises(KeyError, match=msg):
        df.transform({"a": int, 1: str, "b": int})
@pytest.mark.parametrize(
    "how, args", [("pct_change", ()), ("nsmallest", (1, ["a", "b"])), ("tail", 1)]
)
def test_apply_str_axis_1_raises(how, args):
    # GH 39211 - some ops don't support axis=1
    df = DataFrame({"a": [1, 2], "b": [3, 4]})
    msg = f"Operation {how} does not support axis=1"
    with pytest.raises(ValueError, match=msg):
        df.apply(how, axis=1, args=args)
def test_transform_axis_1_raises():
    # GH 35964
    msg = "No axis named 1 for object type Series"
    with pytest.raises(ValueError, match=msg):
        Series([1]).transform("sum", axis=1)
def test_apply_modify_traceback():
    # Applying a row mutator that assumes "C" is a string must surface the
    # underlying AttributeError (row 4's "C" is set to NaN, a float).
    data = DataFrame(
        {
            "A": [
                "foo",
                "foo",
                "foo",
                "foo",
                "bar",
                "bar",
                "bar",
                "bar",
                "foo",
                "foo",
                "foo",
            ],
            "B": [
                "one",
                "one",
                "one",
                "two",
                "one",
                "one",
                "one",
                "two",
                "two",
                "two",
                "one",
            ],
            "C": [
                "dull",
                "dull",
                "shiny",
                "dull",
                "dull",
                "shiny",
                "shiny",
                "dull",
                "shiny",
                "shiny",
                "shiny",
            ],
            "D": np.random.randn(11),
            "E": np.random.randn(11),
            "F": np.random.randn(11),
        }
    )
    data.loc[4, "C"] = np.nan

    def transform(row):
        if row["C"].startswith("shin") and row["A"] == "foo":
            row["D"] = 7
        return row

    # (A second, NaN-safe variant `transform2` existed here but was never
    # used by the test; it has been removed as dead code.)
    msg = "'float' object has no attribute 'startswith'"
    with pytest.raises(AttributeError, match=msg):
        data.apply(transform, axis=1)
# --- 3. incompatible ops/dtype/args/kwargs -----------------------------------
@pytest.mark.parametrize(
    "df, func, expected",
    tm.get_cython_table_params(
        DataFrame([["a", "b"], ["b", "a"]]), [["cumprod", TypeError]]
    ),
)
def test_agg_cython_table_raises_frame(df, func, expected, axis):
    # GH 21224
    msg = "can't multiply sequence by non-int of type 'str'"
    with pytest.raises(expected, match=msg):
        df.agg(func, axis=axis)
@pytest.mark.parametrize(
    "series, func, expected",
    chain(
        tm.get_cython_table_params(
            Series("a b c".split()),
            [
                ("mean", TypeError),  # mean raises TypeError
                ("prod", TypeError),
                ("std", TypeError),
                ("var", TypeError),
                ("median", TypeError),
                ("cumprod", TypeError),
            ],
        )
    ),
)
def test_agg_cython_table_raises_series(series, func, expected):
    # GH21224
    msg = r"[Cc]ould not convert|can't multiply sequence by non-int of type"
    if func == "median" or func is np.nanmedian or func is np.median:
        msg = r"Cannot convert \['a' 'b' 'c'\] to numeric"
    with pytest.raises(expected, match=msg):
        # e.g. Series('a b'.split()).cumprod() will raise
        series.agg(func)
def test_agg_none_to_type():
    # GH 40543
    df = DataFrame({"a": [None]})
    msg = re.escape("int() argument must be a string")
    with pytest.raises(TypeError, match=msg):
        df.agg({"a": lambda x: int(x.iloc[0])})
def test_transform_none_to_type():
    # GH#34377
    df = DataFrame({"a": [None]})
    msg = "argument must be a"
    with pytest.raises(TypeError, match=msg):
        df.transform({"a": lambda x: int(x.iloc[0])})
# --- 4. invalid result shape/type --------------------------------------------
@pytest.mark.parametrize(
    "func",
    [
        lambda x: np.array([1, 2]).reshape(-1, 2),
        lambda x: [1, 2],
        lambda x: Series([1, 2]),
    ],
)
def test_apply_broadcast_error(int_frame_const_col, func):
    df = int_frame_const_col
    # > 1 ndim
    msg = "too many dims to broadcast|cannot broadcast result"
    with pytest.raises(ValueError, match=msg):
        df.apply(func, axis=1, result_type="broadcast")
def test_transform_and_agg_err_agg(axis, float_frame):
    # cannot both transform and agg
    msg = "cannot combine transform and aggregation operations"
    with pytest.raises(ValueError, match=msg):
        with np.errstate(all="ignore"):
            float_frame.agg(["max", "sqrt"], axis=axis)
@pytest.mark.parametrize(
    "func, msg",
    [
        (["sqrt", "max"], "cannot combine transform and aggregation"),
        (
            {"foo": np.sqrt, "bar": "sum"},
            "cannot perform both aggregation and transformation",
        ),
    ],
)
def test_transform_and_agg_err_series(string_series, func, msg):
    # we are trying to transform with an aggregator
    with pytest.raises(ValueError, match=msg):
        with np.errstate(all="ignore"):
            string_series.agg(func)
@pytest.mark.parametrize("func", [["max", "min"], ["max", "sqrt"]])
def test_transform_wont_agg_frame(axis, float_frame, func):
    # GH 35964
    # cannot both transform and agg
    msg = "Function did not transform"
    with pytest.raises(ValueError, match=msg):
        float_frame.transform(func, axis=axis)
@pytest.mark.parametrize("func", [["min", "max"], ["sqrt", "max"]])
def test_transform_wont_agg_series(string_series, func):
    # GH 35964
    # we are trying to transform with an aggregator
    msg = "Function did not transform"
    warn = RuntimeWarning if func[0] == "sqrt" else None
    warn_msg = "invalid value encountered in sqrt"
    with pytest.raises(ValueError, match=msg):
        with tm.assert_produces_warning(warn, match=warn_msg, check_stacklevel=False):
            string_series.transform(func)
@pytest.mark.parametrize(
    "op_wrapper", [lambda x: x, lambda x: [x], lambda x: {"A": x}, lambda x: {"A": [x]}]
)
def test_transform_reducer_raises(all_reductions, frame_or_series, op_wrapper):
    # GH 35964
    # A reducer is rejected by .transform regardless of the wrapper shape.
    op = op_wrapper(all_reductions)
    obj = DataFrame({"A": [1, 2, 3]})
    obj = tm.get_obj(obj, frame_or_series)
    msg = "Function did not transform"
    with pytest.raises(ValueError, match=msg):
        obj.transform(op)
| [
"noreply@github.com"
] | MarcoGorelli.noreply@github.com |
3f210c9ae5529fd9ab9a3ad99bdf62987cd5fcbc | 44fb87ff6b94736610c7e84ecc00c4045f097328 | /mabozen/conf/datatype_mapping/datatype_mapping_xpath.py | 01a80772abe130002aad38b60ec1c2899faf6cca | [
"MIT"
] | permissive | mabotech/mabozen | c02899dad34310e3c5c68afe2af05d3f11946511 | 531b138fea1212e959ecfb9370b622b0c9f519a5 | refs/heads/master | 2016-09-06T21:31:51.731077 | 2014-07-20T13:44:01 | 2014-07-20T13:44:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,406 | py |
from lxml import etree
class Singleton(type):
    """Metaclass that caches the first instance of each class on the class."""

    def __call__(cls, *args):
        # EAFP: reuse the memoised instance; build it on the first call only.
        try:
            return cls.instance
        except AttributeError:
            cls.instance = super(Singleton, cls).__call__(*args)
            return cls.instance
class Counter(object):
    """Running tally, intended to be process-wide via the (Python 2)
    ``__metaclass__`` singleton hook."""
    __metaclass__ = Singleton

    def __init__(self):
        self.i = 0

    def inc(self):
        """Advance the tally by one and return the new value."""
        self.i += 1
        return self.i
def dbdt_insert(dbdt_dict):
    """Render one mt_t_db_datatype INSERT statement from mapping *dbdt_dict*."""
    template = """insert into mt_t_db_datatype( id, fk_datatype,fk_db,name,alias,shortname,identity,Undefined,scale,width,showscale,showtype,showwidth,ordering,active,createdon,createdby,rowversionstamp)
    values (%(id)s, %(fk_datatype)s, %(fk_db)s, '%(name)s', '%(alias)s', '%(shortname)s', '%(identity)s', %(undefined)s, %(scale)s, %(width)s, %(showscale)s, %(showtype)s, %(showwidth)s, %(ordering)s,1, now(), 'MT', 1 );\n"""
    return template % dbdt_dict
def db_insert(vt):
    """Render one mt_t_db_platform INSERT; *vt* is an (id, name, ordering) tuple."""
    template = """ insert into mt_t_db_platform(id,name,ordering,active,createdon,createdby,rowversionstamp)
    values ( %s, '%s', %s, 1, now(), 'MT', 1);\n """
    return template % vt
def dt_insert(vd):
    # Render one mt_t_datatype INSERT from dict *vd* (Python 2: bare print).
    print "dt_insert", vd['name']
    sql =""" insert into mt_t_datatype(id,name, alias, shortname, ordering,active,createdon,createdby,rowversionstamp)
    values ( %(fk_datatype)s, '%(name)s', '%(name)s', '%(shortname)s', %(ordering)s, 1, now(), 'MT', 1 ) ;\n""" % vd
    return sql
def convert(val):
    """Map the XML boolean strings 'false'/'true' to 0/1.

    Raises:
        ValueError: if *val* is neither 'false' nor 'true'.  (ValueError is a
        subclass of Exception, so existing ``except Exception`` callers still
        work; a bare ``Exception`` was raised before.)
    """
    if val == 'false':
        return 0
    if val == 'true':
        return 1
    raise ValueError('val not false or true')
def dt_extract(pid, node):
    # Build mt_t_datatype INSERT statements for every DataType element under
    # *node*; *pid* is the owning DB platform id, ordering follows document order.
    i = 0
    sql = ""
    for dt in node.xpath('DataType'):
        i = i + 1
        dt_dict = {}
        dt_dict['ordering'] = i
        dt_dict['fk_db'] = pid
        dt_dict['fk_datatype'] = dt.xpath('@DatatypeId')[0]
        dt_dict['name'] = dt.xpath('@DatatypeName')[0]
        dt_dict['alias'] = dt.xpath('@DatatypeName')[0]
        dt_dict['identity'] = dt.xpath('@Identity')[0]
        dt_dict['scale'] = dt.xpath('@Scale')[0]
        dt_dict['shortname'] = dt.xpath('@ShortName')[0]
        # boolean attributes arrive as 'true'/'false' strings -> 1/0
        dt_dict['showscale'] = convert ( dt.xpath('@ShowScale')[0] )
        dt_dict['showtype'] = convert ( dt.xpath('@ShowType')[0] )
        dt_dict['showwidth'] = convert ( dt.xpath('@ShowWidth')[0] )
        dt_dict['undefined'] = convert( dt.xpath('@Undefined')[0] )
        dt_dict['width'] = dt.xpath('@Width')[0]
        sql = sql + dt_insert(dt_dict)
    return sql
def extract(pid, node):
    # Like dt_extract, but emits mt_t_db_datatype rows and assigns each row a
    # unique surrogate id from the shared Counter.
    i = 0
    sql = ""
    ct = Counter()
    for dt in node.xpath('DataType'):
        i = i + 1
        dt_dict = {}
        dt_dict['ordering'] = i
        dt_dict['fk_db'] = pid
        dt_dict['id'] = ct.inc()
        dt_dict['fk_datatype'] = dt.xpath('@DatatypeId')[0]
        dt_dict['name'] = dt.xpath('@DatatypeName')[0]
        dt_dict['alias'] = dt.xpath('@DatatypeName')[0]
        dt_dict['identity'] = dt.xpath('@Identity')[0]
        dt_dict['scale'] = dt.xpath('@Scale')[0]
        dt_dict['shortname'] = dt.xpath('@ShortName')[0]
        # boolean attributes arrive as 'true'/'false' strings -> 1/0
        dt_dict['showscale'] = convert ( dt.xpath('@ShowScale')[0] )
        dt_dict['showtype'] = convert ( dt.xpath('@ShowType')[0] )
        dt_dict['showwidth'] = convert ( dt.xpath('@ShowWidth')[0] )
        dt_dict['undefined'] = convert( dt.xpath('@Undefined')[0] )
        dt_dict['width'] = dt.xpath('@Width')[0]
        sql = sql + dbdt_insert(dt_dict)
    return sql
def main():
    """Convert the datatype-mapping XML into SQL insert scripts.

    Reads DatatypeMappings_SystemDefault.xml, walks every DBPlatform node,
    and writes mt_t_db_datatype inserts for the "PostgreSQL 8.0 (system)"
    platform to ../../../output/mapping/dt03.sql.  (Python 2 source: note
    the bare `print` statement below.)
    """
    sqlfile = "../../../output/mapping/dt03.sql"
    fh = open(sqlfile, 'w')
    fn = "DatatypeMappings_SystemDefault.xml"
    tree = etree.parse(fn)
    DBPlatform = tree.xpath('/DataTypeMapping/DBPlatform')
    ordering = 0
    for node in DBPlatform:
        ordering = ordering + 1
        #print node.xpath('@MappingName')
        pid = node.xpath('@PlatformId')
        pname = node.xpath('@MappingName')
        print(pname)
        vt = (pid[0], pname[0], ordering)
        sql = db_insert(vt)
        #fh.write(sql)
        if pid == ['0']:
            print "Logical (system)"
            #sql = dt_extract(pid[0], node)
            #fh.write(sql)
        if pname[0] == "PostgreSQL 8.0 (system)":
            sql = extract(pid[0], node)
            fh.write(sql)
    fh.close()
if __name__ == '__main__':
    main()
"aidear@163.com"
] | aidear@163.com |
4a122303145739c3efd1bba21c198a0a48e14a57 | c8c4721e2282aaeece7bb36e6b7c33fe2e4af207 | /torch/testing/_core.py | 8fab432009def6a7c3e7565a7173d08351d7c12f | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | suo/pytorch | ac1008c62906cf0b055c2c851dab611c0a1276b8 | fb0e27d38a8fdab4e1c14d6378c9e41cb30fd6a3 | refs/heads/master | 2023-04-18T18:57:04.622931 | 2022-01-28T04:57:17 | 2022-01-28T05:01:06 | 142,352,607 | 1 | 0 | NOASSERTION | 2019-11-08T19:38:03 | 2018-07-25T20:50:09 | C++ | UTF-8 | Python | false | false | 2,217 | py | """
The testing package contains testing-specific utilities.
"""
import torch
import random
import operator
# Re-export the C-level FileCheck helper and declare the public API.
FileCheck = torch._C.FileCheck
__all__ = [
    "FileCheck",
    "make_non_contiguous",
]
# Helper function that returns True when the dtype is an integral dtype,
# False otherwise.
# TODO: implement numpy-like issubdtype
def is_integral(dtype: torch.dtype) -> bool:
    """Return True for bool/integer torch dtypes (numpy-issubdtype stand-in)."""
    integral_dtypes = (torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
    return dtype in integral_dtypes
def is_quantized(dtype: torch.dtype) -> bool:
    """Return True for torch's quantized dtypes."""
    quantized_dtypes = (torch.quint8, torch.qint8, torch.qint32, torch.quint4x2)
    return dtype in quantized_dtypes
# Helper function that maps a flattened index back into the given shape
# TODO: consider adding torch.unravel_index
def _unravel_index(flat_index, shape):
flat_index = operator.index(flat_index)
res = []
# Short-circuits on zero dim tensors
if shape == torch.Size([]):
return 0
for size in shape[::-1]:
res.append(flat_index % size)
flat_index = flat_index // size
if len(res) == 1:
return res[0]
return tuple(res[::-1])
def make_non_contiguous(tensor: torch.Tensor) -> torch.Tensor:
    """Return a tensor with the same values as *tensor* whose underlying
    storage is non-contiguous (via random over-allocation and narrowing).
    Note: nondeterministic — uses the global `random` state.
    """
    if tensor.numel() <= 1:  # can't make non-contiguous
        return tensor.clone()
    osize = list(tensor.size())
    # randomly inflate a few dimensions in osize
    for _ in range(2):
        dim = random.randint(0, len(osize) - 1)
        add = random.randint(4, 15)
        osize[dim] = osize[dim] + add
    # narrow doesn't make a non-contiguous tensor if we only narrow the 0-th dimension,
    # (which will always happen with a 1-dimensional tensor), so let's make a new
    # right-most dimension and cut it off
    # NOTE(review): `input` shadows the builtin; kept for byte-compatibility.
    input = tensor.new(torch.Size(osize + [random.randint(2, 3)]))
    input = input.select(len(input.size()) - 1, random.randint(0, 1))
    # now extract the input of correct size from 'input'
    for i in range(len(osize)):
        if input.size(i) != tensor.size(i):
            bounds = random.randint(1, input.size(i) - tensor.size(i))
            input = input.narrow(i, bounds, tensor.size(i))
    input.copy_(tensor)
    # Use .data here to hide the view relation between input and other temporary Tensors
    return input.data
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
26d5d9d3d2891e530fc99d2f8effdcc77cb5de2d | 59bd9c968a3a31a73d17f252fe716a3eacdf7f4f | /portfolio/Python/scrapy/swedishtruckparts/__init__.py | 0c560ca0fb1f4fde456d2b7d6ca27f03fa314efc | [
"Apache-2.0"
] | permissive | 0--key/lib | 113ff1e9cf75e446fa50eb065bc3bc36c090d636 | a619938ea523e96ab9e676ace51f5a129e6612e6 | refs/heads/master | 2023-06-23T22:17:54.244257 | 2023-06-21T17:42:57 | 2023-06-21T17:42:57 | 23,730,551 | 3 | 5 | null | 2016-03-22T08:19:30 | 2014-09-06T08:46:41 | Python | UTF-8 | Python | false | false | 37 | py | ACCOUNT_NAME = 'Swedish Truck Parts'
| [
"a.s.kosinov@gmail.com"
] | a.s.kosinov@gmail.com |
b7704990a6b56c8a4e5cc6c0ba186a1021d6aab8 | b7312dc013ba06e5b44b33c0411f4948c4794346 | /study10/process_pool.py | 1c084f6d5a90687202c69b86aeb29cb4307bbb0a | [] | no_license | GaoFuhong/python-code | 50fb298d0c1e7a2af55f1e13e48063ca3d1a189f | 7d17c98011e5a1e74d49332da9f87f5cb576822d | refs/heads/master | 2021-02-07T20:25:06.997173 | 2020-03-01T02:22:41 | 2020-03-01T02:26:04 | 244,072,971 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | # Author:Fuhong Gao
# Process pool demo (starting new processes on Windows is really slow...)
from multiprocessing import Process,Pool
import time,os
def Foo(i):
    # Worker function executed in a pool process: simulate one second of
    # work, report which process ran it, and return a derived value that
    # the pool hands to the callback (Bar).
    time.sleep(1)
    print('in process-',os.getpid())
    return i+100
def Bar(arg):
    # Callback run in the *parent* process with the worker's return value;
    # printing the PID here demonstrates that callbacks do not run in the pool.
    parent_pid = os.getpid()
    print("--> exec done:", arg, 'child process id:', parent_pid)
if __name__ == '__main__': #__name__ == 'main' 如果手动执行这个脚本,就执行下面的内容,如果当做一个模块导入,就不执行
pool = Pool(5) #等价于pool = Pool(processes = 5) 允许进程池里同时放入5个进程
print('main process id:',os.getpid())
for j in range(10):
pool.apply_async(func=Foo,args=(j,),callback=Bar) #callback: 回调 主进程执行的回调
# pool.apply(func=Foo,args=(j,)) #串行
# pool.apply_async(func=Foo,args=(j,)) #并行
print('-----------------------')
pool.close()
pool.join() #进程池中进程执行完毕再关闭,如果注释,程序将直接关闭 | [
"1350086369@qq.com"
] | 1350086369@qq.com |
89cd1fb4b5a3ed87edef90e19a0037aa6a37efde | 7a3fc3ea3dd71e4ec85ac73e0af57ae976777513 | /.history/flaskblog_20210526065440.py | 1b3ae27d0644bdbc0bcb4fa8d28387719b71771f | [] | no_license | khanhdk0000/first_proj | 72e9d2bbd788d6f52bff8dc5375ca7f75c0f9dd0 | bec0525353f98c65c3943b6d42727e3248ecfe22 | refs/heads/main | 2023-05-12T10:36:08.026143 | 2021-06-05T15:35:22 | 2021-06-05T15:35:22 | 374,148,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,561 | py | from flask import Flask, render_template, url_for, request, jsonify
from enum import Enum
app = Flask(__name__)
class PaletteType(Enum):
    """Palette style options returned by the API (serialized via ``.name``)."""
    MONOTONE = 1
    DOUTONE = 2   # NOTE(review): presumably a typo for "DUOTONE"; renaming would change the serialized name, so kept
    COLORFUL = 3
class HarmonyRule(Enum):
    """Color-harmony rules selectable for a palette (serialized via ``.name``)."""
    COMPLEMENTARY = 1
    ANALOGOUS = 2
    TRIAD = 3
    SQUARE = 4
    BRISE_FAN = 5
    SPLIT_COMPLEMETARY = 6   # NOTE(review): typo for "SPLIT_COMPLEMENTARY"; kept to preserve the serialized name
    DOUBLE_SPLIT_COMPLEMENTARY = 7
    NONE = 8
res = {
'primaryColor': '#FFFFFF',
'paletteType': PaletteType(1).name,
'DOUTONE': HarmonyRule(8).name,
'COLORFUL': HarmonyRule(2).name,
}
posts = [
{
'author': 'Corey Schafer',
'title': 'Blog Post 1',
'content': 'First post content',
'date_posted': 'April 20, 2018'
},
{
'author': 'Jane Doe',
'title': 'Blog Post 2',
'content': 'Second post content',
'date_posted': 'April 21, 2018'
}
]
@app.route('/')
@app.route('/home')
def hello_world():
    """Render the home page with the hard-coded demo posts."""
    return render_template('home.html', posts=posts, title='Hey')
@app.route('/about')
def about():
    """Render the about page; an optional ``my_var`` query parameter is passed through."""
    my_var = request.args.get('my_var', None)  # None when the query string omits my_var
    return render_template('about.html', posts=posts, var=my_var)
@app.route('/test')
def test():
    """Simple liveness endpoint returning a plain-text message."""
    return 'This is my first API call!'
@app.route('/test2', methods=["POST"])
def testpost():
    """Return the palette configuration as JSON.

    Expects a JSON body with a ``'domain'`` key; for the ``'Ecommerce'``
    domain the primary color is switched to ``'#E1D89F'`` before responding.

    NOTE(review): this mutates the module-level ``res`` dict, so one
    'Ecommerce' request changes the color returned to every later caller --
    looks intentional for this demo, but worth confirming.
    """
    input_json = request.get_json(force=True)  # force=True: parse even without a JSON content type
    domain = input_json['domain']
    if domain == 'Ecommerce':
        res['primaryColor'] = '#E1D89F'
    # Respond identically on both branches.  The original returned the bare
    # dict on the Ecommerce branch and jsonify(res) otherwise, and also built
    # an unused dictToReturn from input_json['text'] -- a KeyError (HTTP 500)
    # for any body without a 'text' key.  The dead code is removed.
    return jsonify(res)
if __name__ == '__main__':
    # Run the Flask development server (debug mode: auto-reload + debugger).
    app.run(debug=True)
| [
"khanhtran28092000@gmail.com"
] | khanhtran28092000@gmail.com |
191371fc150002e925fb46d7fe0edfe9a4d109f0 | c732e1ab1135c4bc0598265ee8fea4db5dc12a2b | /mbme/cs285/envs/__init__.py | 4fdbfc79d65e87b09a17aadc8beb187936db0a46 | [] | no_license | Sohojoe/berkeleydeeprlcourse_fall2020 | 169782fb566aa338e617a301ec1ab7e5f62e49cd | 97eeafde4ff02c8a3429ec5096ed597418b01954 | refs/heads/main | 2023-02-11T13:53:01.867290 | 2021-01-03T05:51:36 | 2021-01-03T05:51:36 | 325,815,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | from gym.envs.registration import register
def register_envs():
    """Register the custom marathon environments with gym.

    Safe to call once at startup; every environment shares the same
    1000-step episode cap.
    """
    # (gym id, entry point) pairs, registered in declaration order.
    env_specs = [
        ('marathon-hopper-v0', 'cs285.envs.marathon_envs:HopperEnv'),
        ('marathon-walker-v0', 'cs285.envs.marathon_envs:WalkerEnv'),
        ('marathon-ant-v0', 'cs285.envs.marathon_envs:AntEnv'),
    ]
    for env_id, entry_point in env_specs:
        register(
            id=env_id,
            entry_point=entry_point,
            max_episode_steps=1000,
        )
| [
"joe@joebooth.com"
] | joe@joebooth.com |
f0a10672945a50f5744a4032fcec4abfcacbea79 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Projects/sms/pymessages/venv/Lib/site-packages/pip/_internal/commands/list.py | a8e16bf2ddf8cd9df43f0fe7e41d1b984e58ffbc | [
"LicenseRef-scancode-other-permissive"
] | permissive | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:da8deb630dfb102ae17b8f81bb9b3fd82d1bc21620821878f37dccc5c58012e8
size 11312
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
837354c48998a7897e7ccb9dd125e15314090e4f | ef1d38cfef63f22e149d6c9dd14e98955693c50d | /webhook/protos/pogoprotos/data/telemetry/rpc_response_telemetry_pb2.py | 3affea348e33cb38a14af4df63940874948ced59 | [] | no_license | Kneckter/WebhookListener | 4c186d9012fd6af69453d9d51ae33a38aa19b5fd | ea4ff29b66d6abf21cc1424ed976af76c3da5511 | refs/heads/master | 2022-10-09T04:26:33.466789 | 2019-11-24T17:30:59 | 2019-11-24T17:30:59 | 193,372,117 | 2 | 0 | null | 2022-09-23T22:26:10 | 2019-06-23T16:39:34 | Python | UTF-8 | Python | false | true | 3,158 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/data/telemetry/rpc_response_telemetry.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pogoprotos.data.telemetry import rpc_response_time_pb2 as pogoprotos_dot_data_dot_telemetry_dot_rpc__response__time__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/data/telemetry/rpc_response_telemetry.proto',
package='pogoprotos.data.telemetry',
syntax='proto3',
serialized_pb=_b('\n6pogoprotos/data/telemetry/rpc_response_telemetry.proto\x12\x19pogoprotos.data.telemetry\x1a\x31pogoprotos/data/telemetry/rpc_response_time.proto\"u\n\x14RpcResponseTelemetry\x12\x17\n\x0fwindow_duration\x18\x01 \x01(\x02\x12\x44\n\x10response_timings\x18\x02 \x03(\x0b\x32*.pogoprotos.data.telemetry.RpcResponseTimeb\x06proto3')
,
dependencies=[pogoprotos_dot_data_dot_telemetry_dot_rpc__response__time__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_RPCRESPONSETELEMETRY = _descriptor.Descriptor(
name='RpcResponseTelemetry',
full_name='pogoprotos.data.telemetry.RpcResponseTelemetry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='window_duration', full_name='pogoprotos.data.telemetry.RpcResponseTelemetry.window_duration', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='response_timings', full_name='pogoprotos.data.telemetry.RpcResponseTelemetry.response_timings', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=136,
serialized_end=253,
)
_RPCRESPONSETELEMETRY.fields_by_name['response_timings'].message_type = pogoprotos_dot_data_dot_telemetry_dot_rpc__response__time__pb2._RPCRESPONSETIME
DESCRIPTOR.message_types_by_name['RpcResponseTelemetry'] = _RPCRESPONSETELEMETRY
RpcResponseTelemetry = _reflection.GeneratedProtocolMessageType('RpcResponseTelemetry', (_message.Message,), dict(
DESCRIPTOR = _RPCRESPONSETELEMETRY,
__module__ = 'pogoprotos.data.telemetry.rpc_response_telemetry_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.data.telemetry.RpcResponseTelemetry)
))
_sym_db.RegisterMessage(RpcResponseTelemetry)
# @@protoc_insertion_point(module_scope)
| [
"kasmar@gitlab.com"
] | kasmar@gitlab.com |
4086926e4058adfba909c2e6ebe318834bd5172c | 824b582c2e0236e987a29b233308917fbdfc57a7 | /sdk/python/pulumi_google_native/file/v1beta1/get_instance.py | c141f3fd55e7ce5599dce6da9d1e33bd41ae36ad | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | 24601/pulumi-google-native | ce8faf8455609a9572a8cbe0638c66427bf0ae7f | b219a14201c6c58eaa10caaeacbdaab528931adf | refs/heads/master | 2023-08-23T05:48:31.819709 | 2021-10-08T18:50:44 | 2021-10-08T18:50:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,453 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetInstanceResult',
'AwaitableGetInstanceResult',
'get_instance',
'get_instance_output',
]
@pulumi.output_type
class GetInstanceResult:
def __init__(__self__, create_time=None, description=None, etag=None, file_shares=None, kms_key_name=None, labels=None, name=None, networks=None, satisfies_pzs=None, state=None, status_message=None, suspension_reasons=None, tier=None):
if create_time and not isinstance(create_time, str):
raise TypeError("Expected argument 'create_time' to be a str")
pulumi.set(__self__, "create_time", create_time)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if file_shares and not isinstance(file_shares, list):
raise TypeError("Expected argument 'file_shares' to be a list")
pulumi.set(__self__, "file_shares", file_shares)
if kms_key_name and not isinstance(kms_key_name, str):
raise TypeError("Expected argument 'kms_key_name' to be a str")
pulumi.set(__self__, "kms_key_name", kms_key_name)
if labels and not isinstance(labels, dict):
raise TypeError("Expected argument 'labels' to be a dict")
pulumi.set(__self__, "labels", labels)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if networks and not isinstance(networks, list):
raise TypeError("Expected argument 'networks' to be a list")
pulumi.set(__self__, "networks", networks)
if satisfies_pzs and not isinstance(satisfies_pzs, bool):
raise TypeError("Expected argument 'satisfies_pzs' to be a bool")
pulumi.set(__self__, "satisfies_pzs", satisfies_pzs)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if status_message and not isinstance(status_message, str):
raise TypeError("Expected argument 'status_message' to be a str")
pulumi.set(__self__, "status_message", status_message)
if suspension_reasons and not isinstance(suspension_reasons, list):
raise TypeError("Expected argument 'suspension_reasons' to be a list")
pulumi.set(__self__, "suspension_reasons", suspension_reasons)
if tier and not isinstance(tier, str):
raise TypeError("Expected argument 'tier' to be a str")
pulumi.set(__self__, "tier", tier)
@property
@pulumi.getter(name="createTime")
def create_time(self) -> str:
"""
The time when the instance was created.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def description(self) -> str:
"""
The description of the instance (2048 characters or less).
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def etag(self) -> str:
"""
Server-specified ETag for the instance resource to prevent simultaneous updates from overwriting each other.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="fileShares")
def file_shares(self) -> Sequence['outputs.FileShareConfigResponse']:
"""
File system shares on the instance. For this version, only a single file share is supported.
"""
return pulumi.get(self, "file_shares")
@property
@pulumi.getter(name="kmsKeyName")
def kms_key_name(self) -> str:
"""
KMS key name used for data encryption.
"""
return pulumi.get(self, "kms_key_name")
@property
@pulumi.getter
def labels(self) -> Mapping[str, str]:
"""
Resource labels to represent user provided metadata.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> str:
"""
The resource name of the instance, in the format `projects/{project_id}/locations/{location_id}/instances/{instance_id}`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def networks(self) -> Sequence['outputs.NetworkConfigResponse']:
"""
VPC networks to which the instance is connected. For this version, only a single network is supported.
"""
return pulumi.get(self, "networks")
@property
@pulumi.getter(name="satisfiesPzs")
def satisfies_pzs(self) -> bool:
"""
Reserved for future use.
"""
return pulumi.get(self, "satisfies_pzs")
@property
@pulumi.getter
def state(self) -> str:
"""
The instance state.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="statusMessage")
def status_message(self) -> str:
"""
Additional information about the instance state, if available.
"""
return pulumi.get(self, "status_message")
@property
@pulumi.getter(name="suspensionReasons")
def suspension_reasons(self) -> Sequence[str]:
"""
field indicates all the reasons the instance is in "SUSPENDED" state.
"""
return pulumi.get(self, "suspension_reasons")
@property
@pulumi.getter
def tier(self) -> str:
"""
The service tier of the instance.
"""
return pulumi.get(self, "tier")
class AwaitableGetInstanceResult(GetInstanceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetInstanceResult(
create_time=self.create_time,
description=self.description,
etag=self.etag,
file_shares=self.file_shares,
kms_key_name=self.kms_key_name,
labels=self.labels,
name=self.name,
networks=self.networks,
satisfies_pzs=self.satisfies_pzs,
state=self.state,
status_message=self.status_message,
suspension_reasons=self.suspension_reasons,
tier=self.tier)
def get_instance(instance_id: Optional[str] = None,
location: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInstanceResult:
"""
Gets the details of a specific instance.
"""
__args__ = dict()
__args__['instanceId'] = instance_id
__args__['location'] = location
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:file/v1beta1:getInstance', __args__, opts=opts, typ=GetInstanceResult).value
return AwaitableGetInstanceResult(
create_time=__ret__.create_time,
description=__ret__.description,
etag=__ret__.etag,
file_shares=__ret__.file_shares,
kms_key_name=__ret__.kms_key_name,
labels=__ret__.labels,
name=__ret__.name,
networks=__ret__.networks,
satisfies_pzs=__ret__.satisfies_pzs,
state=__ret__.state,
status_message=__ret__.status_message,
suspension_reasons=__ret__.suspension_reasons,
tier=__ret__.tier)
@_utilities.lift_output_func(get_instance)
def get_instance_output(instance_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetInstanceResult]:
"""
Gets the details of a specific instance.
"""
...
| [
"noreply@github.com"
] | 24601.noreply@github.com |
48789cb5e09d9b3d49133bd49bfc603bd40db2f9 | b4c6013f346e178222cc579ede4da019c7f8c221 | /src/main/python/idlelib/autocomplete_w.py | 3374c6e94510aae80dc91e5529c3cb42dbb51c68 | [
"BSD-3-Clause",
"OpenSSL",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"GPL-1.0-or-later",
"LicenseRef-scancode-unicode"
] | permissive | cafebabepy/cafebabepy | e69248c4f3d9bab00e93ee749d273bc2c9244f8d | 4ab0e67b8cd79f2ca7cab6281bc811d3b9bc69c1 | refs/heads/develop | 2022-12-09T21:14:56.651792 | 2019-07-01T09:05:23 | 2019-07-01T09:05:23 | 90,854,936 | 9 | 1 | BSD-3-Clause | 2018-01-02T02:13:51 | 2017-05-10T11:05:11 | Java | UTF-8 | Python | false | false | 17,799 | py | """
An auto-completion window for IDLE, used by the autocomplete extension
"""
from tkinter import *
from tkinter.ttk import Scrollbar
from idlelib.autocomplete import COMPLETE_FILES, COMPLETE_ATTRIBUTES
from idlelib.multicall import MC_SHIFT
HIDE_VIRTUAL_EVENT_NAME = "<<autocompletewindow-hide>>"
HIDE_SEQUENCES = ("<FocusOut>", "<ButtonPress>")
KEYPRESS_VIRTUAL_EVENT_NAME = "<<autocompletewindow-keypress>>"
# We need to bind event beyond <Key> so that the function will be called
# before the default specific IDLE function
KEYPRESS_SEQUENCES = ("<Key>", "<Key-BackSpace>", "<Key-Return>", "<Key-Tab>",
"<Key-Up>", "<Key-Down>", "<Key-Home>", "<Key-End>",
"<Key-Prior>", "<Key-Next>")
KEYRELEASE_VIRTUAL_EVENT_NAME = "<<autocompletewindow-keyrelease>>"
KEYRELEASE_SEQUENCE = "<KeyRelease>"
LISTUPDATE_SEQUENCE = "<B1-ButtonRelease>"
WINCONFIG_SEQUENCE = "<Configure>"
DOUBLECLICK_SEQUENCE = "<B1-Double-ButtonRelease>"
class AutoCompleteWindow:
def __init__(self, widget):
# The widget (Text) on which we place the AutoCompleteWindow
self.widget = widget
# The widgets we create
self.autocompletewindow = self.listbox = self.scrollbar = None
# The default foreground and background of a selection. Saved because
# they are changed to the regular colors of list items when the
# completion start is not a prefix of the selected completion
self.origselforeground = self.origselbackground = None
# The list of completions
self.completions = None
# A list with more completions, or None
self.morecompletions = None
# The completion mode. Either autocomplete.COMPLETE_ATTRIBUTES or
# autocomplete.COMPLETE_FILES
self.mode = None
# The current completion start, on the text box (a string)
self.start = None
# The index of the start of the completion
self.startindex = None
# The last typed start, used so that when the selection changes,
# the new start will be as close as possible to the last typed one.
self.lasttypedstart = None
# Do we have an indication that the user wants the completion window
# (for example, he clicked the list)
self.userwantswindow = None
# event ids
self.hideid = self.keypressid = self.listupdateid = self.winconfigid \
= self.keyreleaseid = self.doubleclickid = None
# Flag set if last keypress was a tab
self.lastkey_was_tab = False
def _change_start(self, newstart):
min_len = min(len(self.start), len(newstart))
i = 0
while i < min_len and self.start[i] == newstart[i]:
i += 1
if i < len(self.start):
self.widget.delete("%s+%dc" % (self.startindex, i),
"%s+%dc" % (self.startindex, len(self.start)))
if i < len(newstart):
self.widget.insert("%s+%dc" % (self.startindex, i),
newstart[i:])
self.start = newstart
def _binary_search(self, s):
"""Find the first index in self.completions where completions[i] is
greater or equal to s, or the last index if there is no such
one."""
i = 0; j = len(self.completions)
while j > i:
m = (i + j) // 2
if self.completions[m] >= s:
j = m
else:
i = m + 1
return min(i, len(self.completions)-1)
def _complete_string(self, s):
"""Assuming that s is the prefix of a string in self.completions,
return the longest string which is a prefix of all the strings which
s is a prefix of them. If s is not a prefix of a string, return s."""
first = self._binary_search(s)
if self.completions[first][:len(s)] != s:
# There is not even one completion which s is a prefix of.
return s
# Find the end of the range of completions where s is a prefix of.
i = first + 1
j = len(self.completions)
while j > i:
m = (i + j) // 2
if self.completions[m][:len(s)] != s:
j = m
else:
i = m + 1
last = i-1
if first == last: # only one possible completion
return self.completions[first]
# We should return the maximum prefix of first and last
first_comp = self.completions[first]
last_comp = self.completions[last]
min_len = min(len(first_comp), len(last_comp))
i = len(s)
while i < min_len and first_comp[i] == last_comp[i]:
i += 1
return first_comp[:i]
def _selection_changed(self):
"""Should be called when the selection of the Listbox has changed.
Updates the Listbox display and calls _change_start."""
cursel = int(self.listbox.curselection()[0])
self.listbox.see(cursel)
lts = self.lasttypedstart
selstart = self.completions[cursel]
if self._binary_search(lts) == cursel:
newstart = lts
else:
min_len = min(len(lts), len(selstart))
i = 0
while i < min_len and lts[i] == selstart[i]:
i += 1
newstart = selstart[:i]
self._change_start(newstart)
if self.completions[cursel][:len(self.start)] == self.start:
# start is a prefix of the selected completion
self.listbox.configure(selectbackground=self.origselbackground,
selectforeground=self.origselforeground)
else:
self.listbox.configure(selectbackground=self.listbox.cget("bg"),
selectforeground=self.listbox.cget("fg"))
# If there are more completions, show them, and call me again.
if self.morecompletions:
self.completions = self.morecompletions
self.morecompletions = None
self.listbox.delete(0, END)
for item in self.completions:
self.listbox.insert(END, item)
self.listbox.select_set(self._binary_search(self.start))
self._selection_changed()
def show_window(self, comp_lists, index, complete, mode, userWantsWin):
"""Show the autocomplete list, bind events.
If complete is True, complete the text, and if there is exactly one
matching completion, don't open a list."""
# Handle the start we already have
self.completions, self.morecompletions = comp_lists
self.mode = mode
self.startindex = self.widget.index(index)
self.start = self.widget.get(self.startindex, "insert")
if complete:
completed = self._complete_string(self.start)
start = self.start
self._change_start(completed)
i = self._binary_search(completed)
if self.completions[i] == completed and \
(i == len(self.completions)-1 or
self.completions[i+1][:len(completed)] != completed):
# There is exactly one matching completion
return completed == start
self.userwantswindow = userWantsWin
self.lasttypedstart = self.start
# Put widgets in place
self.autocompletewindow = acw = Toplevel(self.widget)
# Put it in a position so that it is not seen.
acw.wm_geometry("+10000+10000")
# Make it float
acw.wm_overrideredirect(1)
try:
# This command is only needed and available on Tk >= 8.4.0 for OSX
# Without it, call tips intrude on the typing process by grabbing
# the focus.
acw.tk.call("::tk::unsupported::MacWindowStyle", "style", acw._w,
"help", "noActivates")
except TclError:
pass
self.scrollbar = scrollbar = Scrollbar(acw, orient=VERTICAL)
self.listbox = listbox = Listbox(acw, yscrollcommand=scrollbar.set,
exportselection=False, bg="white")
for item in self.completions:
listbox.insert(END, item)
self.origselforeground = listbox.cget("selectforeground")
self.origselbackground = listbox.cget("selectbackground")
scrollbar.config(command=listbox.yview)
scrollbar.pack(side=RIGHT, fill=Y)
listbox.pack(side=LEFT, fill=BOTH, expand=True)
acw.lift() # work around bug in Tk 8.5.18+ (issue #24570)
# Initialize the listbox selection
self.listbox.select_set(self._binary_search(self.start))
self._selection_changed()
# bind events
self.hideid = self.widget.bind(HIDE_VIRTUAL_EVENT_NAME,
self.hide_event)
for seq in HIDE_SEQUENCES:
self.widget.event_add(HIDE_VIRTUAL_EVENT_NAME, seq)
self.keypressid = self.widget.bind(KEYPRESS_VIRTUAL_EVENT_NAME,
self.keypress_event)
for seq in KEYPRESS_SEQUENCES:
self.widget.event_add(KEYPRESS_VIRTUAL_EVENT_NAME, seq)
self.keyreleaseid = self.widget.bind(KEYRELEASE_VIRTUAL_EVENT_NAME,
self.keyrelease_event)
self.widget.event_add(KEYRELEASE_VIRTUAL_EVENT_NAME,KEYRELEASE_SEQUENCE)
self.listupdateid = listbox.bind(LISTUPDATE_SEQUENCE,
self.listselect_event)
self.winconfigid = acw.bind(WINCONFIG_SEQUENCE, self.winconfig_event)
self.doubleclickid = listbox.bind(DOUBLECLICK_SEQUENCE,
self.doubleclick_event)
return None
def winconfig_event(self, event):
if not self.is_active():
return
# Position the completion list window
text = self.widget
text.see(self.startindex)
x, y, cx, cy = text.bbox(self.startindex)
acw = self.autocompletewindow
acw_width, acw_height = acw.winfo_width(), acw.winfo_height()
text_width, text_height = text.winfo_width(), text.winfo_height()
new_x = text.winfo_rootx() + min(x, max(0, text_width - acw_width))
new_y = text.winfo_rooty() + y
if (text_height - (y + cy) >= acw_height # enough height below
or y < acw_height): # not enough height above
# place acw below current line
new_y += cy
else:
# place acw above current line
new_y -= acw_height
acw.wm_geometry("+%d+%d" % (new_x, new_y))
def hide_event(self, event):
if self.is_active():
self.hide_window()
def listselect_event(self, event):
if self.is_active():
self.userwantswindow = True
cursel = int(self.listbox.curselection()[0])
self._change_start(self.completions[cursel])
def doubleclick_event(self, event):
# Put the selected completion in the text, and close the list
cursel = int(self.listbox.curselection()[0])
self._change_start(self.completions[cursel])
self.hide_window()
def keypress_event(self, event):
if not self.is_active():
return None
keysym = event.keysym
if hasattr(event, "mc_state"):
state = event.mc_state
else:
state = 0
if keysym != "Tab":
self.lastkey_was_tab = False
if (len(keysym) == 1 or keysym in ("underscore", "BackSpace")
or (self.mode == COMPLETE_FILES and keysym in
("period", "minus"))) \
and not (state & ~MC_SHIFT):
# Normal editing of text
if len(keysym) == 1:
self._change_start(self.start + keysym)
elif keysym == "underscore":
self._change_start(self.start + '_')
elif keysym == "period":
self._change_start(self.start + '.')
elif keysym == "minus":
self._change_start(self.start + '-')
else:
# keysym == "BackSpace"
if len(self.start) == 0:
self.hide_window()
return None
self._change_start(self.start[:-1])
self.lasttypedstart = self.start
self.listbox.select_clear(0, int(self.listbox.curselection()[0]))
self.listbox.select_set(self._binary_search(self.start))
self._selection_changed()
return "break"
elif keysym == "Return":
self.hide_window()
return None
elif (self.mode == COMPLETE_ATTRIBUTES and keysym in
("period", "space", "parenleft", "parenright", "bracketleft",
"bracketright")) or \
(self.mode == COMPLETE_FILES and keysym in
("slash", "backslash", "quotedbl", "apostrophe")) \
and not (state & ~MC_SHIFT):
# If start is a prefix of the selection, but is not '' when
# completing file names, put the whole
# selected completion. Anyway, close the list.
cursel = int(self.listbox.curselection()[0])
if self.completions[cursel][:len(self.start)] == self.start \
and (self.mode == COMPLETE_ATTRIBUTES or self.start):
self._change_start(self.completions[cursel])
self.hide_window()
return None
elif keysym in ("Home", "End", "Prior", "Next", "Up", "Down") and \
not state:
# Move the selection in the listbox
self.userwantswindow = True
cursel = int(self.listbox.curselection()[0])
if keysym == "Home":
newsel = 0
elif keysym == "End":
newsel = len(self.completions)-1
elif keysym in ("Prior", "Next"):
jump = self.listbox.nearest(self.listbox.winfo_height()) - \
self.listbox.nearest(0)
if keysym == "Prior":
newsel = max(0, cursel-jump)
else:
assert keysym == "Next"
newsel = min(len(self.completions)-1, cursel+jump)
elif keysym == "Up":
newsel = max(0, cursel-1)
else:
assert keysym == "Down"
newsel = min(len(self.completions)-1, cursel+1)
self.listbox.select_clear(cursel)
self.listbox.select_set(newsel)
self._selection_changed()
self._change_start(self.completions[newsel])
return "break"
elif (keysym == "Tab" and not state):
if self.lastkey_was_tab:
# two tabs in a row; insert current selection and close acw
cursel = int(self.listbox.curselection()[0])
self._change_start(self.completions[cursel])
self.hide_window()
return "break"
else:
# first tab; let AutoComplete handle the completion
self.userwantswindow = True
self.lastkey_was_tab = True
return None
elif any(s in keysym for s in ("Shift", "Control", "Alt",
"Meta", "Command", "Option")):
# A modifier key, so ignore
return None
elif event.char and event.char >= ' ':
# Regular character with a non-length-1 keycode
self._change_start(self.start + event.char)
self.lasttypedstart = self.start
self.listbox.select_clear(0, int(self.listbox.curselection()[0]))
self.listbox.select_set(self._binary_search(self.start))
self._selection_changed()
return "break"
else:
# Unknown event, close the window and let it through.
self.hide_window()
return None
def keyrelease_event(self, event):
if not self.is_active():
return
if self.widget.index("insert") != \
self.widget.index("%s+%dc" % (self.startindex, len(self.start))):
# If we didn't catch an event which moved the insert, close window
self.hide_window()
def is_active(self):
return self.autocompletewindow is not None
def complete(self):
self._change_start(self._complete_string(self.start))
# The selection doesn't change.
def hide_window(self):
if not self.is_active():
return
# unbind events
for seq in HIDE_SEQUENCES:
self.widget.event_delete(HIDE_VIRTUAL_EVENT_NAME, seq)
self.widget.unbind(HIDE_VIRTUAL_EVENT_NAME, self.hideid)
self.hideid = None
for seq in KEYPRESS_SEQUENCES:
self.widget.event_delete(KEYPRESS_VIRTUAL_EVENT_NAME, seq)
self.widget.unbind(KEYPRESS_VIRTUAL_EVENT_NAME, self.keypressid)
self.keypressid = None
self.widget.event_delete(KEYRELEASE_VIRTUAL_EVENT_NAME,
KEYRELEASE_SEQUENCE)
self.widget.unbind(KEYRELEASE_VIRTUAL_EVENT_NAME, self.keyreleaseid)
self.keyreleaseid = None
self.listbox.unbind(LISTUPDATE_SEQUENCE, self.listupdateid)
self.listupdateid = None
self.autocompletewindow.unbind(WINCONFIG_SEQUENCE, self.winconfigid)
self.winconfigid = None
# destroy widgets
self.scrollbar.destroy()
self.scrollbar = None
self.listbox.destroy()
self.listbox = None
self.autocompletewindow.destroy()
self.autocompletewindow = None
| [
"zh1bvtan1@gmail.com"
] | zh1bvtan1@gmail.com |
a54658bd8a22e553868a14b5f60211cd32da3b52 | 2bdedcda705f6dcf45a1e9a090377f892bcb58bb | /src/main/output/parent_database/month_uml_friend.py | aed12b5960becaee8d5dc22304612a6d6ecdb811 | [] | no_license | matkosoric/GenericNameTesting | 860a22af1098dda9ea9e24a1fc681bb728aa2d69 | 03f4a38229c28bc6d83258e5a84fce4b189d5f00 | refs/heads/master | 2021-01-08T22:35:20.022350 | 2020-02-21T11:28:21 | 2020-02-21T11:28:21 | 242,123,053 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py | const request = require('request');
const uuidv4 = require('uuid/v4');
/* Checks to see if the subscription key is available
as an environment variable. If you are setting your subscription key as a
string, then comment these lines out.
If you want to set your subscription key as a string, replace the value for
the Ocp-Apim-Subscription-Key header as a string. */
/* Read the key from the environment rather than hard-coding it: the comment
   above documents exactly this intent, and with a hard-coded (always truthy)
   string the guard below could never fire.  Never commit API keys. */
const subscriptionKey = process.env.TRANSLATOR_TEXT_SUBSCRIPTION_KEY;
if (!subscriptionKey) {
    throw new Error('Environment variable for your subscription key is not set.')
};
/* If you encounter any issues with the base_url or path, make sure that you are
using the latest endpoint: https://docs.microsoft.com/azure/cognitive-services/translator/reference/v3-0-translate */
/* Build and send a Translator v3.0 /translate request, logging the JSON
   response.  If you encounter issues with the base_url or path, check the
   latest endpoint reference:
   https://docs.microsoft.com/azure/cognitive-services/translator/reference/v3-0-translate */
function translateText(){
    let options = {
        method: 'POST',
        baseUrl: 'https://api.cognitive.microsofttranslator.com/',
        url: 'translate',
        qs: {
            'api-version': '3.0',
            'to': ['']   // NOTE(review): empty target language -- the API rejects this; fill in e.g. ['de']
        },
        headers: {
            // Fix: the auth header must be named Ocp-Apim-Subscription-Key
            // (the original carried a garbled/anonymized header name, so the
            // service would have rejected every request as unauthenticated).
            'Ocp-Apim-Subscription-Key': subscriptionKey,
            'Content-type': 'application/json',
            'X-ClientTraceId': uuidv4().toString()   // unique trace id per request, useful for support tickets
        },
        body: [{
            'text': 'Hello World!'
        }],
        json: true,
    };
    request(options, function(err, res, body){
        console.log(JSON.stringify(body, null, 4));
    });
};
// Call the function to translate text.
translateText();
| [
"soric.matko@gmail.com"
] | soric.matko@gmail.com |
cbaade48d67ca0c94d6331b106f6564b11908192 | 81539aba88c22cf75bd2e14f5e0e92f2bf54e962 | /DarkMatterMap2017/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV_madgraph_mcatnlo_pythia8/TTbarDMJets_Inclusive_pseudoscalar_LO_Mchi-1_Mphi-500_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV_madgraph_mcatnlo_pythia8_280000_2_cff.py | aeca49c5bdf95f76983dc2b24ee0ffaaae5aa727 | [] | no_license | nistefan/RandomizedParametersSeparator | ad35b48b95e9745814c0bf9d8d8b6eb8aa479177 | 66a0e291b59113c6b5301768f1c10e36cf23d3c3 | refs/heads/master | 2021-01-03T00:41:17.415005 | 2020-02-19T13:30:54 | 2020-02-19T13:30:54 | 239,838,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,700 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, lumisToProcess = cms.untracked.VLuminosityBlockRange(*('1:50704', '1:50622', '1:74286', '1:76871', '1:64821', '1:64912', '1:64968', '1:65030', '1:64997', '1:76347', '1:78483', '1:80152', '1:65207', '1:65297', '1:65435', '1:65535', '1:65606', '1:99640', '1:96398', '1:86147', '1:5140', '1:11710', '1:13118', '1:13804', '1:15666', '1:27582', '1:31980', '1:19461', '1:61852', '1:50792', '1:50839', '1:50846', '1:103742', '1:93787', '1:86214', '1:87364', '1:99569', '1:69745', '1:99635', '1:20439', '1:26926', '1:12497', '1:21462', '1:33811', '1:33287', '1:33762', '1:54940', '1:33849', '1:40276', '1:42017', '1:40025', '1:31694', '1:31788', '1:36482', '1:66701', '1:61870', '1:61876', '1:38771', '1:68960', '1:68050', '1:67071', '1:67120', '1:66062', '1:72409', '1:72426', '1:72491', '1:72538', '1:72571', '1:72192', '1:73528', '1:75024', '1:68573', '1:95995', '1:68837', '1:73217', '1:73361', '1:73232', '1:73347', '1:73370', '1:74070', '1:71746', '1:72258', '1:72268', '1:72271', '1:75671', '1:70732', '1:85085', '1:97995', '1:74464', '1:74707', '1:80366', '1:12548', '1:14692', '1:22066', '1:77367', '1:96461', '1:79852', '1:81093', '1:81413', '1:79174', '1:78922', '1:79131', '1:71146', '1:84132', '1:49441', '1:65412', '1:67916', '1:61460', '1:58374', '1:58635', '1:78667', '1:86588', '1:71454', '1:72027', '1:72158', '1:77534', '1:76426', '1:76466', '1:76473', '1:64763', '1:64861', '1:75330', '1:80068', '1:71588', '1:65908', '1:66329', '1:73417', '1:73574', '1:77609', '1:87757', '1:87891', '1:103132', '1:103148', '1:103190', '1:91285', '1:75513', '1:76148', '1:56135', '1:67909', '1:90666', '1:95182', '1:99798', '1:105654', '1:105795', '1:105902', '1:68761', '1:68584', '1:95391', '1:95581', '1:95672', '1:95938', '1:78073', '1:61557', '1:62060', '1:86652', '1:86229', '1:63279', '1:63768', '1:63881', '1:79148', '1:65508', '1:65752', '1:67085', '1:86936', '1:87109', '1:105607', '1:68085', '1:68086', '1:42936', '1:45110', '1:45301', 
'1:47949', '1:40858', '1:60642', '1:60666', '1:60862', '1:60979', '1:60938', '1:60943', '1:60956', '1:61102', '1:61030', '1:61182', '1:61393', '1:61498', '1:71100', '1:67877', '1:89339', '1:70044', '1:70612', '1:33202', '1:78084', '1:81554', '1:76092', '1:73965', '1:63724', '1:69652', '1:85044', '1:88265', '1:79500', '1:83014', '1:100994', ))
)
readFiles.extend( ['/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/280000/8867CCB3-B318-EA11-9095-0025905B859E.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/280000/6E39EBAF-B318-EA11-AA8D-AC1F6BAC7C10.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/280000/8CD12CC8-FE17-EA11-AB2D-0242AC130002.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/280000/BE1577AC-B318-EA11-84E0-0CC47A4C8E28.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/280000/1CC8E6A9-B318-EA11-B94C-0CC47A78A458.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/280000/CC27F754-B118-EA11-82F0-AC1F6BAC8038.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/280000/0C4EB1B9-B318-EA11-A9AA-0025905B8594.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/280000/C0348BAE-B318-EA11-97D9-0CC47A4C8E22.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/280000/DE5063B5-B318-EA11-9FEF-0CC47A7C3450.root', 
'/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/280000/66967FAD-B318-EA11-B842-0CC47A78A33E.root']); | [
"Nicole.Stefanov@cern.ch"
] | Nicole.Stefanov@cern.ch |
c4f259189903ac5cf05092f0bda5b48ad1860a48 | 680bd46e8eae20e78a425f766432711a47235374 | /models/netflow_qo_s_report_table_row.py | ffb909260dced53f35dbb127152a5ba862a72a9d | [
"Apache-2.0"
] | permissive | ILMostro/lm-sdk-python | 9f45217d64c0fc49caf2f4b279a124c2efe3d24d | 40da5812ab4d50dd1c6c3c68f7ea13c4d8f4fb49 | refs/heads/master | 2022-02-01T16:51:12.810483 | 2019-07-16T17:54:11 | 2019-07-16T17:54:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,477 | py | # coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from logicmonitor_sdk.models.netflow_data_base import NetflowDataBase # noqa: F401,E501
class NetflowQoSReportTableRow(NetflowDataBase):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> swagger-declared type (used by to_dict()).
    swagger_types = {
        'data_type': 'str',
        'received': 'float',
        'sent': 'float',
        'type': 'str'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'data_type': 'dataType',
        'received': 'received',
        'sent': 'sent',
        'type': 'type'
    }

    def __init__(self, data_type=None, received=None, sent=None, type=None):  # noqa: E501
        """NetflowQoSReportTableRow - a model defined in Swagger"""  # noqa: E501
        # Backing fields for the generated properties below; None means unset.
        self._data_type = None
        self._received = None
        self._sent = None
        self._type = None
        self.discriminator = None
        # Only assign values explicitly provided, so unset fields stay None.
        if data_type is not None:
            self.data_type = data_type
        if received is not None:
            self.received = received
        if sent is not None:
            self.sent = sent
        if type is not None:
            self.type = type

    # ---- Generated getter/setter pairs (one per swagger field) ----
    @property
    def data_type(self):
        """Gets the data_type of this NetflowQoSReportTableRow.  # noqa: E501

        :return: The data_type of this NetflowQoSReportTableRow.  # noqa: E501
        :rtype: str
        """
        return self._data_type

    @data_type.setter
    def data_type(self, data_type):
        """Sets the data_type of this NetflowQoSReportTableRow.

        :param data_type: The data_type of this NetflowQoSReportTableRow.  # noqa: E501
        :type: str
        """
        self._data_type = data_type

    @property
    def received(self):
        """Gets the received of this NetflowQoSReportTableRow.  # noqa: E501

        :return: The received of this NetflowQoSReportTableRow.  # noqa: E501
        :rtype: float
        """
        return self._received

    @received.setter
    def received(self, received):
        """Sets the received of this NetflowQoSReportTableRow.

        :param received: The received of this NetflowQoSReportTableRow.  # noqa: E501
        :type: float
        """
        self._received = received

    @property
    def sent(self):
        """Gets the sent of this NetflowQoSReportTableRow.  # noqa: E501

        :return: The sent of this NetflowQoSReportTableRow.  # noqa: E501
        :rtype: float
        """
        return self._sent

    @sent.setter
    def sent(self, sent):
        """Sets the sent of this NetflowQoSReportTableRow.

        :param sent: The sent of this NetflowQoSReportTableRow.  # noqa: E501
        :type: float
        """
        self._sent = sent

    @property
    def type(self):
        """Gets the type of this NetflowQoSReportTableRow.  # noqa: E501

        :return: The type of this NetflowQoSReportTableRow.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this NetflowQoSReportTableRow.

        :param type: The type of this NetflowQoSReportTableRow.  # noqa: E501
        :type: str
        """
        self._type = type

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk the declared swagger fields, recursively converting nested
        # models (anything with a to_dict), lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated safeguard: only relevant if the model ever subclasses dict.
        if issubclass(NetflowQoSReportTableRow, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NetflowQoSReportTableRow):
            return False
        # Field-by-field comparison via the instance dictionaries.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"bamboo@build01.us-west-1.logicmonitor.net"
] | bamboo@build01.us-west-1.logicmonitor.net |
c30d66e61d205d652025d4aacc12cb9ed94b3e99 | 67f86bb3d09cbc86cac698b3f0abaf01457a966a | /master/bopytest-code/code/ch4/dt/2/unnecessary_math.py | dca5e5454a0dd1e77c0f748a62b36acbbfcd06ca | [
"MIT"
] | permissive | tied/DevArtifacts | efba1ccea5f0d832d4227c9fe1a040cb93b9ad4f | 931aabb8cbf27656151c54856eb2ea7d1153203a | refs/heads/master | 2020-06-06T01:48:32.149972 | 2018-12-08T15:26:16 | 2018-12-08T15:26:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | """
This module defines multiply(a, b) and divide(a, b).
>>> import unnecessary_math as um
Here's how you use multiply:
>>> um.multiply(4, 3)
12
>>> um.multiply('a', 3)
'aaa'
Here's how you use divide:
>>> um.divide(10, 5)
2.0
"""
def multiply(a, b):
    """
    Return the product of ``a`` and ``b``.

    Works for any pair of operands supporting ``*`` (e.g. repeating a
    string by an integer count).

    >>> import unnecessary_math as um
    >>> um.multiply(4, 3)
    12
    >>> um.multiply('a', 3)
    'aaa'
    """
    product = a * b
    return product
def divide(a, b):
    """
    Return the quotient of ``a`` divided by ``b`` (true division).

    >>> import unnecessary_math as um
    >>> um.divide(10, 5)
    2.0
    """
    quotient = a / b
    return quotient
| [
"alexander.rogalsky@yandex.ru"
] | alexander.rogalsky@yandex.ru |
7321457036e055ce425ff626c31f3526474e40da | 45c52da4d20f912e462359b051a3a8f1dced7210 | /module/SequencePlayback.py | bac331fac2553ba6b50ad8f1140d1ee368a3731b | [] | no_license | solpie/SeqTruan | 8b55c37d198898e40a2808d751b011d23022e552 | 4ed23592bf96a9d9261a7cc5fa82cd04d7e3da7d | refs/heads/master | 2021-01-10T03:38:30.230285 | 2015-06-15T14:21:43 | 2015-06-15T14:21:43 | 36,845,728 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,285 | py | __author__ = 'toramisu'
import os
from PyQt5.Qt import QTimer
from module.Events import *
from model import SImage
class SequencePlayback():
    """Drives frame-by-frame playback of an image sequence with a QTimer.

    Frames are not drawn here; render() broadcasts a
    SequencePlaybackEvent.RENDER_FRAME event carrying the next frame index,
    and listeners on the Event bus (presumably the view) do the drawing.
    """
    def __init__(self):
        self.imageSequence = []    # loaded SImage frames (see disabled load() below)
        self.framerate = 0         # frames per second, set via setFramerate()
        self.currentFrame = -1     # NOTE(review): appears unused — candidate for removal
        self.currentFrameIdx = -1  # index of the most recently rendered frame
        self.endFrameIdx = 1       # exclusive upper bound for frame indices
        self.state = ''            # last playback state received via onState()
        self.timer = QTimer()
        # NOTE(review): this overrides QTimer's timerEvent handler, so Qt will
        # call onTick with a QTimerEvent, while the Event bus below calls it
        # with a time value — confirm both call sites tolerate that argument.
        self.timer.timerEvent = self.onTick
        Event.add(AudioPlaybackEvent.TICK, self.onTick)
        Event.add(PlaybackEvent.STATE, self.onState)
        self.setFramerate(24)
        pass

    def onState(self, state):
        """Map playback-state events onto starting/stopping the timer."""
        self.state = state
        if state == PlayStateType.PLAY:
            self.play()
        elif state == PlayStateType.PAUSE:
            self.pause()
            pass
        pass

    def onTick(self, time):
        # Advance one frame per tick (fired by both the QTimer override and
        # AudioPlaybackEvent.TICK dispatches).
        self.render()
        pass

    # Retained (disabled) directory loader that scanned for .png frames:
    # def load(self, imagesPath=None):
    #     if imagesPath:
    #         for root, dirs, files in os.walk(imagesPath):
    #             for filespath in files:
    #                 filename = os.path.join(root, filespath).replace('\\', '/')
    #                 # todo support image ext
    #                 if filename.find('.png') < 0:
    #                     continue
    #                 simage = SImage(filename)
    #                 self.imageSequence.append(simage)
    #                 simage.frameIdx = len(self.imageSequence)
    #                 self.endFrameIdx = simage.frameIdx
    #                 print('[load img]: ', filename)
    #     Event.dis(ActionEvent.LOAD_SEQ, self.imageSequence)
    #     pass

    def play(self):
        """Start the timer if it is not already running."""
        if not self.timer.isActive():
            self.timer.start()
            pass
        pass

    def pause(self):
        """Stop the timer if it is running."""
        if self.timer.isActive():
            self.timer.stop()
            pass
        pass

    def render(self):
        """Advance to the next frame (wrapping at endFrameIdx) and announce it."""
        self.currentFrameIdx = (self.currentFrameIdx + 1) % self.endFrameIdx
        event = SequencePlaybackEvent()
        event.type = SequencePlaybackEvent.RENDER_FRAME
        event.frameIdx = self.currentFrameIdx
        Event.dis(SequencePlaybackEvent.RENDER_FRAME, event)

    def setFramerate(self, framerate):
        """Set playback speed and the matching timer interval (milliseconds)."""
        self.framerate = framerate
        # NOTE(review): 1000 / framerate is a float under Python 3 while
        # QTimer.setInterval takes milliseconds as an int — confirm PyQt
        # coerces this as intended.
        self.timer.setInterval(1000 / self.framerate)
        pass
| [
"solpie.net@gmail.com"
] | solpie.net@gmail.com |
c2895596fd10a2fc4f221b84f49ad9db95988517 | bb7712c8fab2380ffd37e53136097d8d322a73e7 | /order/migrations/0014_remove_order_order_id.py | e67b11e4d630676c1cd8936e9fbc22e9ab0dfd43 | [] | no_license | nitin1011/Daily-Kart | 5dfaad06c4ab7ea236a8f1b0e29aaea4baba0b81 | 59859bd2dc66563ff1ab0649591e4b19b6b4a85b | refs/heads/master | 2020-08-15T09:52:22.826037 | 2019-10-15T14:56:28 | 2019-10-15T14:56:28 | 215,320,786 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | # Generated by Django 2.2.2 on 2019-10-02 10:48
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops the ``order_id`` column from the
    # ``order`` app's Order model. Runs after 0013_auto_20191002_1617.
    # Do not hand-edit applied migrations; add a new one instead.

    dependencies = [
        ('order', '0013_auto_20191002_1617'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='order',
            name='order_id',
        ),
    ]
| [
"nitinjethwani10@gmail.com"
] | nitinjethwani10@gmail.com |
9ba33c43b52f06943abc89aadcf47123451c8752 | 2031771d8c226806a0b35c3579af990dd0747e64 | /pyobjc-framework-QTKit/PyObjCTest/test_qtmoviemodernizer.py | 924bd3a96364bbd4331610fd72fab57807c6deb9 | [
"MIT"
] | permissive | GreatFruitOmsk/pyobjc-mirror | a146b5363a5e39181f09761087fd854127c07c86 | 4f4cf0e4416ea67240633077e5665f5ed9724140 | refs/heads/master | 2018-12-22T12:38:52.382389 | 2018-11-12T09:54:18 | 2018-11-12T09:54:18 | 109,211,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,175 | py | from PyObjCTools.TestSupport import *
from QTKit import *
class TestQTError (TestCase):
    """Exercise the QTMovieModernizer constants and Objective-C metadata."""

    @min_os_level('10.9')
    def testConstants(self):
        # Status enum values, checked in declaration order.
        expected_statuses = (
            (QTMovieModernizerStatusUnknown, 0),
            (QTMovieModernizerStatusPreparing, 1),
            (QTMovieModernizerStatusRunning, 2),
            (QTMovieModernizerStatusCancelled, 3),
            (QTMovieModernizerStatusFailed, 4),
            (QTMovieModernizerStatusCompletedWithSuccess, 5),
            (QTMovieModernizerStatusNotRequired, 6),
        )
        for constant, expected in expected_statuses:
            self.assertEqual(constant, expected)
        # Output-format identifiers must be string constants.
        for fmt in (QTMovieModernizerOutputFormat_H264,
                    QTMovieModernizerOutputFormat_AppleProRes422,
                    QTMovieModernizerOutputFormat_AppleProRes4444):
            self.assertIsInstance(fmt, unicode)

    @min_os_level('10.9')
    def testMethods(self):
        # Bridged method signatures: BOOL result, by-reference error arg,
        # and a completion handler taking a void block.
        self.assertResultIsBOOL(QTMovieModernizer.requiresModernization_error_)
        self.assertArgIsOut(QTMovieModernizer.requiresModernization_error_, 1)
        self.assertArgIsBlock(
            QTMovieModernizer.modernizeWithCompletionHandler_, 0, b'v')
if __name__ == "__main__":
main()
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
c4201c9f0a97d7dd5f456d9fa220b91b3ac2ab85 | 43ec4ea3b633244f5deef45315f19844a852a034 | /ExanteTaxCalculator/src/infrastructure/report_row.py | 80d336933e1578980605ea6bc60f1f9c0ec53095 | [] | no_license | mateuszmidor/PythonStudy | 4a34feab04fe1bcc62a67506a5e7be85fb209d8c | 579b79b76cb5ce27cb9af09a2bd3db3c5ad65595 | refs/heads/master | 2023-05-25T11:32:50.274138 | 2023-05-17T06:08:29 | 2023-05-17T06:08:29 | 21,539,459 | 0 | 0 | null | 2023-05-23T00:40:51 | 2014-07-06T12:20:36 | Python | UTF-8 | Python | false | false | 2,427 | py | from decimal import Decimal
from enum import Enum
from dataclasses import dataclass, fields
from datetime import datetime
from typing import Dict
from src.infrastructure.errors import InvalidReportRowError
@dataclass
class ReportRow:
    """ReportRow is a raw CSV report row parsed into a dataclass.

    Field names mirror the columns of an Exante transaction report; use
    :meth:`from_dict` to build one from a ``csv.DictReader`` row.
    """

    class OperationType(Enum):
        """Names reflect Exante Transaction Report 'Operation type' column"""

        UNKNOWN = "UNKNOWN"
        TRADE = "TRADE"
        COMMISSION = "COMMISSION"
        FUNDING_WITHDRAWAL = "FUNDING/WITHDRAWAL"
        AUTOCONVERSION = "AUTOCONVERSION"
        DIVIDEND = "DIVIDEND"
        TAX = "TAX"
        US_TAX = "US TAX"
        CORPORATE_ACTION = "CORPORATE ACTION"
        ISSUANCE_FEE = "ISSUANCE FEE"
        STOCK_SPLIT = "STOCK SPLIT"

    transaction_id: int
    account_id: str
    symbol_id: str
    operation_type: OperationType
    when: datetime
    sum: Decimal
    asset: str
    eur_equivalent: Decimal
    comment: str

    def __post_init__(self) -> None:
        """Validate invariants; raise InvalidReportRowError on violation."""
        if self.transaction_id < 0:
            raise InvalidReportRowError(f"transaction_id should be >= 0, got: {self.transaction_id}")
        if self.account_id == "":
            raise InvalidReportRowError("account_id should not be empty")
        if self.symbol_id == "":
            raise InvalidReportRowError("symbol_id should not be empty")
        if self.asset == "":
            raise InvalidReportRowError("asset should not be empty")
        # actually, sum can be 0 for COMMISSION
        # if self.sum == 0:
        #     raise InvalidReportRowError("sum should not be zero")

    @classmethod
    def from_dict(cls, d: Dict[str, str]):
        """Build a ReportRow from one raw CSV row (column name -> string).

        Raises InvalidReportRowError when a column is missing or malformed.
        """
        try:
            return cls(
                transaction_id=int(d["Transaction ID"]),
                account_id=d["Account ID"],
                symbol_id=d["Symbol ID"],
                operation_type=ReportRow.OperationType(d["Operation type"]),
                when=datetime.strptime(d["When"], "%Y-%m-%d %H:%M:%S"),
                sum=Decimal(d["Sum"]),
                asset=d["Asset"],
                eur_equivalent=Decimal(d["EUR equivalent"]),
                comment=d["Comment"],
            )
        except (KeyError, ValueError, ArithmeticError) as e:
            # ArithmeticError is needed because Decimal() raises
            # decimal.InvalidOperation (an ArithmeticError, NOT a ValueError)
            # for malformed numeric strings; previously such rows escaped
            # unwrapped instead of becoming InvalidReportRowError.
            raise InvalidReportRowError from e

    def __str__(self) -> str:
        # One "name = value" line per dataclass field, in declaration order.
        lines = [f"{field.name} = {getattr(self, field.name)}" for field in fields(self)]
        return "\n".join(lines)
| [
"3demaniac@gmail.com"
] | 3demaniac@gmail.com |
7c80d929692aa65b1400c04ded4efe8f817eae4c | 2e60017779c5c286629ab5a3a7aeb27a6b19a60b | /python/problem_48.py | 704454c508774c17e70776fe8245c09cd9169b94 | [] | no_license | jamesjiang52/10000-Lines-of-Code | f8c7cb4b8d5e441693f3e0f6919731ce4680f60d | 3b6c20b288bad1de5390ad672c73272d98e93ae0 | refs/heads/master | 2020-03-15T03:50:38.104917 | 2018-05-07T04:41:52 | 2018-05-07T04:41:52 | 131,952,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | if __name__ == '__main__':
    import sys
    # Make the shared Progress helper importable from its fixed location.
    sys.path.insert(0, 'C:\\Users\\James Jiang\\Documents\\Project Euler')
    from progress import Progress

# NOTE(review): everything below runs at import time and uses Progress, which
# is only imported under the __main__ guard above — importing this file as a
# module would raise NameError. Hard-coded Windows paths also limit
# portability; consider pathlib-relative paths.
answers_list = ['dummy']  # pad index 0 so answers_list[n] lines up with problem n
with open('C:\\Users\\James Jiang\\Documents\\Project Euler\\answers.txt') as answers:
    for line in answers:
        answers_list.append(int(line))

progress_ = Progress("Problem 048: Self powers", 0, 1000)

# Sum i**i for i = 1..1000, updating the progress display each step.
sum_power = 0
for i in range(1, 1001):
    progress_.count = i
    progress_.progress()
    sum_power += i**i

# The answer is the last ten digits of the series; show it against the
# stored answer for problem 48.
progress_.count = int(str(sum_power)[-10:])
progress_.total = answers_list[48]
progress_.progress()

if __name__ == '__main__':
    input()  # keep the console window open when run directly
| [
"jamesjiang52@gmail.com"
] | jamesjiang52@gmail.com |
dd23bdde62f4bfd0ad65c5a3b1ba35c4b11db1ee | 0a6c04ce9a83c983558bf2a9b0622c0076b6b6c4 | /collab_app/migrations/0004_auto_20200301_1307.py | 50bdf63652ca7d923d08b6cb80c6723d53e38497 | [] | no_license | madhu0309/collaboratory | f4384affa8a489a1dc5b2614ac83d8ed2547dae1 | 5217d713d2a174e868a26ac9eb00836d006a09ad | refs/heads/master | 2022-12-14T23:13:47.816593 | 2020-03-18T17:43:56 | 2020-03-18T17:43:56 | 235,501,050 | 1 | 0 | null | 2022-12-08T03:50:22 | 2020-01-22T04:52:31 | JavaScript | UTF-8 | Python | false | false | 764 | py | # Generated by Django 3.0.2 on 2020-03-01 13:07
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds denormalized vote-aggregate columns
    # (num_vote_down, num_vote_up, vote_score) to the Answer model, all
    # indexed and defaulting to 0. Presumably backing a voting package
    # (e.g. django-vote) — confirm before editing by hand.

    dependencies = [
        ('collab_app', '0003_remove_answer_votes'),
    ]

    operations = [
        migrations.AddField(
            model_name='answer',
            name='num_vote_down',
            field=models.PositiveIntegerField(db_index=True, default=0),
        ),
        migrations.AddField(
            model_name='answer',
            name='num_vote_up',
            field=models.PositiveIntegerField(db_index=True, default=0),
        ),
        migrations.AddField(
            model_name='answer',
            name='vote_score',
            field=models.IntegerField(db_index=True, default=0),
        ),
    ]
| [
"madhu@micropyramid.com"
] | madhu@micropyramid.com |
c82c18ec7ff65774ec97137cad3ce006dd7fbfb1 | 7b280b947f639959bdd034628b433c3b27ef7b91 | /first/contact.py | a7afb6069609cfb7a2478a56d0b8ead2389277c5 | [] | no_license | amaurirg/tutoriais_flask | c0d5e523a16c05e76e4573c02bbe50bef66e28f0 | e8aa6c8e89f1400f0be34204cb1cbb456bfb04b5 | refs/heads/master | 2020-03-31T07:56:22.888257 | 2018-10-08T07:56:42 | 2018-10-08T07:56:42 | 152,039,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | from flask import Blueprint, render_template, request, abort, current_app
bp = Blueprint('contact', __name__, url_prefix='/contact')


@bp.route("/", methods=['GET', 'POST'])
def contact():
    """Render the contact form (GET) or store a submitted message (POST)."""
    if request.method == "GET":
        return render_template('contact.html')
    # process the submitted form data
    # print(request.form)
    name = request.form.get('name')
    message = request.form.get('message')
    # validate: both fields are required
    if not name or not message:
        abort(400, 'Formulário inválido!')
    # persist the message (current_app.db looks like a PyMongo database
    # handle attached at app setup — confirm against the app factory)
    current_app.db.messages.insert_one({'name': name, 'message': message})
    return "Sua mensagem foi enviada com sucesso!"


def configure(app):
    """Register this blueprint on the given Flask app."""
    app.register_blueprint(bp)
| [
"amaurirg@terra.com.br"
] | amaurirg@terra.com.br |
521fcf8431b2f9adc31ed607139f229a7a6b82d1 | 5873213f0615c13d26c389d8e6aff0291e639d51 | /manage.py | eb5d140281a83806bc9b6fd1521585ff4293510d | [
"MIT"
] | permissive | conferency/conf-panda | 15d9645d5834b78ea27560c58d15a0fe628749ab | d69094174e880b771cd1a5cad981f65374008359 | refs/heads/master | 2020-05-18T16:03:56.716017 | 2019-05-18T04:03:55 | 2019-05-18T04:03:55 | 184,514,509 | 0 | 2 | MIT | 2019-05-18T04:03:57 | 2019-05-02T03:14:46 | JavaScript | UTF-8 | Python | false | false | 7,434 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import glob
from app import create_app, db
from app.utils.fakedata import generate_test_confs, generate_fake_tickets, \
generate_test_users, generate_fake_papers, generate_fake_reviews, \
generate_fake_transactions, generate_fake_schedule, \
generate_default_addons, generate_admin, generate_fake_confs, \
generate_main_conf
from app.models import User, Follow, Role, Permission, Post, Comment, Paper, \
Review, PaperStatus, Invitation, Configuration, Conference, Ticket, \
EmailTemplate, JoinTrack, Author, TicketTransaction, Track, \
Registration, FormConfiguration, PromoCode, Product, ProductOption, \
Payout, Website, Page, UserDoc, Todo, EventLog, DelegateReview, \
ConferenceSchedule, Session, ReviewPreference, RequestLog, \
ConferencePayment, ConferenceTransaction, ConferenceAddon, ReviewComment, \
FavSession, TicketPrice, paper_reviewer, paper_author
from flask_script import Manager, Shell, Server
from flask_migrate import Migrate, MigrateCommand
from config import config
COV = None
if os.environ.get('FLASK_COVERAGE'):
    # Start coverage as early as possible so module-level code is measured;
    # the test() command re-execs the interpreter with this flag set.
    import coverage
    COV = coverage.coverage(branch=True, include='app/*')
    COV.start()
    print('Test Coverage Analysis Starting')

# Import KEY=VALUE pairs from a local .env file into the environment.
if os.path.exists('.env'):
    print('Importing environment from .env...')
    with open('.env') as env_file:
        for line in env_file:
            # Split on the first '=' only, so values that themselves contain
            # '=' (e.g. base64-encoded secrets) are preserved intact.
            var = line.strip().split('=', 1)
            if len(var) == 2:
                os.environ[var[0]] = var[1]

# get config
app = create_app(os.getenv('CONF_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
@migrate.configure
def configure_alembic(c):
    """Alembic hook: tweak the migration config before it is used."""
    # modify config object
    # compare_type makes ``manage.py db migrate`` detect column-type changes.
    c.set_main_option('compare_type', 'True')
    return c


def make_shell_context():
    """Objects pre-imported into ``python manage.py shell`` sessions."""
    return dict(app=app, db=db, User=User, Follow=Follow, Role=Role,
                Permission=Permission, Post=Post, Todo=Todo, Comment=Comment,
                Paper=Paper, Review=Review, PaperStatus=PaperStatus,
                Invitation=Invitation, Configuration=Configuration,
                Conference=Conference, Track=Track,
                EmailTemplate=EmailTemplate, JoinTrack=JoinTrack,
                Author=Author, ConferenceSchedule=ConferenceSchedule,
                TicketTransaction=TicketTransaction, Ticket=Ticket,
                Registration=Registration, FormConfiguration=FormConfiguration,
                PromoCode=PromoCode, Product=Product, Session=Session,
                ProductOption=ProductOption, Payout=Payout, Website=Website,
                Page=Page, UserDoc=UserDoc, EventLog=EventLog,
                DelegateReview=DelegateReview,
                ReviewPreference=ReviewPreference, RequestLog=RequestLog,
                ConferencePayment=ConferencePayment,
                ConferenceTransaction=ConferenceTransaction,
                ConferenceAddon=ConferenceAddon, ReviewComment=ReviewComment,
                FavSession=FavSession, TicketPrice=TicketPrice,
                paper_reviewer=paper_reviewer, paper_author=paper_author)


# Register CLI sub-commands: interactive shell, DB migrations, dev server.
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
manager.add_command('runserver', Server(threaded=True))


@manager.command
def test_logging():
    """Test logging."""
    # Emits one record per level so the configured handlers can be verified.
    app.logger.error('This is a error log test')
    app.logger.info('This is a info log test')
@manager.command
def test(coverage=False):
    """Run the unit tests."""
    # enable test coverage:
    # re-exec the interpreter with FLASK_COVERAGE set so the module-level
    # coverage hook (top of this file) starts before app code is imported.
    if coverage and not os.environ.get('FLASK_COVERAGE'):
        import sys
        os.environ['FLASK_COVERAGE'] = '1'
        os.execvp(sys.executable, [sys.executable] + sys.argv)
    print("**************Testing Started**********")
    # run the app in testing configuration
    app.config.from_object(config['testing'])
    config['testing'].init_app(app)
    # Remove the sqlite database files if exist
    for fl in glob.glob('data-test.sqlite'):
        os.remove(fl)
        print('old test sqlite database removed')
    deploy()  # redeploy the database
    fakedata()  # generate the fakedata
    import unittest
    tests = unittest.TestLoader().discover('tests')
    result = unittest.TextTestRunner(verbosity=2).run(tests).wasSuccessful()
    # generate test coverage report
    if COV:
        COV.stop()
        COV.save()
        print('Test Coverage Summary:')
        COV.report()
        basedir = os.path.abspath(os.path.dirname(__file__))
        covdir = os.path.join(basedir, 'tmp/coverage')
        COV.html_report(directory=covdir)
        print('HTML version: file://%s/index.html' % covdir)
        COV.erase()
    # the exit code is used for CircleCI
    import sys
    if result:  # tests passed
        sys.exit(0)
    else:  # tests failed
        sys.exit(1)


@manager.command
def profile(length=25, profile_dir=None):
    """Start the application under the code profiler."""
    # Print the `length` slowest calls per request; optionally dump raw
    # profile data to profile_dir for later inspection.
    # NOTE(review): werkzeug.contrib was removed in Werkzeug 1.0; newer
    # stacks provide werkzeug.middleware.profiler — confirm the pinned
    # Werkzeug version before upgrading.
    from werkzeug.contrib.profiler import ProfilerMiddleware
    app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],
                                      profile_dir=profile_dir)
    app.run()
# run the db migration script
# this creates all tables when first run, after that
# if the database has no changes, nothing happens
@manager.command
def deploy():
    """Initialize the database and populate init data."""
    from flask_migrate import upgrade
    upgrade()  # upgrade to the latest db schema
    # setup necessary data to initialize database;
    # the presence of the 'main' conference is the "already initialized"
    # marker, so deploy is safe to run repeatedly.
    if Conference.query.filter_by(short_name='main').first():
        print('database already initialized')
    else:
        # add registration form questions
        FormConfiguration.insert_formConfiguration()
        Role.insert_roles()  # create user roles
        generate_main_conf()  # generate default main conference
        generate_admin()  # generate the site admin
# Caution!!!: this reset db migration and related sqlite files
# The name is designed on purpose to highlight the potential danger
@manager.command
def reset_db_danger():
    """Reset db migration and delete all related files.

    Destroys the migrations/ folder and every *.sqlite file in the working
    directory, then re-creates a fresh migration baseline.
    """
    # Use the maintained flask_migrate package (already imported at the top
    # of this file); the ``flask.ext.*`` namespace was removed in Flask 1.0.
    from flask_migrate import init, migrate
    # Remove the migration folder if exist
    if os.path.exists('migrations'):
        shutil.rmtree('migrations')
    # Remove the sqlite database files if exist
    for fl in glob.glob('*.sqlite'):
        os.remove(fl)
    # Reset Migration Database
    init()
    # migrate database to latest revision
    migrate(message='init')
@manager.command
def testconfs(email='harryjwang@gmail.com'):
    """Generate fake pending confs."""
    generate_fake_confs(10, email)  # create 10 pending conferences


@manager.command
def fakedata():
    """Generate fake testing data."""
    # The named chair account doubles as the "already seeded" marker so the
    # command is idempotent.
    if User.query.filter_by(email='chair@conferency.com').first():
        print ('fake data already generated')
    else:
        generate_test_confs()  # load testing confs and tracks
        generate_fake_tickets()  # create fake tickets
        generate_test_users()  # create named fake users
        # generate_fake_users(100) # create random users
        # add_self_follows() # create self-follows for all users
        generate_fake_papers(100)  # create random papers
        generate_fake_reviews()  # create random reviews
        generate_fake_transactions()  # create fake tickets
        generate_fake_schedule()
        generate_default_addons()


if __name__ == '__main__':
    manager.run()
| [
"harryjwang@gmail.com"
] | harryjwang@gmail.com |
d12502e37805d16aa3555932c6c6d5b5764cea57 | 6c5daf5133656a33574dc2f5b62b9f1a1bdf1390 | /linear programming/gurobi/examples/workforce2.py | d79f3916924e23f35f65e3328dbe22c0e6552f91 | [] | no_license | RobinChen121/Python-Practices | 6c10b721dce3a8d2b76e190959d0940c52f0d1cc | 85bd9ad30c245dd62dc7ea837f964eaecbe24ed9 | refs/heads/master | 2023-08-31T10:08:01.613828 | 2023-08-27T14:51:46 | 2023-08-27T14:51:46 | 142,564,793 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,945 | py | #!/usr/bin/python
# Copyright 2018, Gurobi Optimization, LLC
# Assign workers to shifts; each worker may or may not be available on a
# particular day. If the problem cannot be solved, use IIS iteratively to
# find all conflicting constraints.
from gurobipy import *
# Number of workers required for each shift
shifts, shiftRequirements = multidict({
"Mon1": 3,
"Tue2": 2,
"Wed3": 4,
"Thu4": 4,
"Fri5": 5,
"Sat6": 6,
"Sun7": 5,
"Mon8": 2,
"Tue9": 2,
"Wed10": 3,
"Thu11": 4,
"Fri12": 6,
"Sat13": 7,
"Sun14": 5 })
# Amount each worker is paid to work one shift
workers, pay = multidict({
"Amy": 10,
"Bob": 12,
"Cathy": 10,
"Dan": 8,
"Ed": 8,
"Fred": 9,
"Gu": 11 })
# Worker availability
availability = tuplelist([
('Amy', 'Tue2'), ('Amy', 'Wed3'), ('Amy', 'Fri5'), ('Amy', 'Sun7'),
('Amy', 'Tue9'), ('Amy', 'Wed10'), ('Amy', 'Thu11'), ('Amy', 'Fri12'),
('Amy', 'Sat13'), ('Amy', 'Sun14'), ('Bob', 'Mon1'), ('Bob', 'Tue2'),
('Bob', 'Fri5'), ('Bob', 'Sat6'), ('Bob', 'Mon8'), ('Bob', 'Thu11'),
('Bob', 'Sat13'), ('Cathy', 'Wed3'), ('Cathy', 'Thu4'), ('Cathy', 'Fri5'),
('Cathy', 'Sun7'), ('Cathy', 'Mon8'), ('Cathy', 'Tue9'), ('Cathy', 'Wed10'),
('Cathy', 'Thu11'), ('Cathy', 'Fri12'), ('Cathy', 'Sat13'),
('Cathy', 'Sun14'), ('Dan', 'Tue2'), ('Dan', 'Wed3'), ('Dan', 'Fri5'),
('Dan', 'Sat6'), ('Dan', 'Mon8'), ('Dan', 'Tue9'), ('Dan', 'Wed10'),
('Dan', 'Thu11'), ('Dan', 'Fri12'), ('Dan', 'Sat13'), ('Dan', 'Sun14'),
('Ed', 'Mon1'), ('Ed', 'Tue2'), ('Ed', 'Wed3'), ('Ed', 'Thu4'),
('Ed', 'Fri5'), ('Ed', 'Sun7'), ('Ed', 'Mon8'), ('Ed', 'Tue9'),
('Ed', 'Thu11'), ('Ed', 'Sat13'), ('Ed', 'Sun14'), ('Fred', 'Mon1'),
('Fred', 'Tue2'), ('Fred', 'Wed3'), ('Fred', 'Sat6'), ('Fred', 'Mon8'),
('Fred', 'Tue9'), ('Fred', 'Fri12'), ('Fred', 'Sat13'), ('Fred', 'Sun14'),
('Gu', 'Mon1'), ('Gu', 'Tue2'), ('Gu', 'Wed3'), ('Gu', 'Fri5'),
('Gu', 'Sat6'), ('Gu', 'Sun7'), ('Gu', 'Mon8'), ('Gu', 'Tue9'),
('Gu', 'Wed10'), ('Gu', 'Thu11'), ('Gu', 'Fri12'), ('Gu', 'Sat13'),
('Gu', 'Sun14')
])
# Model
m = Model("assignment")

# Assignment variables: x[w,s] == 1 if worker w is assigned to shift s.
# Since an assignment model always produces integer solutions, we use
# continuous variables and solve as an LP.
x = m.addVars(availability, ub=1, name="x")

# The objective is to minimize the total pay costs
m.setObjective(quicksum(pay[w]*x[w,s] for w,s in availability), GRB.MINIMIZE)

# Constraint: assign exactly shiftRequirements[s] workers to each shift s
reqCts = m.addConstrs((x.sum('*', s) == shiftRequirements[s]
                      for s in shifts), "_")

# Optimize
m.optimize()
status = m.status
if status == GRB.Status.UNBOUNDED:
    print('The model cannot be solved because it is unbounded')
    exit(0)
if status == GRB.Status.OPTIMAL:
    print('The optimal objective is %g' % m.objVal)
    exit(0)
if status != GRB.Status.INF_OR_UNBD and status != GRB.Status.INFEASIBLE:
    print('Optimization was stopped with status %d' % status)
    exit(0)

# do IIS:
# the model is infeasible, so compute an Irreducible Inconsistent Subsystem
# and drop one offending constraint per pass until the LP becomes feasible.
print('The model is infeasible; computing IIS')
removed = []

# Loop until we reduce to a model that can be solved
while True:
    m.computeIIS()
    print('\nThe following constraint cannot be satisfied:')
    for c in m.getConstrs():
        if c.IISConstr:
            print('%s' % c.constrName)
            # Remove a single constraint from the model
            removed.append(str(c.constrName))
            m.remove(c)
            break
    print('')
    m.optimize()
    status = m.status
    if status == GRB.Status.UNBOUNDED:
        print('The model cannot be solved because it is unbounded')
        exit(0)
    if status == GRB.Status.OPTIMAL:
        break
    if status != GRB.Status.INF_OR_UNBD and status != GRB.Status.INFEASIBLE:
        print('Optimization was stopped with status %d' % status)
        exit(0)

print('\nThe following constraints were removed to get a feasible LP:')
print(removed)
| [
"40953071+RobinChen121@users.noreply.github.com"
] | 40953071+RobinChen121@users.noreply.github.com |
9fbb49ae597c0575ccb1b43a4264273f6ae1d2df | 4b7d5c8824df4462a338993efcdfa3b17199ff5b | /基础/day8/logging_mod.py | 9016888efa6bae9265a37e4dcdd2363dd0e69449 | [] | no_license | kobe24shou/python | 9c287babfb357e7f650fab453f3e60614b7a71fc | f78f147101f182207a69f0dc8e1595b54280164a | refs/heads/master | 2021-06-02T12:40:59.424542 | 2020-06-28T06:13:51 | 2020-06-28T06:13:51 | 101,620,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | #!/usr/bin/env python
# -*-coding:utf-8-*-
# Author:ls
# aishou24@gmail.com
# date:2018/6/10
import logging
# basicConfig 配置日志级别,日志格式,输出位置
logging.basicConfig(level=logging.DEBUG, # 日志级别debug
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='test.log',
filemode='a') # w 或 a 模式
# Sun, 10 Jun 2018 16:56:33 logging_mod.py[line:15] DEBUG debug message
logging.debug('debug message')
logging.info('info message')
logging.warning('warning message')
logging.error('error message')
logging.critical('critical message')
| [
"aishou24@gmail.com"
] | aishou24@gmail.com |
ba2aa6eceb577173190bad4307320afd84789292 | 716abd9e5ba4b72b72cc5f724a6cc0a6ad4390d1 | /11-Loops - for and while loops with break, continue and pass/49-Simple-practice-with-for-loop.py | 52de14b4a76bdbac60ab24a638e83c0cd0010073 | [] | no_license | devopstasks/PythonScripting | ac45edd72dc134ec3539b962f02dfc866f365ecf | 48bc37733ae6b3be4e2d64909ffe0962b6908518 | refs/heads/master | 2023-03-29T11:18:01.329452 | 2021-04-07T03:25:20 | 2021-04-07T03:25:20 | 350,388,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | '''
============
Simple practice with forloop
=> Read a string and print chars and their index values
============
'''
#str=input("Enter a string: ")
'''
str="python"
for each in str:
print(each,'-->',str.index(each))
'''
str="python"
index=0
for each in str:
print(f'{each}-->{index}')
index=index+1
| [
"rpadhan2015@gmail.com"
] | rpadhan2015@gmail.com |
92ddc4f26addab8501de36d65fb48ec5d2eecd95 | 0b5383d3099d62cb5e5b2f197bf4d648b51e3a1d | /flask_projects/Discover_Flask/project/users/form.py | 5c226e9142c70f9a9267b23d3dd098079fb7633a | [] | no_license | Sysa/py_samples | 386134d160dad2be6797d415c5cc6a657ef9e375 | 957a77e6601106e1917d4931784a25277478eeb8 | refs/heads/master | 2021-01-13T09:15:38.395169 | 2016-10-31T13:29:41 | 2016-10-31T13:29:41 | 72,438,805 | 1 | 0 | null | 2016-10-31T13:27:58 | 2016-10-31T13:27:58 | null | UTF-8 | Python | false | false | 862 | py | from flask_wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Length, Email, EqualTo
class LoginForm(Form):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
class RegisterForm(Form):
username = StringField(
'username',
validators=[DataRequired(), Length(min=3, max=25)]
)
email = StringField(
'email',
validators=[DataRequired(), Email(message=None), Length(min=6, max=40)]
)
password = PasswordField(
'password',
validators=[DataRequired(), Length(min=6, max=25)]
)
confirm = PasswordField(
'Repeat password',
validators=[
DataRequired(), EqualTo('password', message='Passwords must match.')
]
) | [
"meth787@gmail.com"
] | meth787@gmail.com |
4c2698dc3814d58bc9b6639b43b0bb2be5e29d8a | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/316/89621/submittedfiles/testes.py | b9143a54f8f0a7ae84e865f2e91e6678453e876f | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | # -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
n=int(input('Digite um numero:'))
m=int(input('Digite um numero:'))
d=1
while n%d>=0 and m%d>=0:
d=d+1
print(d)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
efbf8b4c9f0e58d826b8a8ef0a14b26e8a9afc91 | 736785ff9b31f83e1452117652dcdf5b22d9c39f | /main.py | 80029795276e8fe8f022c79e760d0d3a17e746c6 | [] | no_license | moskrc/mock_subprocess | 9fc99a43e7dedeabfd44c475c539c44f31c1c25b | 217023448f3c0caa3f69504ca499c3aca047905f | refs/heads/master | 2016-09-11T02:45:05.678718 | 2014-12-19T11:17:21 | 2014-12-19T11:17:21 | 28,226,563 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | import f1
import f2
def superfunction():
return [x.action() for x in [f1, f2]]
if __name__ == '__main__':
print superfunction() | [
"moskrc@gmail.com"
] | moskrc@gmail.com |
2b83412570a153958decb635c6bfddbd7e7c41df | 6d3a1f9fa0f56c1081e15f1c8a26e1829067779c | /Class 16 (OOP coding)/inheritance_polymorphism_encapsulation .py | 60e455e8c6d2c0b596de53f0c6fd099ea3171355 | [] | no_license | siyam04/python-course_materials | e813e0072eba325ef52054f06516fdc1bb2cf0a4 | 9627c85a083b9c0e38604ea1fe021428ceb3fcae | refs/heads/master | 2022-04-07T17:54:23.550256 | 2020-03-03T07:34:41 | 2020-03-03T07:34:41 | 155,817,366 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,320 | py |
########################################## Inheritance ########################################
# Parent class
class Bird:
def __init__(self):
print("Bird is ready")
def whoisThis(self):
print("Bird")
def swim(self):
print("Swim faster")
# child class
class Penguin(Bird):
def __init__(self):
# call super() function
super().__init__()
print("Penguin is ready")
def whoisThis(self):
print("Penguin")
def run(self):
print("Run faster")
# Body
peggy = Penguin()
peggy.whoisThis()
peggy.swim()
peggy.run()
"""
Output:
------------------
Bird is ready
Penguin is ready
Penguin
Swim faster
Run faster
------------------
In the above program, we created two classes i.e. Bird (parent class) and Penguin (child class).
The child class inherits the functions of parent class. We can see this from swim() method.
Again, the child class modified the behavior of parent class. We can see this from whoisThis() method.
Furthermore, we extend the functions of parent class, by creating a new run() method.
Additionally, we use super() function before __init__() method.
This is because we want to pull the content of __init__() method from the parent class into the
child class.
"""
########################################## Encapsulation ########################################
class Computer:
def __init__(self):
self.__maxprice = 900
def sell(self):
print("Selling Price: {}".format(self.__maxprice))
def setMaxPrice(self, price):
self.__maxprice = price
# Body
c = Computer()
c.sell()
# change the price
c.__maxprice = 1000
c.sell()
# using setter function
c.setMaxPrice(1000)
c.sell()
"""
Output:
--------------------
Selling Price: 900
Selling Price: 900
Selling Price: 1000
--------------------
In the above program, we defined a class Computer. We use __init__() method to store the maximum
selling price of computer. We tried to modify the price. However, we can’t change it because
Python treats the __maxprice as private attributes. To change the value, we used a setter
function i.e setMaxPrice() which takes price as parameter.
"""
########################################## Polymorphism ########################################
class Parrot:
def fly(self):
print("Parrot can fly")
def swim(self):
print("Parrot can't swim")
class Penguin:
def fly(self):
print("Penguin can't fly")
def swim(self):
print("Penguin can swim")
# common interface
def flying_test(bird):
bird.fly()
# instantiate objects
blu = Parrot()
peggy = Penguin()
# passing the object
flying_test(blu)
flying_test(peggy)
"""
Output:
------------------
Parrot can fly
Penguin can't fly
------------------
In the above program, we defined two classes Parrot and Penguin. Each of them have common method
fly() method. However, their functions are different. To allow polymorphism, we created common
interface i.e flying_test() function that can take any object. Then, we passed the objects
blu and peggy in the flying_test() function, it ran effectively.
"""
| [
"galib.abdullah04@gmail.com"
] | galib.abdullah04@gmail.com |
a20725d5d2ebeee77dd60da9f8c772ea992798ff | a1119965e2e3bdc40126fd92f4b4b8ee7016dfca | /trunk/repy/tests/ut_repytests_testfilehash.py | 1a5946da31b5ba95584d7d6e2132c62f32cb7260 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | SeattleTestbed/attic | 0e33211ddf39efdbcf5573d4fc7fa5201aa7310d | f618a962ce2fd3c4838564e8c62c10924f5df45f | refs/heads/master | 2021-06-10T23:10:47.792847 | 2017-05-15T12:05:43 | 2017-05-15T12:05:43 | 20,154,061 | 0 | 1 | null | 2014-10-16T17:21:06 | 2014-05-25T12:34:00 | Python | UTF-8 | Python | false | false | 377 | py | #pragma repy
"""
Files should not be hashable...
"""
if callfunc == "initialize":
myfileobj = file('junk_test.out','w')
try:
mydict = {}
try:
mydict[myfileobj] = 7
except AttributeError:
# I should get an exception here...
pass
else:
print 'files are hashable!'
finally:
myfileobj.close()
removefile('junk_test.out')
| [
"USER@DOMAIN"
] | USER@DOMAIN |
50e646d3d84227c5ed5c8135beca75e6d4bf19dd | a2960cf4ba59a3ccfcb8deb4b46e3b55e17843a1 | /app/api/v1/ports.py | 34fa38442f9a6c16e4ef5d690de1c34ac4c6d2b8 | [
"MIT"
] | permissive | cmz0228/backend | 4108869751d0ea03a6841c82cc123d116b79986a | 31a4fc7027a14147f971ca3d1097e957456daed3 | refs/heads/main | 2023-04-06T23:55:20.687821 | 2021-04-18T10:27:29 | 2021-04-18T10:27:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,119 | py | import typing as t
from fastapi import (
APIRouter,
HTTPException,
Request,
Depends,
Response,
encoders,
)
from fastapi.encoders import jsonable_encoder
from app.db.session import get_db
from app.db.schemas.port import (
PortOut,
PortOpsOut,
PortCreate,
PortEdit,
PortEditBase,
PortUserCreate,
PortUserEdit,
PortUserOut,
PortUserOpsOut,
)
from app.db.schemas.port_usage import (
PortUsageEdit,
PortUsageOut,
PortUsageCreate,
)
from app.db.crud.port import (
get_ports,
get_port,
create_port,
edit_port,
delete_port,
get_port_users,
add_port_user,
edit_port_user,
delete_port_user,
)
from app.db.crud.port_usage import create_port_usage, edit_port_usage
from app.db.crud.port_forward import delete_forward_rule
from app.db.crud.user import get_user
from app.core.auth import (
get_current_active_user,
get_current_active_superuser,
get_current_active_admin,
)
from app.utils.tasks import (
trigger_tc,
remove_tc,
trigger_iptables_reset,
trigger_port_clean,
)
ports_router = r = APIRouter()
@r.get(
"/servers/{server_id}/ports",
response_model=t.Union[t.List[PortOpsOut], t.List[PortOut]],
response_model_exclude_none=False,
)
async def ports_list(
response: Response,
server_id: int,
offset: int = 0,
limit: int = 100,
db=Depends(get_db),
user=Depends(get_current_active_user),
):
"""
Get all ports related to server
"""
ports = get_ports(db, server_id, user)
# This is necessary for react-admin to work
response.headers["Content-Range"] = f"0-9/{len(ports)}"
if user.is_admin():
return [PortOpsOut(**port.__dict__) for port in ports]
return [PortOut(**port.__dict__) for port in ports]
@r.get(
"/servers/{server_id}/ports/{port_id}",
response_model=t.Union[PortOpsOut, PortOut],
response_model_exclude_none=False,
response_model_exclude_unset=False,
)
async def port_get(
response: Response,
server_id: int,
port_id: int,
db=Depends(get_db),
user=Depends(get_current_active_user),
):
"""
Get port by id
"""
port = get_port(db, server_id, port_id)
if not port:
raise HTTPException(status_code=404, detail="Port not found")
if user.is_admin():
return PortOpsOut(**port.__dict__)
if not any(user.id == u.user_id for u in port.allowed_users):
raise HTTPException(status_code=404, detail="Port not found")
return PortOut(**port.__dict__)
@r.post(
"/servers/{server_id}/ports",
response_model=PortOpsOut,
response_model_exclude_none=True,
)
async def port_create(
request: Request,
server_id: int,
port: PortCreate,
db=Depends(get_db),
user=Depends(get_current_active_admin),
):
"""
Create a new port on server
"""
db_port = create_port(db, server_id, port)
trigger_tc(db_port)
return db_port
@r.put(
"/servers/{server_id}/ports/{port_id}",
response_model=PortOpsOut,
response_model_exclude_none=True,
)
async def port_edit(
request: Request,
server_id: int,
port_id: int,
port: PortEdit,
db=Depends(get_db),
user=Depends(get_current_active_user),
):
"""
Update an existing port
"""
db_port = get_port(db, server_id, port_id)
if not db_port:
raise HTTPException(status_code=404, detail="Port not found")
if not user.is_admin():
if not any(u.user_id == user.id for u in db_port.allowed_users):
raise HTTPException(status_code=403, detail="Operation not allowed")
port = PortEditBase(**port.dict(exclude_unset=True))
db_port = edit_port(db, db_port, port)
trigger_tc(db_port)
return db_port
@r.delete(
"/servers/{server_id}/ports/{port_id}",
response_model=PortOpsOut,
response_model_exclude_none=True,
)
async def port_delete(
request: Request,
server_id: int,
port_id: int,
db=Depends(get_db),
current_user=Depends(get_current_active_admin),
):
"""
Delete an existing port on server
"""
db_port = get_port(db, server_id, port_id)
if not db_port:
raise HTTPException(status_code=404, detail="Port not found")
if db_port.forward_rule:
trigger_port_clean(db_port.server, db_port)
delete_port(db, server_id, port_id)
remove_tc(server_id, db_port.num)
return db_port
@r.get(
"/servers/{server_id}/ports/{port_id}/users",
response_model=t.List[PortUserOpsOut],
)
async def port_users_get(
request: Request,
server_id: int,
port_id: int,
db=Depends(get_db),
current_user=Depends(get_current_active_admin),
):
"""
Get all port users
"""
port_users = get_port_users(db, server_id, port_id)
return jsonable_encoder(port_users)
@r.post(
"/servers/{server_id}/ports/{port_id}/users",
response_model=PortUserOpsOut,
)
async def port_user_add(
request: Request,
server_id: int,
port_id: int,
port_user: PortUserCreate,
db=Depends(get_db),
current_user=Depends(get_current_active_admin),
):
"""
Add a port user to port
"""
db_user = get_user(db, port_user.user_id)
if not db_user:
raise HTTPException(status_code=400, detail="User not found")
port_user = add_port_user(db, server_id, port_id, port_user)
return jsonable_encoder(port_user)
@r.put(
"/servers/{server_id}/ports/{port_id}/users/{user_id}",
response_model=PortUserOpsOut,
)
async def port_user_edit(
request: Request,
server_id: int,
port_id: int,
user_id: int,
port_user: PortUserEdit,
db=Depends(get_db),
current_user=Depends(get_current_active_admin),
):
"""
Add a port user to port
"""
port_user = edit_port_user(db, server_id, port_id, user_id, port_user)
if not port_user:
raise HTTPException(status_code=400, detail="Port user not found")
return jsonable_encoder(port_user)
@r.delete(
"/servers/{server_id}/ports/{port_id}/users/{user_id}",
response_model=PortUserOut,
)
async def port_users_delete(
request: Request,
server_id: int,
port_id: int,
user_id: int,
db=Depends(get_db),
current_user=Depends(get_current_active_admin),
):
"""
Delete a port user for port
"""
port_user = delete_port_user(db, server_id, port_id, user_id)
return port_user
@r.post(
"/servers/{server_id}/ports/{port_id}/usage",
response_model=PortUsageOut,
)
async def port_usage_edit(
server_id: int,
port_id: int,
port_usage: PortUsageEdit,
db=Depends(get_db),
user=Depends(get_current_active_admin),
):
"""
Update a port usage
"""
db_port_usage = edit_port_usage(db, port_id, port_usage)
if (
db_port_usage
and sum(
[
port_usage.download,
port_usage.upload,
port_usage.download_accumulate,
port_usage.upload_accumulate,
]
)
== 0
):
trigger_iptables_reset(db_port_usage.port)
return db_port_usage
| [
"me@leishi.io"
] | me@leishi.io |
9906aeacea03508f3edd03e58a19465ae05d4766 | 1ec8734beba25739979cbd4a9414a95273cce6aa | /8.18/正则语法.py | 51c2934c93e1dc7af23cb1d4bd7816eaa0bb4200 | [] | no_license | MATATAxD/untitled1 | 4431e4bc504e74d9a96f54fd6065ce46d5d9de40 | 18463f88ce60036959aabedabf721e9d938bacfb | refs/heads/master | 2023-01-01T23:16:30.140947 | 2020-10-23T04:32:38 | 2020-10-23T04:32:38 | 306,529,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | import re
# userInput=input('输入一个手机号')
# partern=r'^1[3,5,7,1,8]\d{9}$'
# result=re.search(partern,userInput)
# if result ==None:
# print('不是手机号')
# else:
# print('是一个正确的手机号')
#
# userInput=input('输入一个座机号')
# partern=r'^(0\d{2,3}-)?\d{7}$'
# result=re.search(partern,userInput)
# if result == None:
# print('不是座机号')
# else:
# print('是一个正确的座机号')
userInput=input('输入一个身份证号')
partern=r'^([1-9]\d{5}[12]\d{3}(0[1-9]|1[012])(0[1-9]|[12][0-9]|3[01])\d{3}[0-9xX])$'
result=re.search(partern,userInput)
if result == None:
print('不是身份证号码')
else:
print('是一个正确的身份证号')
| [
"502513072@qq.com"
] | 502513072@qq.com |
434f9f64b285e82a6dac620f02b1f9b1d0e86234 | 1698fe3ff15a6737c70501741b32b24fe68052f4 | /two-scoops-of-django-1.8-master/code/chapter_09_example_5.py | a3e490dcea97b2ebae7e56c6624897a2d81b32a1 | [] | no_license | menhswu/djangoapps | 4f3718244c8678640af2d2a095d20a405e337884 | 039a42aa9d1537e7beb4071d86bea7a42253d8b3 | refs/heads/master | 2023-03-04T03:56:01.070921 | 2021-01-28T07:35:02 | 2021-01-28T07:35:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,925 | py | """
Using This Code Example
=========================
The code examples provided are provided by Daniel Greenfeld and Audrey Roy of
Two Scoops Press to help you reference Two Scoops of Django: Best Practices
for Django 1.8. Code samples follow PEP-0008, with exceptions made for the
purposes of improving book formatting. Example code is provided "as is", and
is not intended to be, and should not be considered or labeled as "tutorial code".
Permissions
============
In general, you may use the code we've provided with this book in your programs
and documentation. You do not need to contact us for permission unless you're
reproducing a significant portion of the code or using it in commercial
distributions. Examples:
* Writing a program that uses several chunks of code from this course does not require permission.
* Selling or distributing a digital package from material taken from this book does require permission.
* Answering a question by citing this book and quoting example code does not require permission.
* Incorporating a significant amount of example code from this book into your product's documentation does require permission.
Attributions usually include the title, author, publisher and an ISBN. For
example, "Two Scoops of Django: Best Practices for Django 1.8, by Daniel
Roy Greenfeld and Audrey Roy Greenfeld. Copyright 2015 Two Scoops Press (ISBN-WILL-GO-HERE)."
If you feel your use of code examples falls outside fair use of the permission
given here, please contact us at info@twoscoopspress.org."""
# simple decorator template
import functools
def decorator(view_func):
@functools.wraps(view_func)
def new_view_func(request, *args, **kwargs):
# You can modify the request (HttpRequest) object here.
response = view_func(request, *args, **kwargs)
# You can modify the response (HttpResponse) object here.
return response
return new_view_func
| [
"jinxufang@tencent.com"
] | jinxufang@tencent.com |
03ab30df9d8a152c0d3fec958442d62144e70426 | e263dadfa1aba627ecc9f9d7a8e89dab776e55a1 | /jdcloud_sdk/services/jmr/apis/ShowClusterDetailsRequest.py | 4b89fa1f607ea1c9e27923b379c7dd72b5124373 | [
"Apache-2.0"
] | permissive | Hownever/jdcloud-sdk-python | 31d50e5927aa94050e9b58d94c6cdeda7659bda2 | 3f8c6f595c04c05690abac1110458613212380de | refs/heads/master | 2020-03-22T09:26:18.432911 | 2018-07-05T10:48:41 | 2018-07-05T10:48:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class ShowClusterDetailsRequest(JDCloudRequest):
"""
查询集群详情
"""
def __init__(self, parameters, header=None, version="v1"):
super(ShowClusterDetailsRequest, self).__init__(
'/v1/regions/{regionId}/detail', 'GET', header, version)
self.parameters = parameters
class ShowClusterDetailsParameters(object):
def __init__(self, regionId, id):
"""
:param regionId: 地域ID
:param id: 集群ID;由八位字符组成
"""
self.regionId = regionId
self.id = id
| [
"oulinbao@jd.com"
] | oulinbao@jd.com |
06a99559eabc9a07b171d22c70b448721746bb4e | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_366/ch10_2019_02_23_01_09_12_662766.py | ee8138d4dd1d37664a1f6e762006b6e1d7498c70 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | def libras_para_kg(libras):
y = libras/2.2046
return y
| [
"you@example.com"
] | you@example.com |
41c00b612cd1bdc6862d390f2e3a72bc198fff45 | a94c446a0d9ce77df965674f63be54d54b2be577 | /raspy/bcm_2835_pwm_clock_divider.py | b929606aabbb14e10f0005a5afeecee08175d1b5 | [
"MIT"
] | permissive | cyrusbuilt/RasPy | 3434e02c2bff09ef9f3ff4995bda14edc781c14b | 1e34840cc90ea7f19317e881162209d3d819eb09 | refs/heads/master | 2020-03-18T20:19:27.426002 | 2018-08-03T17:07:25 | 2018-08-03T17:07:25 | 135,207,376 | 0 | 0 | MIT | 2018-08-03T17:07:26 | 2018-05-28T20:42:17 | Python | UTF-8 | Python | false | false | 515 | py | """This module provides clock divider constants for the BCM2835 chipset."""
CLOCK_DIVIDER_1 = 1
CLOCK_DIVIDER_2 = 2
CLOCK_DIVIDER_4 = 4
CLOCK_DIVIDER_8 = 8
CLOCK_DIVIDER_16 = 16
CLOCK_DIVIDER_32 = 32
CLOCK_DIVIDER_64 = 64
CLOCK_DIVIDER_128 = 128
CLOCK_DIVIDER_256 = 256
CLOCK_DIVIDER_512 = 512
CLOCK_DIVIDER_1024 = 1024
CLOCK_DIVIDER_2048 = 2048
CLOCK_DIVIDER_4096 = 4096
CLOCK_DIVIDER_8192 = 8192
CLOCK_DIVIDER_16384 = 16384
CLOCK_DIVIDER_32768 = 32768
# TODO docstring the above values. # pylint: disable=fixme
| [
"cyrusbuilt@gmail.com"
] | cyrusbuilt@gmail.com |
d694bff671f5e22a281d723afa307c1dc5ab3397 | 67d8173a716da10a7350213d98938aae9f2115ce | /ProgrammingCourses/CS61A/project/scheme/tests/19.py | e80c835acb0cc9ba1a1b160467141c731af8b1cc | [] | no_license | jxie0755/Learning_Python | 94490d41bdf93acf8396f843328e38b6da310b0f | 143422321cbc3715ca08f6c3af8f960a55887ced | refs/heads/master | 2021-11-02T22:47:35.790239 | 2021-09-26T04:26:23 | 2021-09-26T04:26:23 | 101,445,132 | 0 | 2 | null | 2019-02-19T15:48:44 | 2017-08-25T22:00:16 | Python | UTF-8 | Python | false | false | 2,160 | py | test = {
"name": "Problem 19",
"points": 2,
"suites": [
{
"cases": [
{
"code": r"""
scm> (let-to-lambda 1)
1
scm> (let-to-lambda 'a)
a
scm> (let-to-lambda '(+ 1 2))
(+ 1 2)
scm> (let-to-lambda '(let ((a 1)
.... (b 2))
.... (+ a b)))
((lambda (a b) (+ a b)) 1 2)
""",
"hidden": False,
"locked": False
},
{
"code": r"""
scm> '(quoted expressions remain the same)
(quoted expressions remain the same)
scm> (let-to-lambda '(quote (let ((a 1) (b 2)) (+ a b))))
(quote (let ((a 1) (b 2)) (+ a b)))
""",
"hidden": False,
"locked": False
}
],
"scored": True,
"setup": r"""
scm> (load 'questions)
""",
"teardown": "",
"type": "scheme"
},
{
"cases": [
{
"code": r"""
scm> '(lambda parameters not affected but body affected)
(lambda parameters not affected but body affected)
scm> (let-to-lambda '(lambda (let a b) (+ let a b)))
(lambda (let a b) (+ let a b))
scm> (let-to-lambda '(lambda (x) a (let ((a x)) a)))
(lambda (x) a ((lambda (a) a) x))
""",
"hidden": False,
"locked": False
},
{
"code": r"""
scm> (let-to-lambda '(let ((a (let ((a 2)) a))
.... (b 2))
.... (+ a b)))
((lambda (a b) (+ a b)) ((lambda (a) a) 2) 2)
scm> (let-to-lambda '(let ((a 1))
.... (let ((b a))
.... b)))
((lambda (a) ((lambda (b) b) a)) 1)
scm> (let-to-lambda '(+ 1 (let ((a 1)) a)))
(+ 1 ((lambda (a) a) 1))
""",
"hidden": False,
"locked": False
}
],
"scored": True,
"setup": r"""
scm> (load 'questions)
""",
"teardown": "",
"type": "scheme"
}
]
}
| [
"30805062+jxie0755@users.noreply.github.com"
] | 30805062+jxie0755@users.noreply.github.com |
fbc6337296c2ae2561d7e93f7aebcf43171a0297 | bcabd9b183bc011e1ccf7e367fbed0dcaa03eee6 | /1 PYTHON/3 TELUSKO/65_Binary_search.py | 91df698e604998a020488263b59b6aa37105ea3a | [] | no_license | rajeshsvv/Lenovo_Back | 287fe4da2c696aa248ec57a4c45c4f234f6ca9ed | 7e49e38aaf934c65f9992a78404d2b81a4cd0204 | refs/heads/master | 2022-12-23T16:44:41.488128 | 2019-08-29T10:00:10 | 2019-08-29T10:00:10 | 204,859,914 | 0 | 1 | null | 2022-12-10T11:50:31 | 2019-08-28T06:05:35 | Python | UTF-8 | Python | false | false | 505 | py | # in binary search u r values should be in sorted order
pos = -1
def search(list, n):
l = 0
u = len(list) - 1
while l <= u:
mid = (l + u) // 2
if list[mid] == n:
globals()['pos'] = mid
return True
else:
if list[mid] < n:
l = mid + 1
else:
u = mid - 1
return False
list = [4, 7, 8, 12, 46, 99]
n = 12
if search(list, n):
print("Found at", pos)
else:
print("Not Found")
| [
"rajeshsvv01@gmail.com"
] | rajeshsvv01@gmail.com |
ab25787652495d666947f2f0ca7935915fd4aace | fd74a044c0037796455ba4bd4fd44f11c3323599 | /Contest/ABC/Contest_154/b.py | 5e8261afdf126e373cb77a4883e0660faa075bc0 | [] | no_license | tegetege/tegetege_AtCoder | 5ac87e0a7a9acdd50d06227283aa7d95eebe2e2f | ba6c6472082e8255202f4f22a60953d0afe21591 | refs/heads/master | 2022-03-25T00:29:22.952078 | 2022-02-10T14:39:58 | 2022-02-10T14:39:58 | 193,516,879 | 0 | 0 | null | 2019-06-25T13:53:13 | 2019-06-24T14:02:05 | Python | UTF-8 | Python | false | false | 91 | py | S = list(input())
ans = list()
for i in range(len(S)):
ans.append('x')
print(''.join(ans)) | [
"m_take7_ex_d@yahoo.co.jp"
] | m_take7_ex_d@yahoo.co.jp |
633624a2e73da3f8a48a008d6b0e0b666b7e34d5 | 3fe1a72d444a60582fe1e45349c03584e26f7238 | /karel_env/state_generator.py | 2508bbbdf3ba9b0758d5e88a3b87ed3093fdf6d2 | [
"MIT"
] | permissive | tedrepo/demo2program | 16f0b332a08ff8936439b19084cdf71092c8995d | 23464a69bfbf6fac9752fd423d14b03d37d1d1c6 | refs/heads/master | 2023-07-09T17:41:15.128227 | 2018-12-02T00:41:54 | 2018-12-02T00:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,243 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class KarelStateGenerator(object):
def __init__(self, seed=None):
self.rng = np.random.RandomState(seed)
# generate an initial env
def generate_single_state(self, h=8, w=8, wall_prob=0.1):
s = np.zeros([h, w, 16]) > 0
# Wall
s[:, :, 4] = self.rng.rand(h, w) > 1 - wall_prob
s[0, :, 4] = True
s[h-1, :, 4] = True
s[:, 0, 4] = True
s[:, w-1, 4] = True
# Karel initial location
valid_loc = False
while(not valid_loc):
y = self.rng.randint(0, h)
x = self.rng.randint(0, w)
if not s[y, x, 4]:
valid_loc = True
s[y, x, self.rng.randint(0, 4)] = True
# Marker: num of max marker == 1 for now
s[:, :, 6] = (self.rng.rand(h, w) > 0.9) * (s[:, :, 4] == False) > 0
s[:, :, 5] = 1 - (np.sum(s[:, :, 6:], axis=-1) > 0) > 0
assert np.sum(s[:, :, 5:]) == h*w, np.sum(s[:, :, :5])
marker_weight = np.reshape(np.array(range(11)), (1, 1, 11))
return s, y, x, np.sum(s[:, :, 4]), np.sum(marker_weight*s[:, :, 5:])
| [
"waltersun81@gmail.com"
] | waltersun81@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.