blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cf134c811fb3713e81e175427e899b6ebd34c10f | 00ee6a3c859362bbc20342c568a27ea2a493e427 | /src/x007007007/djapp/localnet/nameserver/models/domain.py | 35a784009909f91f382019ba5f13b41bf6ab4811 | [
"MIT"
] | permissive | x007007007/raspberrypi | 7721b1fde2763fd28db579ca65217b81ee2193ae | 9dfe49666c029b8bb617830a5c5a873a6106d853 | refs/heads/master | 2022-10-04T04:51:29.974216 | 2022-10-03T16:36:00 | 2022-10-03T16:36:00 | 56,951,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | from x007007007.djapp import _models
class DomainQuerySet(_models.QuerySet):
def available(self):
return self.filter(enable=True)
class DomainModel(_models.Model):
objects = DomainQuerySet.as_manager()
name = _models.CharField(max_length=254)
enable = _models.BooleanField(default=False)
def __str__(self):
return f'<Domain: ({self.pk}) .{self.name}>' | [
"x007007007@hotmail.com"
] | x007007007@hotmail.com |
54efa1c0b889e824fede6b3b02a78c8183256961 | 9a9fb43d866dc8fd829211d2b47328ef1f5ed428 | /PI_ROS_WORKSPACES/test/build_isolated/rosbuild/catkin_generated/pkg.installspace.context.pc.py | a8b861fca59e5dbabc02b4c6fbd5d6206f663874 | [] | no_license | droter/auto_mow | 326df42a54676079cac61fe63c40d5d04beb049b | 3742cb2ef78bc06d2771ac4c679e5110909774f8 | refs/heads/master | 2022-05-19T20:18:33.409777 | 2020-04-29T00:42:24 | 2020-04-29T00:42:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rosbuild"
PROJECT_SPACE_DIR = "/home/pi/test/install_isolated"
PROJECT_VERSION = "1.14.4"
| [
"joshuatygert@gmail.com"
] | joshuatygert@gmail.com |
d80d501c329d043b4b71d7d49c4af48ecc537c39 | 2d00083fbb5e7cec01dd248b61f4ff0520695e3c | /neutron_fwaas/extensions/firewall.py | ef1d92469585825258da5ed923c681f207718e03 | [
"Apache-2.0"
] | permissive | Woody89/neutron-fwaas | 47637d2ad3bf83e9c5211b8940bb09a6a14faf3e | 34b6ae323d8182e38f9ff23efd28e95f5f19c439 | refs/heads/master | 2021-01-22T18:33:21.013535 | 2017-08-19T05:57:24 | 2017-08-19T05:57:24 | 100,765,496 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,972 | py | # Copyright 2013 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import constants
from neutron.common import exceptions as nexception
from neutron.plugins.common import constants as p_const
from neutron.services import service_base
from oslo_config import cfg
from oslo_log import log as logging
from neutron.i18n import _
import six
LOG = logging.getLogger(__name__)
# Firewall rule action
FWAAS_ALLOW = "allow"
FWAAS_DENY = "deny"
FWAAS_REJECT = "reject"
# Firewall resource path prefix
FIREWALL_PREFIX = "/fw"
# Firewall Exceptions
class FirewallNotFound(nexception.NotFound):
message = _("Firewall %(firewall_id)s could not be found.")
class FirewallInUse(nexception.InUse):
message = _("Firewall %(firewall_id)s is still active.")
class FirewallInPendingState(nexception.Conflict):
message = _("Operation cannot be performed since associated Firewall "
"%(firewall_id)s is in %(pending_state)s.")
class FirewallNotActive(nexception.NotFound):
message = _("Firewall %(firewall_name)s is not active.")
class FirewallPolicyNotFound(nexception.NotFound):
message = _("Firewall Policy %(firewall_policy_id)s could not be found.")
class FirewallPolicyInUse(nexception.InUse):
message = _("Firewall Policy %(firewall_policy_id)s is being used.")
class FirewallPolicyConflict(nexception.Conflict):
"""FWaaS exception for firewall policy
Occurs when admin policy tries to use another tenant's unshared
policy.
"""
message = _("Operation cannot be performed since Firewall Policy "
"%(firewall_policy_id)s is not shared and does not belong to "
"your tenant.")
class FirewallRuleSharingConflict(nexception.Conflict):
"""FWaaS exception for firewall rules
When a shared policy is created or updated with unshared rules,
this exception will be raised.
"""
message = _("Operation cannot be performed since Firewall Policy "
"%(firewall_policy_id)s is shared but Firewall Rule "
"%(firewall_rule_id)s is not shared")
class FirewallPolicySharingConflict(nexception.Conflict):
"""FWaaS exception for firewall policy
When a policy is shared without sharing its associated rules,
this exception will be raised.
"""
message = _("Operation cannot be performed. Before sharing Firewall "
"Policy %(firewall_policy_id)s, share associated Firewall "
"Rule %(firewall_rule_id)s")
class FirewallPolicyNameExist(nexception.Conflict):
message = _("Firewall Policy %(name)s already exist.")
class FirewallRuleNotFound(nexception.NotFound):
message = _("Firewall Rule %(firewall_rule_id)s could not be found.")
class FirewallNameNotEmpty(nexception.NotFound):
message = _("Name cannot be empty.")
class FirewallPacketfilterNotFound(nexception.NotFound):
message = _('Firewall packetfilter %(fw_packetfilter_id)s '
'could not be found.')
class FirewallPacketfilterServiceNotFound(nexception.NotFound):
message = _('Firewall packetfilter-service %(firewall_packetfilter_id)s '
'could not be found.')
class FirewallPacketfilterDaddrobjNotFound(nexception.NotFound):
message = _('Firewall packetfilter-daddrobj '
'%(firewall_packetfilter_id)s '
'could not be found.')
class FirewallPacketfilterSaddrobjNotFound(nexception.NotFound):
message = _('Firewall packetfilter-saddrobj '
'%(firewall_packetfilter_id)s '
'could not be found.')
class FirewallPacketfilterSZNotFound(nexception.NotFound):
message = _('Firewall packetfilter-sz %(firewall_packetfilter_id)s '
'could not be found.')
class ResourceAllocateException(nexception.NotFound):
message = _('%(content)s')
class FirewallRuleInUse(nexception.InUse):
message = _("Firewall Rule %(firewall_rule_id)s is being used.")
class FirewallRuleCreateFaild(nexception.NotFound):
message = _("Firewall Rule %(name)s create Faild.")
class FirewallRuleUpdateFaild(nexception.NotFound):
message = _("Firewall Rule %(name)s update Faild.")
class FirewallRuleDeleteFaild(nexception.NotFound):
message = _("Firewall Rule %(name)s delete Faild.")
class FirewallRuleNotAssociatedWithPolicy(nexception.InvalidInput):
message = _("Firewall Rule %(firewall_rule_id)s is not associated "
" with Firewall Policy %(firewall_policy_id)s.")
class FirewallRuleInvalidProtocol(nexception.InvalidInput):
message = _("Firewall Rule protocol %(protocol)s is not supported. "
"Only protocol values %(values)s and their integer "
"representation (0 to 255) are supported.")
class FirewallRuleInvalidAction(nexception.InvalidInput):
message = _("Firewall rule action %(action)s is not supported. "
"Only action values %(values)s are supported.")
class FirewallRuleInvalidICMPParameter(nexception.InvalidInput):
message = _("%(param)s are not allowed when protocol "
"is set to ICMP.")
class FirewallRuleWithPortWithoutProtocolInvalid(nexception.InvalidInput):
message = _("Source/destination port requires a protocol")
class FirewallRuleInvalidPortValue(nexception.InvalidInput):
message = _("Invalid value for port %(port)s.")
class FirewallRuleInfoMissing(nexception.InvalidInput):
message = _("Missing rule info argument for insert/remove "
"rule operation.")
class FirewallIpAddressConflict(nexception.InvalidInput):
message = _("Invalid input - IP addresses do not agree with IP Version")
class FirewallIpAddressDuplicate(nexception.InvalidInput):
message = _("Invalid input - IP addresses cannot be duplicated")
class FirewallAssociatePacketfilter(nexception.InUse):
message = _('Firewall %(fw_id)s has packetfilter can not delete.')
class HardwareFirewallDataTimeFormatError(nexception.NotFound):
message = _("Hardware Input %(param)% format error.")
class HardwareFirewallDataTimeError(nexception.NotFound):
message = _("Hardware The start time is greater than the end time.")
class HardwareFirewallAddrobjNotFound(nexception.NotFound):
message = _("Hardware Firewall ip_obj %(addr_obj)s could not be found.")
class HardwareFirewallServerobjNotFound(nexception.NotFound):
message = _("Hardware Firewall Server %(server_obj)s could not be found.")
class HardwareFirewallTimeobjNotFound(nexception.NotFound):
message = _("Hardware Firewall dataTime %(time_id)s could not be found.")
class HardwareFirewallNotFound(nexception.NotFound):
message = _("Could not be found Firewall by policy.")
class HardwareAreaNotFound(nexception.NotFound):
message = _("Hardware Firewall security "
"area could not be found via %(ip)s.")
class HardwareFirewallTimerCreateFaild(nexception.NotFound):
message = _("Hardware Firewall Timer Create faild Name=%(name)s.")
class HardwareFirewallAddrCreateFaild(nexception.NotFound):
message = _("Hardware Firewall Addr create faild Name=%(name)s.")
class HardwareFirewallServiceCreateFaild(nexception.NotFound):
message = _("Hardware Firewall Service create faild Name=%(name)s.")
class HardwareFirewallVlanCreateFaild(nexception.NotFound):
message = _("Hardware Firewall Vlan create faild.")
class HardwareFirewallVrfCreateFaild(nexception.NotFound):
message = _(
"Hardware Firewall Vrf create faild vrf_id=%(vrf_id)s.")
class HardwareFirewallVsysCreateFaild(nexception.NotFound):
message = _(
"Hardware Firewall Vsys create faild vsys_id=%(vsys_id)s.")
class HardwareFirewallSecurityAreaOutExist(nexception.InUse):
message = _(
"Hardware Firewall Out SecurityArea already Exist.")
class HardwareFirewallSecurityAreaTypeError(nexception.InUse):
message = _(
"Hardware Firewall SecurityArea type is in or out.")
class HardwareFirewallSecurityAreaOutNotFound(nexception.NotFound):
message = _("Hardware Firewall Out SecurityArea not found.")
class HardwareFirewallSecurityAreaNameExist(nexception.InUse):
message = _("Hardware Firewall SecurityArea Name already Exist.")
class HardwareFirewallSecurityAreaCidr(nexception.InUse):
message = _("Hardware Firewall SecurityArea Out cidr need one.")
class HardwareFirewallSecurityAreaNotFoundByRouterID(nexception.InUse):
message = _("Hardware Firewall SecurityArea "
"Not Found by router_id=%(router_id)s.")
class HardwareFirewallSecurityAreaCreateFailed(nexception.NotFound):
message = _(
"Hardware Firewall Security Area create "
"faild security_area_name=%(security_area_name)s.")
class HardwareFirewallSecurityareaNotFound(nexception.NotFound):
message = _(
"Hardware Firewall SecurityArea "
"%(security_area_id)s not found")
class HardwareFirewallSecurityareaNotFoundByRouter(nexception.NotFound):
message = _("Hardware Firewall SecurityArea"
" %(security_area_router_id)s not found")
class HardwareFirewallSecurityareaSrcDstDuplicate(nexception.NotFound):
message = _("Hardware Firewall source SecurityArea"
"and destination SecurityArea can't be same")
class HardwareFirewallSecurityareaNotSame(nexception.NotFound):
message = _("Hardware Firewall SecurityArea"
"not same via %(name)s")
class HardwareFirewallRouterIdDuplicate(nexception.NotFound):
message = _("Hardware Firewall src_router_id"
"and dst_router_id can't be same")
class HardwareFirewallSecurityareaDstNotOUT(nexception.NotFound):
message = _("Hardware Firewall destination "
"SecurityArea type can't be OUT")
class HardwareFirewallAddrNotNone(nexception.BadRequest):
message = _("Hardware Firewall source_ip_address or "
"destination_ip_address can't be None")
class HardwareFirewallRouterIDNotNone(nexception.BadRequest):
message = _("Hardware Firewall src_router_id or "
"dst_router_id can't be Null")
class HardwareFirewallAddrExceed(nexception.BadRequest):
message = _("Hardware Firewall source_ip_address or "
"destination_ip_address can't exceed 10")
class HardwareFirewallAddrIllegal(nexception.BadRequest):
message = _("Hardware Firewall ipaddr=%(ipaddr)s"
"is and illegal IP")
class HardwareFirewallVlanNotFound(nexception.NotFound):
message = _("Hardware Firewall Vlan"
" %(vlan_id)s not found")
class HardwareFirewallVrfNotFound(nexception.NotFound):
message = _("Hardware Firewall Vrf"
" %(vrf_id)s not found")
class HardwareFirewallVsysNotFound(nexception.NotFound):
message = _("Hardware Firewall Vsys"
" %(vsys_id)s not found")
class HardwareFirewallVsysAssociaNotFound(nexception.NotFound):
message = _("Hardware Firewall Vsys Associa"
" %(id)s not found")
class FirewallNameExist(nexception.InUse):
message = _("Hardware Firewall Name"
" %(name)s already exist")
class DeviceDeleteFailed(nexception.NotFound):
message = _('Failed while delete %(obj_id)s')
class FirewallExist(nexception.InUse):
message = _('Dptech Firewall allready exist')
class FirewallRulesPacketfilterAssociationsNotFound(nexception.NotFound):
message = _('Hardware Pakcetfilter=%(rule_packetfilter_id)s'
'associations not found')
class HardwareFirewallSecurityAreaInUSe(nexception.InUse):
message = _("HF Firewall SecurityArea "
"%(security_area_name)s is being used.")
class HardwareFirewallSecurityArearouterExist(nexception.InUse):
message = _("HF Firewall SecurityArea Router already Exist.")
class HardwareFirewallSecurityArearouterIsNone(nexception.InUse):
message = _("HF Firewall SecurityArea Router_id is None.")
class HardwareFirewallSecurityArearCidrExist(nexception.InUse):
message = _("HF Firewall SecurityArea Cidr already Exist.")
class HardwareFirewallSecurityArearCidrIsNone(nexception.InUse):
message = _("HF Firewall SecurityArea Cidr is None.")
class HardwareFirewallSecurityAreaDeleteFailed(nexception.NotFound):
message = _("HF Firewall Security_area failed to Delete")
class HardwareFirewallVrfUpdateFailed(nexception.NotFound):
message = _("HF Firewall Vrf failed to Update")
class FirewallRuleNameExist(nexception.Conflict):
message = _("HF Firewall Rule Name=%(name)s already Exist.")
class FirewallDstPortNotEmpty(nexception.BadRequest):
message = _("Destination port can't be null")
# TODO(dougwig) - once this exception is out of neutron, restore this
# class FirewallInternalDriverError(nexception.NeutronException):
# """Fwaas exception for all driver errors.
#
# On any failure or exception in the driver, driver should log it and
# raise this exception to the agent
# """
# message = _("%(driver)s: Internal driver error.")
FirewallInternalDriverError = nexception.FirewallInternalDriverError
class FirewallRuleConflict(nexception.Conflict):
"""Firewall rule conflict exception.
Occurs when admin policy tries to use another tenant's unshared
rule.
"""
message = _("Operation cannot be performed since Firewall Rule "
"%(firewall_rule_id)s is not shared and belongs to "
"another tenant %(tenant_id)s")
fw_valid_protocol_values = [None, constants.PROTO_NAME_TCP,
constants.PROTO_NAME_UDP,
constants.PROTO_NAME_ICMP]
fw_valid_action_values = [FWAAS_ALLOW, FWAAS_DENY, FWAAS_REJECT]
def convert_protocol(value):
if value is None:
return
if value.isdigit():
val = int(value)
if 0 <= val <= 255:
return val
else:
raise FirewallRuleInvalidProtocol(
protocol=value,
values=fw_valid_protocol_values)
elif value.lower() in fw_valid_protocol_values:
return value.lower()
else:
raise FirewallRuleInvalidProtocol(
protocol=value,
values=fw_valid_protocol_values)
def convert_action_to_case_insensitive(value):
if value is None:
return
else:
return value.lower()
def convert_port_to_string(value):
if value is None:
return
else:
return str(value)
def _validate_port_range(data, key_specs=None):
if data is None:
return
data = str(data)
ports = data.split(':')
for p in ports:
try:
val = int(p)
except (ValueError, TypeError):
msg = _("Port '%s' is not a valid number") % p
LOG.debug(msg)
return msg
if val <= 0 or val > 65535:
msg = _("Invalid port '%s'") % p
LOG.debug(msg)
return msg
def _validate_ip_or_subnet_or_none(data, valid_values=None):
if data is None:
return None
msg_ip = attr._validate_ip_address(data, valid_values)
if not msg_ip:
return
msg_subnet = attr._validate_subnet(data, valid_values)
if not msg_subnet:
return
return _("%(msg_ip)s and %(msg_subnet)s") % {'msg_ip': msg_ip,
'msg_subnet': msg_subnet}
attr.validators['type:port_range'] = _validate_port_range
attr.validators['type:ip_or_subnet_or_none'] = _validate_ip_or_subnet_or_none
RESOURCE_ATTRIBUTE_MAP = {
'firewall_rules': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True, 'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:not_empty_string': attr.NAME_MAX_LEN},
'is_visible': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string':
attr.DESCRIPTION_MAX_LEN},
'is_visible': True, 'default': ''},
'firewall_policy_id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True},
'shared': {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': attr.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
'protocol': {'allow_post': True, 'allow_put': True,
'is_visible': True, 'default': None,
'convert_to': convert_protocol,
'validate': {'type:values': fw_valid_protocol_values}},
'ip_version': {'allow_post': True, 'allow_put': True,
'default': 4, 'convert_to': attr.convert_to_int,
'validate': {'type:values': [4, 6]},
'is_visible': True},
'source_ip_address': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': None},
'destination_ip_address': {'allow_post': True, 'allow_put': True,
'validate': {'type:string':
None},
'is_visible': True, 'default': None},
'src_router_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:not_empty_string': None},
'type:uuid': None,
'is_visible': True},
'dst_router_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:not_empty_string': None},
'type:uuid': None,
'is_visible': True},
'source_port': {'allow_post': True, 'allow_put': True,
'validate': {'type:port_range': None},
'convert_to': convert_port_to_string,
'default': None, 'is_visible': True},
'destination_port': {'allow_post': True, 'allow_put': True,
'validate': {'type:port_range': None},
'convert_to': convert_port_to_string,
'default': None, 'is_visible': True},
'position': {'allow_post': False, 'allow_put': False,
'default': None, 'is_visible': True},
'action': {'allow_post': True, 'allow_put': True,
'convert_to': convert_action_to_case_insensitive,
'validate': {'type:values': fw_valid_action_values},
'is_visible': True, 'default': 'deny'},
'enabled': {'allow_post': True, 'allow_put': True,
'default': True, 'convert_to': attr.convert_to_boolean,
'is_visible': True},
'start_time': {'allow_post': True, 'allow_put': True,
'validate': {'type:string':
attr.DESCRIPTION_MAX_LEN},
'is_visible': True, 'default': ''},
'end_time': {'allow_post': True, 'allow_put': True,
'validate': {'type:string':
attr.DESCRIPTION_MAX_LEN},
'is_visible': True, 'default': ''},
'session_type': {'allow_post': True, 'allow_put': True,
'validate': {'type:values': ['0', '1']},
'is_visible': True, 'default': '0'},
},
'firewall_policies': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:not_empty_string': attr.NAME_MAX_LEN},
'is_visible': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string':
attr.DESCRIPTION_MAX_LEN},
'is_visible': True, 'default': ''},
'shared': {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': attr.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
'firewall_rules': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_list': None},
'convert_to': attr.convert_none_to_empty_list,
'default': None, 'is_visible': True},
'audited': {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': attr.convert_to_boolean,
'is_visible': True},
},
'security_areas': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None}, 'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'is_visible': True},
'firewall_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True,
},
'router_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:not_empty_string': attr.NAME_MAX_LEN},
'is_visible': True
},
'name': {'allow_post': True, 'allow_put': True,
'type:not_empty_string': None,
'validate': {'type:not_empty_string': attr.NAME_MAX_LEN},
'is_visible': True},
'priority': {'allow_post': False, 'allow_put': False,
'validate': {'type:string': None},
'is_visible': True
},
'security_area_type': {'allow_post': True, 'allow_put': True,
'validate': {
'type:values': ['in', 'out']},
'is_visible': True
},
'cidr': {'allow_post': True, 'allow_put': True,
'validate': {'type:subnet_list': None},
'is_visible': True
},
'ifnames': {'allow_post': False, 'allow_put': True,
'validate': {'type:subnet': None},
'is_visible': True
},
},
'firewalls': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:not_empty_string': None,
'type:string': attr.NAME_MAX_LEN},
'is_visible': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string':
attr.DESCRIPTION_MAX_LEN},
'is_visible': True, 'default': ''},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'convert_to': attr.convert_to_boolean,
'validate': {'type:boolean': None
},
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'shared': {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': attr.convert_to_boolean,
'is_visible': False, 'required_by_policy': True,
'enforce_policy': True},
'firewall_policy_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:not_empty_string': None,
'type:uuid': None},
'is_visible': True}
},
}
firewall_quota_opts = [
cfg.IntOpt('quota_firewall',
default=-1,
help=_('Number of firewalls allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_firewall_policy',
default=-1,
help=_('Number of firewall policies allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_firewall_rule',
default=-1,
help=_('Number of firewall rules allowed per tenant. '
'A negative value means unlimited.')),
]
cfg.CONF.register_opts(firewall_quota_opts, 'QUOTAS')
class Firewall(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Firewall service"
@classmethod
def get_alias(cls):
return "fwaas"
@classmethod
def get_description(cls):
return "Extension for Firewall service"
@classmethod
def get_namespace(cls):
return "http://wiki.openstack.org/Neutron/FWaaS/API_1.0"
@classmethod
def get_updated(cls):
return "2013-02-25T10:00:00-00:00"
@classmethod
def get_resources(cls):
special_mappings = {'firewall_policies': 'firewall_policy'}
plural_mappings = resource_helper.build_plural_mappings(
special_mappings, RESOURCE_ATTRIBUTE_MAP)
attr.PLURALS.update(plural_mappings)
action_map = {'firewall_policy': {'insert_rule': 'PUT',
'remove_rule': 'PUT'}}
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
p_const.FIREWALL,
action_map=action_map,
register_quota=True)
@classmethod
def get_plugin_interface(cls):
return FirewallPluginBase
def update_attributes_map(self, attributes):
super(Firewall, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
@six.add_metaclass(abc.ABCMeta)
class FirewallPluginBase(service_base.ServicePluginBase):
def get_plugin_name(self):
return p_const.FIREWALL
def get_plugin_type(self):
return p_const.FIREWALL
def get_plugin_description(self):
return 'Firewall service plugin'
@abc.abstractmethod
def get_firewalls(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_firewall(self, context, id, fields=None):
pass
@abc.abstractmethod
def create_firewall(self, context, firewall):
pass
@abc.abstractmethod
def update_firewall(self, context, id, firewall):
pass
@abc.abstractmethod
def delete_firewall(self, context, id):
pass
@abc.abstractmethod
def get_firewall_rules(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_firewall_rule(self, context, id, fields=None):
pass
@abc.abstractmethod
def create_firewall_rule(self, context, firewall_rule):
pass
@abc.abstractmethod
def create_security_area(self, context, security_area):
pass
@abc.abstractmethod
def update_firewall_rule(self, context, id, firewall_rule):
pass
@abc.abstractmethod
def delete_firewall_rule(self, context, id):
pass
@abc.abstractmethod
def get_firewall_policy(self, context, id, fields=None):
pass
@abc.abstractmethod
def get_firewall_policies(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def create_firewall_policy(self, context, firewall_policy):
pass
@abc.abstractmethod
def update_firewall_policy(self, context, id, firewall_policy):
pass
@abc.abstractmethod
def delete_firewall_policy(self, context, id):
pass
@abc.abstractmethod
def insert_rule(self, context, id, rule_info):
pass
@abc.abstractmethod
def remove_rule(self, context, id, rule_info):
pass
@abc.abstractmethod
def delete_firewall_security_area(self, context, id, rule_info):
pass | [
"dongpzh@adtec.com.cn"
] | dongpzh@adtec.com.cn |
aa7b589d699b8db00da9ad7ac174df285adf2b18 | fa688be812459dee92b16defaf3446103f7e557c | /models.py | 45775bda2d4e3c55506cd174fe4a979b84e5f298 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | NextFloor/paste | ed042ebb2a711f2438e3aa1e534a540cd572b411 | 829f7dae7f30c621def9378b82b62d19b2ede043 | refs/heads/master | 2023-02-04T00:20:59.229854 | 2020-04-17T06:59:58 | 2020-04-17T06:59:58 | 97,236,204 | 7 | 1 | MIT | 2023-02-02T06:24:20 | 2017-07-14T13:24:01 | Python | UTF-8 | Python | false | false | 3,755 | py | import botocore
import random
import uuid
from datetime import datetime, timedelta
from passlib.hash import argon2
from pygments.lexers import guess_lexer
from sqlalchemy.sql import exists
from flask import abort
from flask import current_app as app
from flask_boto3 import Boto3
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
boto3 = Boto3()
class Paste(db.Model):
__tablename__ = 'paste'
slug = db.Column(db.String(4), primary_key=True)
source = db.Column(db.Text, nullable=False)
lexer = db.Column(db.String(32), nullable=False)
title = db.Column(db.String(64))
password = db.Column(db.String(128))
is_resource = db.Column(db.Boolean, default=False)
view_count = db.Column(db.Integer, nullable=False, default=0)
created_at = db.Column(db.DateTime, default=datetime.now)
expire_at = db.Column(db.DateTime)
def __init__(self, source, highlight, expiration, title, password, is_resource):
expiration = int(expiration)
if not source:
raise ValueError()
self.source = source
self.is_resource = is_resource
if title:
self.title = title
if password:
self.password = password
if expiration > 0:
self.expire_at = datetime.now() + timedelta(minutes=expiration)
if highlight == 'auto':
self.lexer = guess_lexer(source).aliases[0]
else:
self.lexer = highlight
for _ in range(5):
slug = self._generate_random_slug()
if not db.session.query(exists().where(Paste.slug == slug)).scalar():
self.slug = slug
break
else:
raise RuntimeError()
@db.validates('password')
def _validate_password(self, key, password):
return argon2.hash(password)
def verify_password(self, password):
return (not self.password) or argon2.verify(password, self.password)
def generate_presigned_resource_url(self):
s3 = boto3.clients['s3']
url = s3.generate_presigned_url('get_object', {
'Bucket': app.config['AWS_S3_BUCKET'],
'Key': self.source,
}, ExpiresIn=60)
return url
@classmethod
def get_or_404(cls, slug):
paste = Paste.query.get_or_404(slug)
if paste.expire_at and (paste.expire_at <= datetime.now()):
if paste.is_resource:
s3 = boto3.clients['s3']
s3.delete_object(
Bucket=app.config['AWS_S3_BUCKET'],
Key=paste.source,
)
db.session.delete(paste)
db.session.commit()
abort(404)
return paste
@staticmethod
def _generate_random_slug():
return ''.join(random.choice('ACDEFGHJKLNPQRSTXYabcdefghijknopqrstxyz3456789') for _ in range(4))
@staticmethod
def generate_random_resource_key():
s3 = boto3.clients['s3']
for _ in range(5):
key = str(uuid.uuid4())
try:
s3.head_object(
Bucket=app.config['AWS_S3_BUCKET'],
Key=key,
)
except botocore.exceptions.ClientError as e:
error_code = int(e.response['Error']['Code'])
if error_code == 404:
return key
else:
raise
else:
raise RuntimeError()
@staticmethod
def upload_file(key, fs):
s3 = boto3.clients['s3']
s3.put_object(
Body=fs.read(),
Bucket=app.config['AWS_S3_BUCKET'],
ContentDisposition='attachment; filename="{}"'.format(fs.filename),
Key=key,
)
| [
"devunt@gmail.com"
] | devunt@gmail.com |
3e0e8742d55a9c6fc96366fb5ebe8ab2bab27bff | bfd7de2bf935e969ef64431c3760369525ea9db5 | /백준/1차원배열/OX퀴즈.py | 6eadfa9df4e06df0e2644a435e26bb342aab4aa1 | [] | no_license | greenhelix/AlgorithmStudy | 29923de1c2bf4c484b6ea7070af1868b14c9acf1 | 04019cba5c2de2e1ce472420401952ed96087c96 | refs/heads/master | 2023-05-11T10:51:13.521785 | 2023-05-10T15:05:20 | 2023-05-10T15:05:20 | 238,429,809 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | # 백준 8958번
# OOXXOXXOOO 같이 문제의 결과가 있다.
# 연속되는 0의 등장하면 점수를 연속된 0의 수만큼 계산
# input>>
# 5
# OOXXOXXOOO
# OOXXOOXXOO
# OXOXOXOXOXOXOX
# OOOOOOOOOO
# OOOOXOOOOXOOOOX
# output >>
# 10
# 9
# 7
# 55
# 30
n = int(input())
a = []
for t in range(n):
a.append(str(input()))
for i in range(n):
quiz = list(a[i])
count = 0
result = []
un = 0
for j in quiz:
if j == 'X':
un = 1
count = 0
elif j == 'O':
un = 0
if un == 0:
count += 1
result.append(count)
print(sum(result))
| [
"dlrghks4444@gmail.com"
] | dlrghks4444@gmail.com |
b0b95374a9a79730f59536a506870227180732e9 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1_neat/16_0_1_Pyranja_counting_sheep.py | 91081f1789d16088e1118c785a1fc6233bb1685b | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 727 | py | #!/usr/bin/env python3
import sys, logging
"""codejam 2016 - counting sheep"""
def solve(n):
if n == 0:
return 'INSOMNIA'
seen = set()
current = n
while True:
for c in str(current):
seen.add(c)
if len(seen) >= 10:
return current
current += n
# ========================================= boilerplate ========================================== #
def main(cases):
return '\n'.join([formatCase(idx, solve(int(case))) for (idx, case) in enumerate(cases, 1)])
def formatCase(idx, answer):
return 'Case #{0}: {1}'.format(idx, answer)
if __name__ == '__main__':
logging.basicConfig(level=logging.WARN)
print(main(sys.stdin.readlines()[1:]))
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
a97468b141f51d5c17a5e4ae5faa346f69e48735 | a9b8f84c55aa64d4721de11e34e6fc300453be1b | /public/packages/pymongo/v26/pymongo/common.py | c64de1df07b5565ae3e55f007cf8fe7f666dc3f6 | [] | no_license | xuning992/tfty | f17273db407bb5ca87f583b114a42eb8e83d67fc | 20785621b933d2d6bdc293e953710faef4268bf6 | refs/heads/master | 2022-12-13T22:39:14.696326 | 2017-11-19T15:23:11 | 2017-11-19T15:23:11 | 111,306,251 | 0 | 0 | null | 2022-07-05T21:08:37 | 2017-11-19T15:11:40 | Python | UTF-8 | Python | false | false | 23,381 | py | # Copyright 2011-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Functions and classes common to multiple pymongo modules."""
import sys
import warnings
from . import read_preferences
from .auth import MECHANISMS
from .read_preferences import ReadPreference
from .errors import ConfigurationError
HAS_SSL = True
try:
import ssl
except ImportError:
HAS_SSL = False
# Jython 2.7 includes an incomplete ssl module. See PYTHON-498.
if sys.platform.startswith('java'):
HAS_SSL = False
def raise_config_error(key, dummy):
"""Raise ConfigurationError with the given key name."""
raise ConfigurationError("Unknown option %s" % (key,))
def validate_boolean(option, value):
"""Validates that 'value' is 'true' or 'false'.
"""
if isinstance(value, bool):
return value
elif isinstance(value, basestring):
if value not in ('true', 'false'):
raise ConfigurationError("The value of %s must be "
"'true' or 'false'" % (option,))
return value == 'true'
raise TypeError("Wrong type for %s, value must be a boolean" % (option,))
def validate_integer(option, value):
"""Validates that 'value' is an integer (or basestring representation).
"""
if isinstance(value, (int, long)):
return value
elif isinstance(value, basestring):
if not value.isdigit():
raise ConfigurationError("The value of %s must be "
"an integer" % (option,))
return int(value)
raise TypeError("Wrong type for %s, value must be an integer" % (option,))
def validate_positive_integer(option, value):
"""Validate that 'value' is a positive integer.
"""
val = validate_integer(option, value)
if val < 0:
raise ConfigurationError("The value of %s must be "
"a positive integer" % (option,))
return val
def validate_readable(option, value):
"""Validates that 'value' is file-like and readable.
"""
# First make sure its a string py3.3 open(True, 'r') succeeds
# Used in ssl cert checking due to poor ssl module error reporting
value = validate_basestring(option, value)
open(value, 'r').close()
return value
def validate_cert_reqs(option, value):
"""Validate the cert reqs are valid. It must be None or one of the three
values ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` or ``ssl.CERT_REQUIRED``"""
if value is None:
return value
if HAS_SSL:
if value in (ssl.CERT_NONE, ssl.CERT_OPTIONAL, ssl.CERT_REQUIRED):
return value
raise ConfigurationError("The value of %s must be one of: "
"`ssl.CERT_NONE`, `ssl.CERT_OPTIONAL` or "
"`ssl.CERT_REQUIRED" % (option,))
else:
raise ConfigurationError("The value of %s is set but can't be "
"validated. The ssl module is not available"
% (option,))
def validate_positive_integer_or_none(option, value):
"""Validate that 'value' is a positive integer or None.
"""
if value is None:
return value
return validate_positive_integer(option, value)
def validate_basestring(option, value):
"""Validates that 'value' is an instance of `basestring`.
"""
if isinstance(value, basestring):
return value
raise TypeError("Wrong type for %s, value must be an "
"instance of %s" % (option, basestring.__name__))
def validate_int_or_basestring(option, value):
"""Validates that 'value' is an integer or string.
"""
if isinstance(value, (int, long)):
return value
elif isinstance(value, basestring):
if value.isdigit():
return int(value)
return value
raise TypeError("Wrong type for %s, value must be an "
"integer or a string" % (option,))
def validate_positive_float(option, value):
"""Validates that 'value' is a float, or can be converted to one, and is
positive.
"""
err = ConfigurationError("%s must be a positive int or float" % (option,))
try:
value = float(value)
except (ValueError, TypeError):
raise err
# float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at
# one billion - this is a reasonable approximation for infinity
if not 0 < value < 1e9:
raise err
return value
def validate_timeout_or_none(option, value):
"""Validates a timeout specified in milliseconds returning
a value in floating point seconds.
"""
if value is None:
return value
return validate_positive_float(option, value) / 1000.0
def validate_read_preference(dummy, value):
"""Validate read preference for a ReplicaSetConnection.
"""
if value in read_preferences.modes:
return value
# Also allow string form of enum for uri_parser
try:
return read_preferences.mongos_enum(value)
except ValueError:
raise ConfigurationError("Not a valid read preference")
def validate_tag_sets(dummy, value):
"""Validate tag sets for a ReplicaSetConnection.
"""
if value is None:
return [{}]
if not isinstance(value, list):
raise ConfigurationError((
"Tag sets %s invalid, must be a list" ) % repr(value))
if len(value) == 0:
raise ConfigurationError((
"Tag sets %s invalid, must be None or contain at least one set of"
" tags") % repr(value))
for tags in value:
if not isinstance(tags, dict):
raise ConfigurationError(
"Tag set %s invalid, must be a dict" % repr(tags))
return value
def validate_auth_mechanism(option, value):
"""Validate the authMechanism URI option.
"""
if value not in MECHANISMS:
raise ConfigurationError("%s must be in "
"%s" % (option, MECHANISMS))
return value
# jounal is an alias for j,
# wtimeoutms is an alias for wtimeout
VALIDATORS = {
'replicaset': validate_basestring,
'slaveok': validate_boolean,
'slave_okay': validate_boolean,
'safe': validate_boolean,
'w': validate_int_or_basestring,
'wtimeout': validate_integer,
'wtimeoutms': validate_integer,
'fsync': validate_boolean,
'j': validate_boolean,
'journal': validate_boolean,
'connecttimeoutms': validate_timeout_or_none,
'sockettimeoutms': validate_timeout_or_none,
'waitqueuetimeoutms': validate_timeout_or_none,
'waitqueuemultiple': validate_positive_integer_or_none,
'ssl': validate_boolean,
'ssl_keyfile': validate_readable,
'ssl_certfile': validate_readable,
'ssl_cert_reqs': validate_cert_reqs,
'ssl_ca_certs': validate_readable,
'readpreference': validate_read_preference,
'read_preference': validate_read_preference,
'tag_sets': validate_tag_sets,
'secondaryacceptablelatencyms': validate_positive_float,
'secondary_acceptable_latency_ms': validate_positive_float,
'auto_start_request': validate_boolean,
'use_greenlets': validate_boolean,
'authmechanism': validate_auth_mechanism,
'authsource': validate_basestring,
'gssapiservicename': validate_basestring,
}
_AUTH_OPTIONS = frozenset(['gssapiservicename'])
def validate_auth_option(option, value):
"""Validate optional authentication parameters.
"""
lower, value = validate(option, value)
if lower not in _AUTH_OPTIONS:
raise ConfigurationError('Unknown '
'authentication option: %s' % (option,))
return lower, value
def validate(option, value):
"""Generic validation function.
"""
lower = option.lower()
validator = VALIDATORS.get(lower, raise_config_error)
value = validator(option, value)
return lower, value
SAFE_OPTIONS = frozenset([
'w',
'wtimeout',
'wtimeoutms',
'fsync',
'j',
'journal'
])
class WriteConcern(dict):
def __init__(self, *args, **kwargs):
"""A subclass of dict that overrides __setitem__ to
validate write concern options.
"""
super(WriteConcern, self).__init__(*args, **kwargs)
def __setitem__(self, key, value):
if key not in SAFE_OPTIONS:
raise ConfigurationError("%s is not a valid write "
"concern option." % (key,))
key, value = validate(key, value)
super(WriteConcern, self).__setitem__(key, value)
class BaseObject(object):
"""A base class that provides attributes and methods common
to multiple pymongo classes.
SHOULD NOT BE USED BY DEVELOPERS EXTERNAL TO 10GEN
"""
def __init__(self, **options):
self.__slave_okay = False
self.__read_pref = ReadPreference.PRIMARY
self.__tag_sets = [{}]
self.__secondary_acceptable_latency_ms = 15
self.__safe = None
self.__write_concern = WriteConcern()
self.__set_options(options)
if (self.__read_pref == ReadPreference.PRIMARY
and self.__tag_sets != [{}]
):
raise ConfigurationError(
"ReadPreference PRIMARY cannot be combined with tags")
# If safe hasn't been implicitly set by write concerns then set it.
if self.__safe is None:
if options.get("w") == 0:
self.__safe = False
else:
self.__safe = validate_boolean('safe', options.get("safe", True))
# Note: 'safe' is always passed by Connection and ReplicaSetConnection
# Always do the most "safe" thing, but warn about conflicts.
if self.__safe and options.get('w') == 0:
warnings.warn("Conflicting write concerns. 'w' set to 0 "
"but other options have enabled write concern. "
"Please set 'w' to a value other than 0.",
UserWarning)
def __set_safe_option(self, option, value):
"""Validates and sets getlasterror options for this
object (Connection, Database, Collection, etc.)
"""
if value is None:
self.__write_concern.pop(option, None)
else:
self.__write_concern[option] = value
if option != "w" or value != 0:
self.__safe = True
def __set_options(self, options):
"""Validates and sets all options passed to this object."""
for option, value in options.iteritems():
if option in ('slave_okay', 'slaveok'):
self.__slave_okay = validate_boolean(option, value)
elif option in ('read_preference', "readpreference"):
self.__read_pref = validate_read_preference(option, value)
elif option == 'tag_sets':
self.__tag_sets = validate_tag_sets(option, value)
elif option in (
'secondaryacceptablelatencyms',
'secondary_acceptable_latency_ms'
):
self.__secondary_acceptable_latency_ms = \
validate_positive_float(option, value)
elif option in SAFE_OPTIONS:
if option == 'journal':
self.__set_safe_option('j', value)
elif option == 'wtimeoutms':
self.__set_safe_option('wtimeout', value)
else:
self.__set_safe_option(option, value)
def __set_write_concern(self, value):
"""Property setter for write_concern."""
if not isinstance(value, dict):
raise ConfigurationError("write_concern must be an "
"instance of dict or a subclass.")
# Make a copy here to avoid users accidentally setting the
# same dict on multiple instances.
wc = WriteConcern()
for k, v in value.iteritems():
# Make sure we validate each option.
wc[k] = v
self.__write_concern = wc
def __get_write_concern(self):
"""The default write concern for this instance.
Supports dict style access for getting/setting write concern
options. Valid options include:
- `w`: (integer or string) If this is a replica set, write operations
will block until they have been replicated to the specified number
or tagged set of servers. `w=<int>` always includes the replica set
primary (e.g. w=3 means write to the primary and wait until
replicated to **two** secondaries). **Setting w=0 disables write
acknowledgement and all other write concern options.**
- `wtimeout`: (integer) Used in conjunction with `w`. Specify a value
in milliseconds to control how long to wait for write propagation
to complete. If replication does not complete in the given
timeframe, a timeout exception is raised.
- `j`: If ``True`` block until write operations have been committed
to the journal. Ignored if the server is running without journaling.
- `fsync`: If ``True`` force the database to fsync all files before
returning. When used with `j` the server awaits the next group
commit before returning.
>>> m = pymongo.MongoClient()
>>> m.write_concern
{}
>>> m.write_concern = {'w': 2, 'wtimeout': 1000}
>>> m.write_concern
{'wtimeout': 1000, 'w': 2}
>>> m.write_concern['j'] = True
>>> m.write_concern
{'wtimeout': 1000, 'j': True, 'w': 2}
>>> m.write_concern = {'j': True}
>>> m.write_concern
{'j': True}
>>> # Disable write acknowledgement and write concern
...
>>> m.write_concern['w'] = 0
.. note:: Accessing :attr:`write_concern` returns its value
(a subclass of :class:`dict`), not a copy.
.. warning:: If you are using :class:`~pymongo.connection.Connection`
or :class:`~pymongo.replica_set_connection.ReplicaSetConnection`
make sure you explicitly set ``w`` to 1 (or a greater value) or
:attr:`safe` to ``True``. Unlike calling
:meth:`set_lasterror_options`, setting an option in
:attr:`write_concern` does not implicitly set :attr:`safe`
to ``True``.
"""
# To support dict style access we have to return the actual
# WriteConcern here, not a copy.
return self.__write_concern
write_concern = property(__get_write_concern, __set_write_concern)
def __get_slave_okay(self):
"""DEPRECATED. Use :attr:`read_preference` instead.
.. versionchanged:: 2.1
Deprecated slave_okay.
.. versionadded:: 2.0
"""
return self.__slave_okay
def __set_slave_okay(self, value):
"""Property setter for slave_okay"""
warnings.warn("slave_okay is deprecated. Please use "
"read_preference instead.", DeprecationWarning,
stacklevel=2)
self.__slave_okay = validate_boolean('slave_okay', value)
slave_okay = property(__get_slave_okay, __set_slave_okay)
def __get_read_pref(self):
"""The read preference mode for this instance.
See :class:`~pymongo.read_preferences.ReadPreference` for available options.
.. versionadded:: 2.1
"""
return self.__read_pref
def __set_read_pref(self, value):
"""Property setter for read_preference"""
self.__read_pref = validate_read_preference('read_preference', value)
read_preference = property(__get_read_pref, __set_read_pref)
def __get_acceptable_latency(self):
"""Any replica-set member whose ping time is within
secondary_acceptable_latency_ms of the nearest member may accept
reads. Defaults to 15 milliseconds.
See :class:`~pymongo.read_preferences.ReadPreference`.
.. versionadded:: 2.3
.. note:: ``secondary_acceptable_latency_ms`` is ignored when talking to a
replica set *through* a mongos. The equivalent is the localThreshold_ command
line option.
.. _localThreshold: http://docs.mongodb.org/manual/reference/mongos/#cmdoption-mongos--localThreshold
"""
return self.__secondary_acceptable_latency_ms
def __set_acceptable_latency(self, value):
"""Property setter for secondary_acceptable_latency_ms"""
self.__secondary_acceptable_latency_ms = (validate_positive_float(
'secondary_acceptable_latency_ms', value))
secondary_acceptable_latency_ms = property(
__get_acceptable_latency, __set_acceptable_latency)
def __get_tag_sets(self):
"""Set ``tag_sets`` to a list of dictionaries like [{'dc': 'ny'}] to
read only from members whose ``dc`` tag has the value ``"ny"``.
To specify a priority-order for tag sets, provide a list of
tag sets: ``[{'dc': 'ny'}, {'dc': 'la'}, {}]``. A final, empty tag
set, ``{}``, means "read from any member that matches the mode,
ignoring tags." ReplicaSetConnection tries each set of tags in turn
until it finds a set of tags with at least one matching member.
.. seealso:: `Data-Center Awareness
<http://www.mongodb.org/display/DOCS/Data+Center+Awareness>`_
.. versionadded:: 2.3
"""
return self.__tag_sets
def __set_tag_sets(self, value):
"""Property setter for tag_sets"""
self.__tag_sets = validate_tag_sets('tag_sets', value)
tag_sets = property(__get_tag_sets, __set_tag_sets)
def __get_safe(self):
"""**DEPRECATED:** Use the 'w' :attr:`write_concern` option instead.
Use getlasterror with every write operation?
.. versionadded:: 2.0
"""
return self.__safe
def __set_safe(self, value):
"""Property setter for safe"""
warnings.warn("safe is deprecated. Please use the"
" 'w' write_concern option instead.",
DeprecationWarning, stacklevel=2)
self.__safe = validate_boolean('safe', value)
safe = property(__get_safe, __set_safe)
def get_lasterror_options(self):
"""DEPRECATED: Use :attr:`write_concern` instead.
Returns a dict of the getlasterror options set on this instance.
.. versionchanged:: 2.4
Deprecated get_lasterror_options.
.. versionadded:: 2.0
"""
warnings.warn("get_lasterror_options is deprecated. Please use "
"write_concern instead.", DeprecationWarning,
stacklevel=2)
return self.__write_concern.copy()
def set_lasterror_options(self, **kwargs):
"""DEPRECATED: Use :attr:`write_concern` instead.
Set getlasterror options for this instance.
Valid options include j=<bool>, w=<int/string>, wtimeout=<int>,
and fsync=<bool>. Implies safe=True.
:Parameters:
- `**kwargs`: Options should be passed as keyword
arguments (e.g. w=2, fsync=True)
.. versionchanged:: 2.4
Deprecated set_lasterror_options.
.. versionadded:: 2.0
"""
warnings.warn("set_lasterror_options is deprecated. Please use "
"write_concern instead.", DeprecationWarning,
stacklevel=2)
for key, value in kwargs.iteritems():
self.__set_safe_option(key, value)
def unset_lasterror_options(self, *options):
"""DEPRECATED: Use :attr:`write_concern` instead.
Unset getlasterror options for this instance.
If no options are passed unsets all getlasterror options.
This does not set `safe` to False.
:Parameters:
- `*options`: The list of options to unset.
.. versionchanged:: 2.4
Deprecated unset_lasterror_options.
.. versionadded:: 2.0
"""
warnings.warn("unset_lasterror_options is deprecated. Please use "
"write_concern instead.", DeprecationWarning,
stacklevel=2)
if len(options):
for option in options:
self.__write_concern.pop(option, None)
else:
self.__write_concern = WriteConcern()
def _get_wc_override(self):
"""Get write concern override.
Used in internal methods that **must** do acknowledged write ops.
We don't want to override user write concern options if write concern
is already enabled.
"""
if self.safe and self.__write_concern.get('w') != 0:
return {}
return {'w': 1}
def _get_write_mode(self, safe=None, **options):
"""Get the current write mode.
Determines if the current write is safe or not based on the
passed in or inherited safe value, write_concern values, or
passed options.
:Parameters:
- `safe`: check that the operation succeeded?
- `**options`: overriding write concern options.
.. versionadded:: 2.3
"""
# Don't ever send w=1 to the server.
def pop1(dct):
if dct.get('w') == 1:
dct.pop('w')
return dct
if safe is not None:
warnings.warn("The safe parameter is deprecated. Please use "
"write concern options instead.", DeprecationWarning,
stacklevel=3)
validate_boolean('safe', safe)
# Passed options override collection level defaults.
if safe is not None or options:
if safe or options:
if not options:
options = self.__write_concern.copy()
# Backwards compatability edge case. Call getLastError
# with no options if safe=True was passed but collection
# level defaults have been disabled with w=0.
# These should be equivalent:
# Connection(w=0).foo.bar.insert({}, safe=True)
# MongoClient(w=0).foo.bar.insert({}, w=1)
if options.get('w') == 0:
return True, {}
# Passing w=0 overrides passing safe=True.
return options.get('w') != 0, pop1(options)
return False, {}
# Fall back to collection level defaults.
# w=0 takes precedence over self.safe = True
if self.__write_concern.get('w') == 0:
return False, {}
elif self.safe or self.__write_concern.get('w', 0) != 0:
return True, pop1(self.__write_concern.copy())
return False, {}
| [
"xuning992@163.com"
] | xuning992@163.com |
c37c536fc92fc60bf9634767a43b3d98a1823423 | a995e950a202251be3c674f4b7df3fc0111b0c9b | /isur2/urls.py | 3681e1f549407dbfc8d61c91b00c841a97ae6509 | [] | no_license | asmuratbek/isurlessons | 83188f1ba3469cb92274501fe466bbafc07281e9 | 8fd17b6f57ee16c6cee3361808256f58618fd5ce | refs/heads/master | 2021-01-20T13:17:10.068541 | 2017-05-13T14:17:43 | 2017-05-13T14:17:43 | 90,466,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,164 | py | """isur2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
import comments
import news.urls
from isur2 import settings
from social.views import *
urlpatterns = [
url(r'^jet/', include('jet.urls', 'jet')),
url(r'^jet/dashboard/', include('jet.dashboard.urls', 'jet-dashboard')),
url(r'^admin/', admin.site.urls),
url(r'^$', index, name='index'),
url(r'^blog/$', BlogListView.as_view(), name='blog'),
url(r'^thanks/news/', include('news.urls', namespace='create_news')),
url(r'^thanks/blog/', include('blog.urls', namespace='create_blog')),
url(r'^news/$', NewsListView.as_view(), name='news'),
url(r'^news/create/', NewsCreateView.as_view(), name='news_create'),
url(r'^blog/create/', BlogCreateView.as_view(), name='blog_create'),
url(r'^blog/(?P<pk>[0-9]+)/update/$', BlogUpdateView.as_view(), name='blog_update'),
# url(r'^news/get/(?P<pk>[0-9]+)$', NewsDetailView.as_view(), name='get_news'),
url(r'^news/(?P<id>[0-9]+)$', add_news_comment, name='get_news'),
url(r'^blog/get/(?P<id>[0-9]+)$', get_blog, name='get_blog'),
url(r'^comments/add$', comments_add, name='comments_add'),
url(r'^comments/$', comments_all, name='comments_all'),
url(r'^ckeditor/', include('ckeditor_uploader.urls')),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# urlpatterns += staticfiles_urlpatterns()
| [
"asmuratbek@gmail.com"
] | asmuratbek@gmail.com |
944ba90b1bb81dd83b295172537f36ca3534888c | 2e676811db543030bf35b948012570a421683c17 | /tensor2tensor/utils/decoding.py | d84fd740be53eacabaafe945c79042b8b9c2f678 | [
"Apache-2.0"
] | permissive | laulysta/tensor2tensor | 56ecf42e516f5c27c8c02c4d31ea7b0523764cf8 | 4c4245c160269b53816e198d9c7563f016b94793 | refs/heads/master | 2021-07-01T03:25:35.557372 | 2017-09-21T23:57:46 | 2017-09-21T23:57:46 | 104,119,810 | 0 | 0 | null | 2017-09-19T19:36:02 | 2017-09-19T19:36:02 | null | UTF-8 | Python | false | false | 21,439 | py | # coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoding utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import os
# Dependency imports
import numpy as np
import six
from six.moves import input # pylint: disable=redefined-builtin
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.utils import data_reader
from tensor2tensor.utils import devices
from tensor2tensor.utils import input_fn_builder
import tensorflow as tf
FLAGS = tf.flags.FLAGS
# Number of samples to draw for an image input (in such cases as captioning)
IMAGE_DECODE_LENGTH = 100
def decode_hparams(overrides=""):
  """Hyperparameters controlling decoding.

  Args:
    overrides: comma-separated "name=value" string parsed on top of the
      defaults below (HParams.parse raises on unknown names).

  Returns:
    tf.contrib.training.HParams with:
      use_last_position_only: bool, fast decoding for transformer-like models.
      save_images: bool, save image inputs alongside predictions.
      problem_idx: int, index into hparams.problems to decode.
      extra_length: int, extra output length allowed beyond the input length.
      batch_size: int, decode batch size.
      beam_size: int, beam-search width.
      alpha: float, beam-search length penalty.
      return_beams: bool, return all beams rather than the top one.
      max_input_size: int, truncate inputs to this length (-1 = no limit).
      identity_output: bool, emit raw ids instead of vocab-decoded text.
      num_samples: int, stop after this many examples (-1 = all).
      shards: int, number of decoding shards; read by decode_from_file.
  """
  hp = tf.contrib.training.HParams(
      use_last_position_only=False,
      save_images=False,
      problem_idx=0,
      extra_length=50,
      batch_size=32,
      beam_size=4,
      alpha=0.6,
      return_beams=False,
      max_input_size=-1,
      identity_output=False,
      num_samples=-1,
      # Previously missing: decode_from_file reads decode_hp.shards, which
      # failed unless callers added the hparam themselves.
      shards=1)
  hp = hp.parse(overrides)
  return hp
def log_decode_results(inputs,
                       outputs,
                       problem_name,
                       prediction_idx,
                       inputs_vocab,
                       targets_vocab,
                       targets=None,
                       save_images=False,
                       model_dir=None,
                       identity_output=False):
  """Log one inference result and return its decoded output/target strings.

  For image problems (with save_images=True) the input image is written to
  model_dir; otherwise the input ids are vocab-decoded and logged. Outputs
  (and targets, when given) are rendered either as raw ids
  (identity_output=True) or as vocab-decoded text, then logged.

  Returns:
    (decoded_outputs, decoded_targets); decoded_targets is None when no
    targets were supplied.
  """
  if "image" in problem_name and save_images:
    image_path = os.path.join(model_dir, "%s_prediction_%d.jpg" %
                              (problem_name, prediction_idx))
    show_and_save_image(inputs / 255., image_path)
  elif inputs_vocab:
    readable_inputs = inputs_vocab.decode(_save_until_eos(inputs.flatten()))
    tf.logging.info("Inference results INPUT: %s" % readable_inputs)

  def _render(array):
    # Flatten, then render either the raw ids or the vocab-decoded text.
    flat = array.flatten()
    if identity_output:
      return "".join(map(str, flat))
    return "".join(map(str, targets_vocab.decode(_save_until_eos(flat))))

  decoded_outputs = _render(outputs)
  decoded_targets = _render(targets) if targets is not None else None

  tf.logging.info("Inference results OUTPUT: %s" % decoded_outputs)
  if targets is not None:
    tf.logging.info("Inference results TARGET: %s" % decoded_targets)

  return decoded_outputs, decoded_targets
def decode_from_dataset(estimator,
                        problem_names,
                        decode_hp,
                        decode_to_file=None):
  """Decode the dataset of each problem and log (optionally save) results.

  Args:
    estimator: tf.estimator.Estimator (or compatible) used for prediction;
      its params are the model hparams.
    problem_names: list of problem-name strings, in the same order as
      hparams.problems.
    decode_hp: decoding HParams (see decode_hparams()).
    decode_to_file: if set, base filepath; per-problem decode and target
      files are derived from it and written one example per line.
  """
  tf.logging.info("Performing local inference from dataset for %s.",
                  str(problem_names))
  hparams = estimator.params
  for problem_idx, problem_name in enumerate(problem_names):
    # Build the inference input function for this problem only.
    infer_problems_data = data_reader.get_data_filepatterns(
        problem_name, hparams.data_dir, tf.estimator.ModeKeys.PREDICT)

    infer_input_fn = input_fn_builder.build_input_fn(
        mode=tf.estimator.ModeKeys.PREDICT,
        hparams=hparams,
        data_file_patterns=infer_problems_data,
        num_datashards=devices.data_parallelism().n,
        fixed_problem=problem_idx)

    # Get the predictions as an iterable.
    predictions = estimator.predict(infer_input_fn)

    # Prepare output file writers if decode_to_file passed.
    if decode_to_file:
      output_filepath = _decode_filename(decode_to_file, problem_name,
                                         decode_hp)
      # The targets file shares the path but swaps the final extension.
      parts = output_filepath.split(".")
      parts[-1] = "targets"
      target_filepath = ".".join(parts)

      output_file = tf.gfile.Open(output_filepath, "w")
      target_file = tf.gfile.Open(target_filepath, "w")

    problem_hparams = hparams.problems[problem_idx]
    inputs_vocab = problem_hparams.vocabulary.get("inputs", None)
    targets_vocab = problem_hparams.vocabulary["targets"]
    # Counter is initialized before the loop so the final log is valid even
    # when the prediction iterable is empty (previously raised NameError).
    num_predictions = 0
    for prediction in predictions:
      num_predictions += 1
      inputs = prediction["inputs"]
      targets = prediction["targets"]
      outputs = prediction["outputs"]

      # Log predictions (one result per beam when return_beams is set).
      decoded_outputs = []
      if decode_hp.return_beams:
        output_beams = np.split(outputs, decode_hp.beam_size, axis=0)
        for i, beam in enumerate(output_beams):
          tf.logging.info("BEAM %d:" % i)
          decoded = log_decode_results(
              inputs,
              beam,
              problem_name,
              num_predictions,
              inputs_vocab,
              targets_vocab,
              save_images=decode_hp.save_images,
              model_dir=estimator.model_dir,
              identity_output=decode_hp.identity_output,
              targets=targets)
          decoded_outputs.append(decoded)
      else:
        decoded = log_decode_results(
            inputs,
            outputs,
            problem_name,
            num_predictions,
            inputs_vocab,
            targets_vocab,
            save_images=decode_hp.save_images,
            model_dir=estimator.model_dir,
            identity_output=decode_hp.identity_output,
            targets=targets)
        decoded_outputs.append(decoded)

      # Write out predictions if decode_to_file passed.
      if decode_to_file:
        for decoded_output, decoded_target in decoded_outputs:
          output_file.write(str(decoded_output) + "\n")
          target_file.write(str(decoded_target) + "\n")

      # Stop after exactly num_samples examples. Counting from 1 fixes the
      # previous off-by-one, which processed num_samples + 1 examples.
      if 0 <= decode_hp.num_samples <= num_predictions:
        break

    if decode_to_file:
      output_file.close()
      target_file.close()

    tf.logging.info("Completed inference on %d samples." % num_predictions)
def decode_from_file(estimator, filename, decode_hp, decode_to_file=None):
  """Compute predictions on entries in filename and write them out.

  Reads one input per line from `filename`, decodes them with `estimator`,
  and writes the decodes (tab-joined beams if decode_hp.return_beams) to
  `decode_to_file` if given, else to a name derived from `filename`, in the
  original input order.
  """
  hparams = estimator.params
  problem_id = decode_hp.problem_idx
  # Vocabularies of the single problem selected by problem_idx.
  inputs_vocab = hparams.problems[problem_id].vocabulary["inputs"]
  targets_vocab = hparams.problems[problem_id].vocabulary["targets"]
  problem_name = FLAGS.problems.split("-")[problem_id]
  tf.logging.info("Performing decoding from a file.")
  # NOTE(review): decode_hp.shards is read here and below, but decode_hparams()
  # does not define a "shards" default -- confirm callers add it before use.
  # Inputs are sorted (by length, presumably) for batching efficiency;
  # sorted_keys maps sorted position back to the original line order.
  sorted_inputs, sorted_keys = _get_sorted_inputs(filename, decode_hp.shards)
  num_decode_batches = (len(sorted_inputs) - 1) // decode_hp.batch_size + 1

  def input_fn():
    # Batches of encoded inputs are produced by a Python generator and fed
    # to the estimator through a py_func-backed input function.
    input_gen = _decode_batch_input_fn(
        problem_id, num_decode_batches, sorted_inputs, inputs_vocab,
        decode_hp.batch_size, decode_hp.max_input_size)
    gen_fn = make_input_fn_from_generator(input_gen)
    example = gen_fn()
    return _decode_input_tensor_to_features_dict(example, hparams)

  decodes = []
  result_iter = estimator.predict(input_fn)
  for result in result_iter:
    if decode_hp.return_beams:
      # One decoded string per beam, tab-joined into a single output line.
      beam_decodes = []
      output_beams = np.split(result["outputs"], decode_hp.beam_size, axis=0)
      for k, beam in enumerate(output_beams):
        tf.logging.info("BEAM %d:" % k)
        decoded_outputs, _ = log_decode_results(result["inputs"], beam,
                                                problem_name, None,
                                                inputs_vocab, targets_vocab)
        beam_decodes.append(decoded_outputs)
      decodes.append("\t".join(beam_decodes))
    else:
      decoded_outputs, _ = log_decode_results(result["inputs"],
                                              result["outputs"], problem_name,
                                              None, inputs_vocab, targets_vocab)
      decodes.append(decoded_outputs)

  # Reversing the decoded inputs and outputs because they were reversed in
  # _decode_batch_input_fn
  sorted_inputs.reverse()
  decodes.reverse()
  # Dumping inputs and outputs to file filename.decodes in
  # format result\tinput in the same order as original inputs
  if decode_to_file:
    output_filename = decode_to_file
  else:
    output_filename = filename
  if decode_hp.shards > 1:
    # Each worker shard writes to its own zero-padded suffix.
    base_filename = output_filename + ("%.2d" % FLAGS.worker_id)
  else:
    base_filename = output_filename
  decode_filename = _decode_filename(base_filename, problem_name, decode_hp)
  tf.logging.info("Writing decodes into %s" % decode_filename)
  outfile = tf.gfile.Open(decode_filename, "w")
  for index in range(len(sorted_inputs)):
    # sorted_keys restores the original input order for the written decodes.
    outfile.write("%s\n" % (decodes[sorted_keys[index]]))
def _decode_filename(base_filename, problem_name, decode_hp):
  """Build the decode output path: base.model.hp.problem.beamB.alphaA.decodes."""
  suffix = ".".join([
      FLAGS.model,
      FLAGS.hparams_set,
      problem_name,
      "beam" + str(decode_hp.beam_size),
      "alpha" + str(decode_hp.alpha),
      "decodes",
  ])
  return base_filename + "." + suffix
def make_input_fn_from_generator(gen):
  """Use py_func to yield elements from the given generator.

  Pulls the first element eagerly to learn the flattened dtypes and ranks,
  then returns an input_fn whose py_func replays that first element before
  continuing with the rest of the generator.
  """
  first_example = six.next(gen)
  flat_first = tf.contrib.framework.nest.flatten(first_example)
  dtypes = [t.dtype for t in flat_first]
  # Only the rank is known up front; every dimension stays unknown.
  shapes = [[None] * len(t.shape) for t in flat_first]
  pending_first = [first_example]

  def py_func():
    # Replay the eagerly-consumed first element exactly once.
    example = pending_first.pop() if pending_first else six.next(gen)
    return tf.contrib.framework.nest.flatten(example)

  def input_fn():
    flat_example = tf.py_func(py_func, [], dtypes)
    for tensor, shape in zip(flat_example, shapes):
      tensor.set_shape(shape)
    return tf.contrib.framework.nest.pack_sequence_as(first_example,
                                                      flat_example)

  return input_fn
def decode_interactively(estimator, decode_hp):
  """Interactive decoding.

  Repeatedly reads inputs from the console (via _interactive_input_fn) and
  logs the decoded predictions until the input generator stops.
  """
  hparams = estimator.params

  def input_fn():
    # Console input is fed through a py_func-backed generator input_fn.
    gen_fn = make_input_fn_from_generator(_interactive_input_fn(hparams))
    example = gen_fn()
    example = _interactive_input_tensor_to_features_dict(example, hparams)
    return example

  result_iter = estimator.predict(input_fn)
  for result in result_iter:
    # Each result carries the problem it was decoded under; pick that
    # problem's target vocabulary for rendering.
    problem_idx = result["problem_choice"]
    targets_vocab = hparams.problems[problem_idx].vocabulary["targets"]

    if decode_hp.return_beams:
      beams = np.split(result["outputs"], decode_hp.beam_size, axis=0)
      scores = None
      if "scores" in result:
        scores = np.split(result["scores"], decode_hp.beam_size, axis=0)
      for k, beam in enumerate(beams):
        tf.logging.info("BEAM %d:" % k)
        beam_string = targets_vocab.decode(_save_until_eos(beam.flatten()))
        if scores is not None:
          tf.logging.info("%s\tScore:%f" % (beam_string, scores[k]))
        else:
          tf.logging.info(beam_string)
    else:
      if decode_hp.identity_output:
        # Raw ids, space-separated, instead of vocab-decoded text.
        tf.logging.info(" ".join(map(str, result["outputs"].flatten())))
      else:
        tf.logging.info(
            targets_vocab.decode(_save_until_eos(result["outputs"].flatten())))
def _decode_batch_input_fn(problem_id, num_decode_batches, sorted_inputs,
                           vocabulary, batch_size, max_input_size):
  """Yield feed dicts of encoded, zero-padded input batches."""
  tf.logging.info(" batch %d" % num_decode_batches)
  # First reverse all the input sentences so that if you're going to get OOMs,
  # you'll see it in the first batch
  sorted_inputs.reverse()
  for batch_idx in range(num_decode_batches):
    tf.logging.info("Decoding batch %d" % batch_idx)
    window = sorted_inputs[batch_idx * batch_size:(batch_idx + 1) * batch_size]
    encoded_batch = []
    longest = 0
    for line in window:
      token_ids = vocabulary.encode(line)
      if max_input_size > 0:
        # Subtract 1 for the EOS_ID.
        token_ids = token_ids[:max_input_size - 1]
      token_ids.append(text_encoder.EOS_ID)
      encoded_batch.append(token_ids)
      longest = max(longest, len(token_ids))
    # Right-pad every row with zeros up to the longest row in the batch.
    padded_batch = [ids + [0] * (longest - len(ids)) for ids in encoded_batch]
    yield {
        "inputs": np.array(padded_batch).astype(np.int32),
        "problem_choice": np.array(problem_id).astype(np.int32),
    }
def _interactive_input_fn(hparams):
  """Generator that reads from the terminal and yields "interactive inputs".

  Due to temporary limitations in tf.learn, if we don't want to reload the
  whole graph, then we are stuck encoding all of the input as one fixed-size
  numpy array.

  We yield int32 arrays with shape [const_array_size]. The format is:
  [num_samples, decode_length, len(input ids), <input ids>, <padding>]

  Args:
    hparams: model hparams
  Yields:
    numpy arrays
  Raises:
    Exception: when `input_type` is invalid.
  """
  num_samples = 1
  decode_length = 100
  input_type = "text"
  problem_id = 0
  p_hparams = hparams.problems[problem_id]
  has_input = "inputs" in p_hparams.input_modality
  # Encode with the input vocabulary when the problem has inputs, otherwise
  # with the target vocabulary (language-model style).
  vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"]
  # This should be longer than the longest input.
  const_array_size = 10000
  # Import readline if available for command line editing and recall.
  try:
    import readline  # pylint: disable=g-import-not-at-top,unused-variable
  except ImportError:
    pass
  while True:
    prompt = ("INTERACTIVE MODE num_samples=%d decode_length=%d \n"
              " it=<input_type> ('text' or 'image' or 'label', default: "
              "text)\n"
              " pr=<problem_num> (set the problem number, default: 0)\n"
              " in=<input_problem> (set the input problem number)\n"
              " ou=<output_problem> (set the output problem number)\n"
              " ns=<num_samples> (changes number of samples, default: 1)\n"
              " dl=<decode_length> (changes decode length, default: 100)\n"
              " <%s> (decode)\n"
              " q (quit)\n"
              ">" % (num_samples, decode_length, "source_string"
                     if has_input else "target_prefix"))
    input_string = input(prompt)
    if input_string == "q":
      return
    elif input_string[:3] == "pr=":
      # Switch to a different problem; refresh modality and vocabulary.
      problem_id = int(input_string[3:])
      p_hparams = hparams.problems[problem_id]
      has_input = "inputs" in p_hparams.input_modality
      vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"]
    elif input_string[:3] == "in=":
      # NOTE(review): mutates the current problem's hparams in place.
      problem = int(input_string[3:])
      p_hparams.input_modality = hparams.problems[problem].input_modality
      p_hparams.input_space_id = hparams.problems[problem].input_space_id
    elif input_string[:3] == "ou=":
      problem = int(input_string[3:])
      p_hparams.target_modality = hparams.problems[problem].target_modality
      p_hparams.target_space_id = hparams.problems[problem].target_space_id
    elif input_string[:3] == "ns=":
      num_samples = int(input_string[3:])
    elif input_string[:3] == "dl=":
      decode_length = int(input_string[3:])
    elif input_string[:3] == "it=":
      input_type = input_string[3:]
    else:
      if input_type == "text":
        input_ids = vocabulary.encode(input_string)
        if has_input:
          input_ids.append(text_encoder.EOS_ID)
        # Pack [num_samples, decode_length, len(ids), ids...] then zero-pad.
        x = [num_samples, decode_length, len(input_ids)] + input_ids
        assert len(x) < const_array_size
        x += [0] * (const_array_size - len(x))
        yield {
            "inputs": np.array(x).astype(np.int32),
            "problem_choice": np.array(problem_id).astype(np.int32)
        }
      elif input_type == "image":
        # The raw pixel array is yielded directly (no packing).
        input_path = input_string
        img = read_image(input_path)
        yield {
            "inputs": img.astype(np.int32),
            "problem_choice": np.array(problem_id).astype(np.int32)
        }
      elif input_type == "label":
        input_ids = [int(input_string)]
        x = [num_samples, decode_length, len(input_ids)] + input_ids
        yield {
            "inputs": np.array(x).astype(np.int32),
            "problem_choice": np.array(problem_id).astype(np.int32)
        }
      else:
        raise Exception("Unsupported input type.")
def read_image(path):
  """Read the image at `path` into a numpy array (requires matplotlib)."""
  try:
    import matplotlib.image as mpimg  # pylint: disable=g-import-not-at-top
  except ImportError as err:
    tf.logging.warning(
        "Reading an image requires matplotlib to be installed: %s", err)
    raise NotImplementedError("Image reading not implemented.")
  else:
    return mpimg.imread(path)
def show_and_save_image(img, save_path):
  """Render `img` with matplotlib and write the figure to `save_path`."""
  try:
    import matplotlib.pyplot as plot_lib  # pylint: disable=g-import-not-at-top
  except ImportError as err:
    tf.logging.warning("Showing and saving an image requires matplotlib to be "
                       "installed: %s", err)
    raise NotImplementedError("Image display and save not implemented.")
  else:
    plot_lib.imshow(img)
    plot_lib.savefig(save_path)
def _get_sorted_inputs(filename, num_shards=1):
  """Read decode inputs from a file and return them sorted by token count.

  Args:
    filename: path to file with inputs, 1 per line.
    num_shards: number of input shards. If > 1, will read from file
      filename.XX, where XX is FLAGS.worker_id.

  Returns:
    (sorted_inputs, sorted_keys): inputs sorted shortest-first, plus a map
    from each original line index to its position in the sorted list, so
    callers can restore the original order.
  """
  tf.logging.info("Getting sorted inputs")
  shard_suffix = ("%.2d" % FLAGS.worker_id) if num_shards > 1 else ""
  decode_filename = filename + shard_suffix
  inputs = [line.strip() for line in tf.gfile.Open(decode_filename)]
  # Stable sort of line indices by whitespace-token count; ties keep their
  # original relative order, matching a sort over (index, length) pairs.
  order = sorted(range(len(inputs)), key=lambda i: len(inputs[i].split()))
  sorted_inputs = [inputs[i] for i in order]
  sorted_keys = {original: position for position, original in enumerate(order)}
  return sorted_inputs, sorted_keys
def _save_until_eos(hyp):
  """Strips everything after the first <EOS> token, which is normally 1."""
  tokens = list(hyp)
  if text_encoder.EOS_ID not in tokens:
    # No EOS_ID anywhere: return the array as-is.
    return hyp
  return hyp[0:tokens.index(text_encoder.EOS_ID)]
def _interactive_input_tensor_to_features_dict(feature_map, hparams):
  """Convert the interactive input format (see above) to a features dict.

  Args:
    feature_map: a dictionary with keys `problem_choice` and `inputs`
      containing Tensors. `inputs` is either the packed 1-D int vector
      produced by _interactive_input_fn, or an image tensor of rank >= 3.
    hparams: model hyperparameters

  Returns:
    a features dictionary, as expected by the decoder.
  """
  inputs = tf.convert_to_tensor(feature_map["inputs"])
  # Rank >= 3 means an image (H, W, C); lower rank is the packed text format.
  # (Replaces the non-idiomatic `False if ... else True`.)
  input_is_image = len(inputs.get_shape()) >= 3

  def input_fn(problem_choice, x=inputs):  # pylint: disable=missing-docstring
    if input_is_image:
      x = tf.image.resize_images(x, [299, 299])
      x = tf.reshape(x, [1, 299, 299, -1])
      x = tf.to_int32(x)
    else:
      # The packed vector is [num_samples, decode_length, len(ids), ids...];
      # slice out the ids and drop the header.
      num_samples = x[0]
      length = x[2]
      x = tf.slice(x, [3], tf.to_int32([length]))
      x = tf.reshape(x, [1, -1, 1, 1])
      # Transform into a batch of size num_samples to get that many random
      # decodes.
      x = tf.tile(x, tf.to_int32([num_samples, 1, 1, 1]))
    p_hparams = hparams.problems[problem_choice]
    return (tf.constant(p_hparams.input_space_id),
            tf.constant(p_hparams.target_space_id), x)

  input_space_id, target_space_id, x = input_fn_builder.cond_on_index(
      input_fn, feature_map["problem_choice"], len(hparams.problems) - 1)
  features = {}
  features["problem_choice"] = tf.convert_to_tensor(
      feature_map["problem_choice"])
  features["input_space_id"] = input_space_id
  features["target_space_id"] = target_space_id
  # For images the true decode length is fixed; for text it rides in the
  # packed header at position 1.
  features["decode_length"] = (IMAGE_DECODE_LENGTH
                               if input_is_image else inputs[1])
  features["inputs"] = x
  return features
def _decode_input_tensor_to_features_dict(feature_map, hparams):
  """Convert decode-time inputs to a features dictionary.

  NOTE(review): the original docstring said "interactive input format",
  which looks like a copy-paste from the interactive variant above; here
  `inputs` is presumably a [batch, length] id tensor from the batch decode
  path — confirm against callers.

  Args:
    feature_map: a dictionary with keys `problem_choice` and `inputs`
      containing Tensors.
    hparams: model hyperparameters

  Returns:
    a features dictionary, as expected by the decoder.
  """
  inputs = tf.convert_to_tensor(feature_map["inputs"])
  # Hard-coded False: the image branch of decode_length below is dead on
  # this path.
  input_is_image = False

  def input_fn(problem_choice, x=inputs):  # pylint: disable=missing-docstring
    p_hparams = hparams.problems[problem_choice]
    # Add an empty third dimension.
    # NOTE(review): `axis` is passed as a list; tf.expand_dims documents an
    # integer axis — works in TF 1.x but worth confirming.
    x = tf.expand_dims(x, axis=[2])
    x = tf.to_int32(x)
    return (tf.constant(p_hparams.input_space_id),
            tf.constant(p_hparams.target_space_id), x)

  input_space_id, target_space_id, x = input_fn_builder.cond_on_index(
      input_fn, feature_map["problem_choice"], len(hparams.problems) - 1)
  features = {}
  features["problem_choice"] = feature_map["problem_choice"]
  features["input_space_id"] = input_space_id
  features["target_space_id"] = target_space_id
  # Decode up to 50 tokens beyond the input length.
  features["decode_length"] = (IMAGE_DECODE_LENGTH
                               if input_is_image else tf.shape(x)[1] + 50)
  features["inputs"] = x
  return features
| [
"rsepassi@google.com"
] | rsepassi@google.com |
20398c6e71f8ec8412661b985f222cd7d226d5d3 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/databoxedge/v20200901/share.py | df5018071629f9b91ddffa40e82cdcdda468e151 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,808 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ShareArgs', 'Share']
# NOTE(review): auto-generated by the Pulumi SDK Generator (see file header);
# edits here are documentation-only.
@pulumi.input_type
class ShareArgs:
    def __init__(__self__, *,
                 access_protocol: pulumi.Input[Union[str, 'ShareAccessProtocol']],
                 device_name: pulumi.Input[str],
                 monitoring_status: pulumi.Input[Union[str, 'MonitoringStatus']],
                 resource_group_name: pulumi.Input[str],
                 share_status: pulumi.Input[Union[str, 'ShareStatus']],
                 azure_container_info: Optional[pulumi.Input['AzureContainerInfoArgs']] = None,
                 client_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]]] = None,
                 data_policy: Optional[pulumi.Input[Union[str, 'DataPolicy']]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 refresh_details: Optional[pulumi.Input['RefreshDetailsArgs']] = None,
                 user_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]]] = None):
        """
        The set of arguments for constructing a Share resource.
        :param pulumi.Input[Union[str, 'ShareAccessProtocol']] access_protocol: Access protocol to be used by the share.
        :param pulumi.Input[str] device_name: The device name.
        :param pulumi.Input[Union[str, 'MonitoringStatus']] monitoring_status: Current monitoring status of the share.
        :param pulumi.Input[str] resource_group_name: The resource group name.
        :param pulumi.Input[Union[str, 'ShareStatus']] share_status: Current status of the share.
        :param pulumi.Input['AzureContainerInfoArgs'] azure_container_info: Azure container mapping for the share.
        :param pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]] client_access_rights: List of IP addresses and corresponding access rights on the share(required for NFS protocol).
        :param pulumi.Input[Union[str, 'DataPolicy']] data_policy: Data policy of the share.
        :param pulumi.Input[str] description: Description for the share.
        :param pulumi.Input[str] name: The share name.
        :param pulumi.Input['RefreshDetailsArgs'] refresh_details: Details of the refresh job on this share.
        :param pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]] user_access_rights: Mapping of users and corresponding access rights on the share (required for SMB protocol).
        """
        # Required properties are set unconditionally; optional ones only when
        # supplied, so unset values never reach the provider.
        pulumi.set(__self__, "access_protocol", access_protocol)
        pulumi.set(__self__, "device_name", device_name)
        pulumi.set(__self__, "monitoring_status", monitoring_status)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "share_status", share_status)
        if azure_container_info is not None:
            pulumi.set(__self__, "azure_container_info", azure_container_info)
        if client_access_rights is not None:
            pulumi.set(__self__, "client_access_rights", client_access_rights)
        if data_policy is not None:
            pulumi.set(__self__, "data_policy", data_policy)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if refresh_details is not None:
            pulumi.set(__self__, "refresh_details", refresh_details)
        if user_access_rights is not None:
            pulumi.set(__self__, "user_access_rights", user_access_rights)

    # Each property below delegates storage to pulumi.get/pulumi.set, as
    # required by the @pulumi.input_type protocol.
    @property
    @pulumi.getter(name="accessProtocol")
    def access_protocol(self) -> pulumi.Input[Union[str, 'ShareAccessProtocol']]:
        """
        Access protocol to be used by the share.
        """
        return pulumi.get(self, "access_protocol")

    @access_protocol.setter
    def access_protocol(self, value: pulumi.Input[Union[str, 'ShareAccessProtocol']]):
        pulumi.set(self, "access_protocol", value)

    @property
    @pulumi.getter(name="deviceName")
    def device_name(self) -> pulumi.Input[str]:
        """
        The device name.
        """
        return pulumi.get(self, "device_name")

    @device_name.setter
    def device_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "device_name", value)

    @property
    @pulumi.getter(name="monitoringStatus")
    def monitoring_status(self) -> pulumi.Input[Union[str, 'MonitoringStatus']]:
        """
        Current monitoring status of the share.
        """
        return pulumi.get(self, "monitoring_status")

    @monitoring_status.setter
    def monitoring_status(self, value: pulumi.Input[Union[str, 'MonitoringStatus']]):
        pulumi.set(self, "monitoring_status", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The resource group name.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="shareStatus")
    def share_status(self) -> pulumi.Input[Union[str, 'ShareStatus']]:
        """
        Current status of the share.
        """
        return pulumi.get(self, "share_status")

    @share_status.setter
    def share_status(self, value: pulumi.Input[Union[str, 'ShareStatus']]):
        pulumi.set(self, "share_status", value)

    @property
    @pulumi.getter(name="azureContainerInfo")
    def azure_container_info(self) -> Optional[pulumi.Input['AzureContainerInfoArgs']]:
        """
        Azure container mapping for the share.
        """
        return pulumi.get(self, "azure_container_info")

    @azure_container_info.setter
    def azure_container_info(self, value: Optional[pulumi.Input['AzureContainerInfoArgs']]):
        pulumi.set(self, "azure_container_info", value)

    @property
    @pulumi.getter(name="clientAccessRights")
    def client_access_rights(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]]]:
        """
        List of IP addresses and corresponding access rights on the share(required for NFS protocol).
        """
        return pulumi.get(self, "client_access_rights")

    @client_access_rights.setter
    def client_access_rights(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]]]):
        pulumi.set(self, "client_access_rights", value)

    @property
    @pulumi.getter(name="dataPolicy")
    def data_policy(self) -> Optional[pulumi.Input[Union[str, 'DataPolicy']]]:
        """
        Data policy of the share.
        """
        return pulumi.get(self, "data_policy")

    @data_policy.setter
    def data_policy(self, value: Optional[pulumi.Input[Union[str, 'DataPolicy']]]):
        pulumi.set(self, "data_policy", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description for the share.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The share name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="refreshDetails")
    def refresh_details(self) -> Optional[pulumi.Input['RefreshDetailsArgs']]:
        """
        Details of the refresh job on this share.
        """
        return pulumi.get(self, "refresh_details")

    @refresh_details.setter
    def refresh_details(self, value: Optional[pulumi.Input['RefreshDetailsArgs']]):
        pulumi.set(self, "refresh_details", value)

    @property
    @pulumi.getter(name="userAccessRights")
    def user_access_rights(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]]]:
        """
        Mapping of users and corresponding access rights on the share (required for SMB protocol).
        """
        return pulumi.get(self, "user_access_rights")

    @user_access_rights.setter
    def user_access_rights(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]]]):
        pulumi.set(self, "user_access_rights", value)
# NOTE(review): auto-generated by the Pulumi SDK Generator (see file header);
# edits here are documentation-only.
class Share(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 access_protocol: Optional[pulumi.Input[Union[str, 'ShareAccessProtocol']]] = None,
                 azure_container_info: Optional[pulumi.Input[pulumi.InputType['AzureContainerInfoArgs']]] = None,
                 client_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClientAccessRightArgs']]]]] = None,
                 data_policy: Optional[pulumi.Input[Union[str, 'DataPolicy']]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 device_name: Optional[pulumi.Input[str]] = None,
                 monitoring_status: Optional[pulumi.Input[Union[str, 'MonitoringStatus']]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 refresh_details: Optional[pulumi.Input[pulumi.InputType['RefreshDetailsArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 share_status: Optional[pulumi.Input[Union[str, 'ShareStatus']]] = None,
                 user_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserAccessRightArgs']]]]] = None,
                 __props__=None):
        """
        Represents a share on the Data Box Edge/Gateway device.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[Union[str, 'ShareAccessProtocol']] access_protocol: Access protocol to be used by the share.
        :param pulumi.Input[pulumi.InputType['AzureContainerInfoArgs']] azure_container_info: Azure container mapping for the share.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClientAccessRightArgs']]]] client_access_rights: List of IP addresses and corresponding access rights on the share(required for NFS protocol).
        :param pulumi.Input[Union[str, 'DataPolicy']] data_policy: Data policy of the share.
        :param pulumi.Input[str] description: Description for the share.
        :param pulumi.Input[str] device_name: The device name.
        :param pulumi.Input[Union[str, 'MonitoringStatus']] monitoring_status: Current monitoring status of the share.
        :param pulumi.Input[str] name: The share name.
        :param pulumi.Input[pulumi.InputType['RefreshDetailsArgs']] refresh_details: Details of the refresh job on this share.
        :param pulumi.Input[str] resource_group_name: The resource group name.
        :param pulumi.Input[Union[str, 'ShareStatus']] share_status: Current status of the share.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserAccessRightArgs']]]] user_access_rights: Mapping of users and corresponding access rights on the share (required for SMB protocol).
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ShareArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Represents a share on the Data Box Edge/Gateway device.

        :param str resource_name: The name of the resource.
        :param ShareArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a single ShareArgs
        # object or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(ShareArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       access_protocol: Optional[pulumi.Input[Union[str, 'ShareAccessProtocol']]] = None,
                       azure_container_info: Optional[pulumi.Input[pulumi.InputType['AzureContainerInfoArgs']]] = None,
                       client_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClientAccessRightArgs']]]]] = None,
                       data_policy: Optional[pulumi.Input[Union[str, 'DataPolicy']]] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       device_name: Optional[pulumi.Input[str]] = None,
                       monitoring_status: Optional[pulumi.Input[Union[str, 'MonitoringStatus']]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       refresh_details: Optional[pulumi.Input[pulumi.InputType['RefreshDetailsArgs']]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       share_status: Optional[pulumi.Input[Union[str, 'ShareStatus']]] = None,
                       user_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserAccessRightArgs']]]]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: required inputs must be present unless
            # we are adopting an existing resource via URN.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ShareArgs.__new__(ShareArgs)
            if access_protocol is None and not opts.urn:
                raise TypeError("Missing required property 'access_protocol'")
            __props__.__dict__["access_protocol"] = access_protocol
            __props__.__dict__["azure_container_info"] = azure_container_info
            __props__.__dict__["client_access_rights"] = client_access_rights
            __props__.__dict__["data_policy"] = data_policy
            __props__.__dict__["description"] = description
            if device_name is None and not opts.urn:
                raise TypeError("Missing required property 'device_name'")
            __props__.__dict__["device_name"] = device_name
            if monitoring_status is None and not opts.urn:
                raise TypeError("Missing required property 'monitoring_status'")
            __props__.__dict__["monitoring_status"] = monitoring_status
            __props__.__dict__["name"] = name
            __props__.__dict__["refresh_details"] = refresh_details
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            if share_status is None and not opts.urn:
                raise TypeError("Missing required property 'share_status'")
            __props__.__dict__["share_status"] = share_status
            __props__.__dict__["user_access_rights"] = user_access_rights
            # Output-only properties start as None and are populated by the
            # provider.
            __props__.__dict__["share_mappings"] = None
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
        # Aliases for every other API version of this resource type.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:databoxedge:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20190301:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20190701:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20190801:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20200501preview:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20200901preview:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20201201:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20210201:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20210201preview:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20210601:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20210601preview:Share")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Share, __self__).__init__(
            'azure-native:databoxedge/v20200901:Share',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Share':
        """
        Get an existing Share resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # __new__ bypasses ShareArgs.__init__ validation: all state is
        # hydrated from the provider, so every property starts as None.
        __props__ = ShareArgs.__new__(ShareArgs)
        __props__.__dict__["access_protocol"] = None
        __props__.__dict__["azure_container_info"] = None
        __props__.__dict__["client_access_rights"] = None
        __props__.__dict__["data_policy"] = None
        __props__.__dict__["description"] = None
        __props__.__dict__["monitoring_status"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["refresh_details"] = None
        __props__.__dict__["share_mappings"] = None
        __props__.__dict__["share_status"] = None
        __props__.__dict__["system_data"] = None
        __props__.__dict__["type"] = None
        __props__.__dict__["user_access_rights"] = None
        return Share(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="accessProtocol")
    def access_protocol(self) -> pulumi.Output[str]:
        """
        Access protocol to be used by the share.
        """
        return pulumi.get(self, "access_protocol")

    @property
    @pulumi.getter(name="azureContainerInfo")
    def azure_container_info(self) -> pulumi.Output[Optional['outputs.AzureContainerInfoResponse']]:
        """
        Azure container mapping for the share.
        """
        return pulumi.get(self, "azure_container_info")

    @property
    @pulumi.getter(name="clientAccessRights")
    def client_access_rights(self) -> pulumi.Output[Optional[Sequence['outputs.ClientAccessRightResponse']]]:
        """
        List of IP addresses and corresponding access rights on the share(required for NFS protocol).
        """
        return pulumi.get(self, "client_access_rights")

    @property
    @pulumi.getter(name="dataPolicy")
    def data_policy(self) -> pulumi.Output[Optional[str]]:
        """
        Data policy of the share.
        """
        return pulumi.get(self, "data_policy")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        Description for the share.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="monitoringStatus")
    def monitoring_status(self) -> pulumi.Output[str]:
        """
        Current monitoring status of the share.
        """
        return pulumi.get(self, "monitoring_status")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The object name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="refreshDetails")
    def refresh_details(self) -> pulumi.Output[Optional['outputs.RefreshDetailsResponse']]:
        """
        Details of the refresh job on this share.
        """
        return pulumi.get(self, "refresh_details")

    @property
    @pulumi.getter(name="shareMappings")
    def share_mappings(self) -> pulumi.Output[Sequence['outputs.MountPointMapResponse']]:
        """
        Share mount point to the role.
        """
        return pulumi.get(self, "share_mappings")

    @property
    @pulumi.getter(name="shareStatus")
    def share_status(self) -> pulumi.Output[str]:
        """
        Current status of the share.
        """
        return pulumi.get(self, "share_status")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """
        Share on ASE device
        """
        # NOTE(review): docstring above looks like a generator artifact;
        # SystemDataResponse suggests creation/modification metadata —
        # confirm against the Azure API spec before relying on it.
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The hierarchical type of the object.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="userAccessRights")
    def user_access_rights(self) -> pulumi.Output[Optional[Sequence['outputs.UserAccessRightResponse']]]:
        """
        Mapping of users and corresponding access rights on the share (required for SMB protocol).
        """
        return pulumi.get(self, "user_access_rights")
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
52178c5004f54c1e21eb8c03f81c52949282cbcb | 7002368b209d45dc6f076cab4064fecb1d2cb28d | /openimu/utils.py | 70ba7f1214eed1487d289fc1195e70ec63398c4f | [] | no_license | klaird/python-openimu | 970269769b6246256f45b8c4ed4a7b9cd9b6da66 | 936942491f02d377d152cf19032b380273dadef2 | refs/heads/master | 2022-04-17T17:28:37.192828 | 2020-03-25T04:57:09 | 2020-03-25T04:57:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | import sys
import os
import pkgutil
def is_in_bundle():
    """Return True when running from a PyInstaller one-file bundle."""
    return getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS')
def get_executor_path():
    """Return the application data directory, creating it if needed.

    Inside a PyInstaller bundle this is the executable's directory;
    otherwise it is ``~/openimu``.

    Returns:
        str: absolute path to an existing directory.
    """
    if is_in_bundle():
        path = os.path.abspath(os.path.dirname(sys.executable))
    else:
        path = os.path.join(os.path.expanduser('~'), 'openimu')  # sys.path[0]
    # exist_ok=True closes the TOCTOU race of the original
    # isdir()-then-makedirs() pair while keeping the same failure mode
    # (FileExistsError if the path exists but is not a directory).
    os.makedirs(path, exist_ok=True)
    return path
def get_content_from_bundle(package, path):
    """Load a packaged data resource.

    In a PyInstaller bundle the resource is read directly from `package`;
    otherwise it is resolved relative to the 'openimu' module tree.
    """
    module_name = 'openimu'
    if is_in_bundle():
        return pkgutil.get_data(package, path)
    # NOTE(review): os.path.join uses the platform separator, while pkgutil
    # resource paths are conventionally '/'-separated — confirm on Windows.
    return pkgutil.get_data(module_name, os.path.join(package, path))
| [
"ywsong@aceinna.com"
] | ywsong@aceinna.com |
78cbe639071b36c9dc723169f358e8c6f3a4a7e5 | dba16143d8fa6aa73ca1d4df7bcfaca42824412c | /src/year2022/day04b.py | e6f81b1ff03c0c75c6dad604b168d793845c54e3 | [
"Unlicense"
] | permissive | lancelote/advent_of_code | 84559bf633189db3c3e4008b7777b1112f7ecd30 | 4b8ac6a97859b1320f77ba0ee91168b58db28cdb | refs/heads/master | 2023-02-03T14:13:07.674369 | 2023-01-24T20:06:43 | 2023-01-24T20:06:43 | 47,609,324 | 11 | 0 | null | 2019-10-07T07:06:42 | 2015-12-08T08:35:51 | Python | UTF-8 | Python | false | false | 435 | py | """2022 - Day 4 Part 2: Camp Cleanup."""
from src.year2022.day04a import Pair
from src.year2022.day04a import process_data
def overlap(pair: Pair) -> bool:
    """Return True when the two assignment ranges share at least one section."""
    (first_start, first_end), (second_start, second_end) = pair
    return (
        first_start <= second_start <= first_end
        or second_start <= first_start <= second_end
    )
def solve(task: str) -> int:
    """Count assignment pairs whose ranges overlap at all."""
    return sum(1 for assignment in process_data(task) if overlap(assignment))
| [
"lancelote.du.lac@gmail.com"
] | lancelote.du.lac@gmail.com |
78a54bb8303ec0f808abbef1226b492cd124a644 | 1564d12d61f669ce9f772f3ef7563167f7fe13bf | /codechef/august/lunchtime/chefAndTrip.py | 6b92dc5e388dbab98b60015f6c662a736547c0ea | [] | no_license | sakshamk6999/codingPractice | 73ec4873defb0f0d2e47173150a589ee12e5e0a1 | f727aac6d87448b19fc9d48660dc6978fe5edc14 | refs/heads/master | 2020-12-01T20:22:36.299535 | 2020-02-04T05:55:53 | 2020-02-04T05:55:53 | 230,757,937 | 0 | 0 | null | 2020-02-12T20:38:12 | 2019-12-29T14:00:22 | Python | UTF-8 | Python | false | false | 1,583 | py | from collections import defaultdict
for _ in range(int(input())):
n, k = map(int, input().split())
a = list(map(int, input().split()))
record = defaultdict(int)
for i in range(1, n + 1):
record[i] = 1
l = []
i = 0
temp = []
tl = 0
while i < n:
if a[i] == -1:
temp.append(-1)
i += 1
tl += 1
else:
temp.append(a[i])
l.append([temp, tl + 1])
tl = 1
temp = [a[i]]
i += 1
if a[n - 1] == -1:
l.append([temp, tl])
for i in l:
nTemp = i[1]
tempList = i[0]
if tempList[0] != -1:
record[tempList[0]] = 0
if tempList[0] != -1:
record[tempList[nTemp - 1]] = 0
initial = -1
later = -1
for i in range(1, tempList[0]):
if record[i] == 1:
initial = i
record[i] = 0
break
if initial == -1:
for i in range(tempList[0] + 1, k + 1):
if record[i] == 1:
initial = i
record[i] = 0
break
for i in range(1, tempList[0]):
if record[i] == 1:
later = i
record[i] = 0
break
if later == -1:
for i in range(tempList[0] + 1, k + 1):
if record[i] == 1:
later = i
record[i] = 0
break
if later == -1:
for j in range(nTemp):
| [
"sakshamkhatwani@gmail.com"
] | sakshamkhatwani@gmail.com |
0006254dc5982808aec74a2e9542824225d415d7 | 7e9dbc8dddc3f7e7a74adadabc39c6bc31f5208d | /in_place_rev_linked_list/rev_linked_list.py | e92141ed672ca20d465ca926f70fd25196917a55 | [] | no_license | ashish-bisht/ds_algo_handbook | 113bdb20d40e4b885791218d125aaae957243ded | a1847c8c4cc995995bc4791d093a5a7496e4b15b | refs/heads/master | 2023-01-19T02:07:42.622813 | 2020-11-25T04:16:38 | 2020-11-25T04:16:38 | 309,753,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py |
class Node:
def __init__(self, value, next=None):
self.value = value
self.next = next
def display(head):
while head:
print(head.value)
head = head.next
def reverse(head):
prev = None
cur = head
while cur:
nxt = cur.next
cur.next = prev
prev = cur
cur = nxt
return prev
head = Node(2)
head.next = Node(4)
head.next.next = Node(6)
head.next.next.next = Node(8)
head.next.next.next.next = Node(10)
display(head)
reversed_lst = reverse(head)
display(reversed_lst)
| [
"ashishbisht723@gmail.com"
] | ashishbisht723@gmail.com |
6e8fcc257405350e2e5a799997d018555f82b433 | c25a17f0f82c2eebca55bbe180f4c2ccbbf00292 | /03_Data_Science/1_Collection/CSV_Handle/csv_test_01.03.py | 0b0b7e514f9d843c28239b3be0f4ca1ec2808d51 | [] | no_license | superbeom97/jumpjump | a0a4da6f0df0483ef0cef9833b5fe0402ec63c9c | fc45efce2a2b00c614aa5aa54b36be1572ed40ce | refs/heads/master | 2021-09-15T09:35:16.903857 | 2018-05-30T00:00:59 | 2018-05-30T00:00:59 | 111,883,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,496 | py | import csv
import math
def get_csv_rowInstance(row_name): # 행(row) 입력 함수
row_instance = []
row_index = data[0].index(row_name)
for element_row in data[1:]:
row_instance.append(element_row[row_index])
return row_instance
def get_csv_columnInstance(primary_key): # 열(column) 입력 함수
column_instance = []
for element_column in data[1:]:
if element_column[0] == primary_key:
column_instance.append(element_column)
return column_instance[0]
def row_Print(row_instance): # 행(row) 출력 함수
for element_row_print in row_instance:
print(element_row_print)
def column_Print(element_instance): # 열(column) 입력 함수
for element_column_print in element_instance:
print(element_column_print)
def element_row_Print(row_instance): # 행(row) 요소 출력 함수
print("<<선택하신 access key값의 요소는 다음과 같습니다>>")
for element_row_print in row_instance:
print(element_row_print, end=" ")
print("")
def my_Sum(row_instance): # 총합 함수
element_row_Print(row_instance)
sum = 0
for element_row in row_instance:
sum += float(element_row)
print("총합 : %g" %sum)
def my_Average(row_instance): # 평균 함수
element_row_Print(row_instance)
sum = 0
for element_row in row_instance:
sum += float(element_row)
average_row = sum/len(row_instance)
print("평균 : %g" % average_row)
def my_Max(row_instance): # 최댓값 함수
element_row_Print(row_instance)
max_row = []
for element_row in row_instance:
max_row.append(float(element_row))
print("최댓값 : %g" % max(max_row))
def my_Min(row_instance): # 최솟값 함수
element_row_Print(row_instance)
min_row = []
for element_row in row_instance:
min_row.append(float(element_row))
print("최댓값 : %g" % min(min_row))
def my_Deviation(row_instance): # 편차 함수
element_row_Print(row_instance)
sum = 0
for element_row in row_instance:
sum += float(element_row)
average_row = sum / len(row_instance)
print("표본 편차")
for element_row_j in row_instance:
print("%-3g %3g" % (float(element_row_j), (float(element_row_j)-average_row)))
## %3g는 전체 길이가 3개인 문자열 공간에서 오른쪽 정렬하고, 그 앞의 나머지는 공백으로
## %-3g는 전체 길이가 3개인 문자열 공간에서 왼쪽 정렬하고, 그 뒤의 나머지는 공백으로
def my_Variance(row_instance): # 분산 입력 함수
#제곱의 평균 - 평균의 제곱
element_row_Print(row_instance)
square_one = 0
average_one = 0
for element_row in row_instance:
square_one += float(element_row)*float(element_row)
average_one += float(element_row)
square_one_average = square_one/len(row_instance)
average_two = average_one/len(row_instance)
average_two_square = average_two*average_two
variance_row = square_one_average - average_two_square
return variance_row
def my_Variance_Print(variance_row): # 분산 출력 함수
print("편차 : %g" % (variance_row))
def my_Standard_Deviation(variance_row): # 표준편차 출력 함수
print("표준편차 : %g" % math.sqrt(variance_row))
def my_Cendant(row_instance): # 오름차순/내림차순 입력 함수
element_row_Print(row_instance)
cendant_row = []
for element_row in row_instance:
cendant_row.append(float(element_row))
cendant_row.sort()
return cendant_row
def my_Ascendant(cendant_row): # 오름차순 출력 함수
print("<<오름차순 정렬>>")
for z in cendant_row:
print("%g" % z, end=" ")
print("")
def my_Descendant(cendant_row): # 내림차순 출력 함수
cendant_row.reverse()
print("<<내림차순 정렬>>")
for z in cendant_row:
print("%g" % z, end=" ")
print("")
with open("Demographic_Statistics_By_Zip_Code.csv", newline="") as infile:
data = list(csv.reader(infile))
while True:
number = int(input("<<원하는 서비스를 선택하시오>> \n열:1, 행:2, 총합:3, 평균:4, 최댓값:5, 최솟값:6, 편차:7, 분산:8, 표준편차:9, 오름차순:10, 내림차순:11, 종료:12\n=> "))
if number == 1:
access_key = input("구하고자 하는 열의 access key값을 입력하시오: ")
column_Print(get_csv_columnInstance("%s" %access_key))
elif number == 2:
access_key = input("구하고자 하는 행의 access key값을 입력하시오: ")
row_Print(get_csv_rowInstance("%s" % access_key))
elif number == 3:
access_key = input("총합을 원하는 행의 access key값을 입력하시오: ")
my_Sum(get_csv_rowInstance("%s" % access_key))
elif number == 4:
access_key = input("평균을 원하는 행의 access key값을 입력하시오: ")
my_Average(get_csv_rowInstance("%s" % access_key))
elif number == 5:
access_key = input("최댓값을 원하는 행의 access key값을 입력하시오: ")
my_Max(get_csv_rowInstance("%s" % access_key))
elif number == 6:
access_key = input("최솟값을 원하는 행의 access key값을 입력하시오: ")
my_Min(get_csv_rowInstance("%s" % access_key))
elif number == 7:
access_key = input("편차를 원하는 행의 access key값을 입력하시오: ")
my_Deviation(get_csv_rowInstance("%s" % access_key))
elif number == 8:
access_key = input("분산을 원하는 행의 access key값을 입력하시오: ")
my_Variance_Print(my_Variance(get_csv_rowInstance("%s" % access_key)))
elif number == 9:
access_key = input("표준편차를 원하는 행의 access key값을 입력하시오: ")
my_Standard_Deviation(my_Variance(get_csv_rowInstance("%s" % access_key)))
elif number == 10:
access_key = input("오름차순을 원하는 행의 access key값을 입력하시오: ")
my_Ascendant(my_Cendant(get_csv_rowInstance("%s" % access_key)))
elif number == 11:
access_key = input("내림차순을 원하는 행의 access key값을 입력하시오: ")
my_Descendant(my_Cendant(get_csv_rowInstance("%s" % access_key)))
elif number == 12:
print("이용해 주셔서 감사합니다!!")
break | [
"beom9790@naver.com"
] | beom9790@naver.com |
3d8e86b94f8c6ea393d43c5404233f19c3dff896 | 9d39f6ec24ea355ee82adfd4487453172953dd37 | /tao_detection_release/configs/transferred/faster_rcnn_r50_fpn_1x_lvis_reweighthead_bf.py | eeab0d968e0f56d946a0e730ab9ff8489f0f65f3 | [
"Apache-2.0"
] | permissive | feiaxyt/Winner_ECCV20_TAO | d69c0efdb1b09708c5d95c3f0a38460dedd0e65f | dc36c2cd589b096d27f60ed6f8c56941b750a0f9 | refs/heads/main | 2023-03-19T14:17:36.867803 | 2021-03-16T14:04:31 | 2021-03-16T14:04:31 | 334,864,331 | 82 | 6 | null | null | null | null | UTF-8 | Python | false | false | 5,791 | py | # model settings
model = dict(
type='FasterRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='ReweightBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
reweight_cfg=dict(
cls_weight='./data/lvis/cls_weight_bf.pt',
),
roi_feat_size=7,
num_classes=1231,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
score_thr=0.0,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=300)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'LvisDataset'
data_root = 'data/lvis/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'lvis_v0.5_train.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'lvis_v0.5_val.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'lvis_v0.5_val.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/faster_rcnn_r50_fpn_1x_lvis_reweighthead_bs'
load_from = './work_dirs/faster_rcnn_r50_fpn_1x_lvis/latest.pth'
# load_from = './data/download_models/faster_rcnn_r50_fpn_2x_20181010-443129e1.pth'
resume_from = None
workflow = [('train', 1)]
# Train which part, 0 for all, 1 for cls, 2 for bbox_head
selectp = 1
| [
"feiaxyt@163.com"
] | feiaxyt@163.com |
1345c47a293a6a346ffd0dbc4a78ec9fb339dfcc | 01d4967b9f8605c2954a10ed7b0e1d7936022ab3 | /components/ownership.gypi | 46a7a51093e7e858d0092d1d676f77bd9e00cf3a | [
"BSD-3-Clause"
] | permissive | tmpsantos/chromium | 79c4277f98c3977c72104ecc7c5bda2f9b0295c2 | 802d4aeeb33af25c01ee5994037bbf14086d4ac0 | refs/heads/master | 2021-01-17T08:05:57.872006 | 2014-09-05T13:39:49 | 2014-09-05T13:41:43 | 16,474,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | gypi | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [{
'target_name': 'ownership',
'type': '<(component)',
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/crypto/crypto.gyp:crypto',
],
'defines': [
'OWNERSHIP_IMPLEMENTATION',
],
'sources': [
'ownership/mock_owner_key_util.cc',
'ownership/mock_owner_key_util.h',
'ownership/owner_key_util.cc',
'ownership/owner_key_util.h',
'ownership/owner_key_util_impl.cc',
'ownership/owner_key_util_impl.h',
],
}],
}
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
6a896d793698e10203bfb57202bdfca826183a3f | 7a2b4aca5ae841cb873e6bced7298c7884eb7e9d | /partOne.py | 6b1dbfe8a1fd8dadd50584cb64e554f06cba38c4 | [] | no_license | SunilKumar-ugra/Nueral_Networks_Tutorials | 73e297fa2df604c74c79eed1fdb2d891f9bd7fb0 | 8fa2d7f8c8bc3c6611b06a3d7860f8bfa11d5795 | refs/heads/master | 2020-08-02T22:48:04.176970 | 2019-09-28T16:53:04 | 2019-09-28T16:53:04 | 211,532,609 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | # Neural Networks Demystified
# Part 1: Data + Architecture
#
# Supporting code for short YouTube series on artificial neural networks.
#
# Stephen Welch
# @stephencwelch
import numpy as np
# X = (hours sleeping, hours studying), y = Score on test
X = np.array(([3,5], [5,1], [10,2],[11,2]), dtype=float)
x = np.amax(X, axis=0)
xx = np.amax(X, axis=1)
y = np.array(([75], [82], [93]), dtype=float)
print(X)
# Normalize
X = X/np.amax(X, axis=0)
y = y/100 #Max test score is 100
print(x)
print(xx) | [
"sk7899132595@gmail.com"
] | sk7899132595@gmail.com |
4e8f79161ea5ee145ef469ba6b36cf2c363e42c1 | af82475dc7eb45c478414372c222e7b6016359d4 | /python书籍/Python For Finance Code/Code of Python For Finance/4375OS_08_Code/4375OS_08_17_date_var.py | f569bdb0c198e340e01ec30e63c0fd18a5fff006 | [] | no_license | enfangzhong/PythonBaseCode | 8f58c8b817eb9f4b0f0a5be437a52d5b5fab3433 | 9ab4a578b2692fdbb6aeeacb310251d51f72e953 | refs/heads/master | 2020-05-17T16:26:02.598344 | 2019-04-27T20:49:40 | 2019-04-27T20:49:40 | 183,817,172 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | """
Name : 4375OS_08_17_date_var.py
Book : Python for Finance
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 12/26/2013
email : yany@canisius.edu
paulyxy@hotmail.com
"""
import pandas as pd
url='http://chart.yahoo.com/table.csv?s=IBM'
x=pd.read_csv(url,index_col=0,parse_dates=True)
| [
"944727327@qq.com"
] | 944727327@qq.com |
d3c841c95e795df6a9e12f940ae734222b343013 | fb7efe44f4d9f30d623f880d0eb620f3a81f0fbd | /content/test/gpu/generate_buildbot_json.py | 2b2e64015db01b7c9dab9fd42479a2f27f1659d3 | [
"BSD-3-Clause"
] | permissive | wzyy2/chromium-browser | 2644b0daf58f8b3caee8a6c09a2b448b2dfe059c | eb905f00a0f7e141e8d6c89be8fb26192a88c4b7 | refs/heads/master | 2022-11-23T20:25:08.120045 | 2018-01-16T06:41:26 | 2018-01-16T06:41:26 | 117,618,467 | 3 | 2 | BSD-3-Clause | 2022-11-20T22:03:57 | 2018-01-16T02:09:10 | null | UTF-8 | Python | false | false | 72,432 | py | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to generate chromium.gpu.json and chromium.gpu.fyi.json in
the src/testing/buildbot directory. Maintaining these files by hand is
too unwieldy.
"""
import copy
import json
import os
import string
import sys
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.dirname(os.path.dirname(os.path.dirname(THIS_DIR)))
# Current stable Windows 7 NVIDIA GT 610 device/driver identifier.
WIN_7_NVIDIA_GEFORCE_610_STABLE_DRIVER = '10de:104a-21.21.13.7254'
# Current experimental Windows NVIDIA GT 610 device/driver identifier.
WIN_NVIDIA_GEFORCE_610_EXPERIMENTAL_DRIVER = '10de:104a-23.21.13.8792'
# Current stable Windows 10 NVIDIA GT 610 device/driver identifier.
WIN_10_NVIDIA_GEFORCE_610_STABLE_DRIVER = '10de:104a-21.21.13.7619'
# Use this to match all drivers for the NVIDIA GT 610.
NVIDIA_GEFORCE_610_ALL_DRIVERS = '10de:104a-*'
# "Types" of waterfalls and bots. A bot's type is the union of its own
# type and the type of its waterfall. Predicates can apply to these
# sets in order to run tests only on a certain subset of the bots.
class Types(object):
GPU = 'gpu'
GPU_FYI = 'gpu_fyi'
OPTIONAL = 'optional'
V8_FYI = 'v8_fyi'
# The Win ANGLE AMD tryserver is split off because there isn't
# enough capacity to run all the tests from chromium.gpu.fyi's Win
# AMD bot on a tryserver. It represents some of the tests on
# win_angle_rel_ng and is not a real machine on the waterfall.
WIN_ANGLE_AMD_TRYSERVER = 'win_angle_amd_tryserver'
# The dEQP tests use a different compiler configuration than the
# rest of the bots; they're the only target which use exceptions and
# RTTI. They're split off so that these specialized compiler options
# apply only to these targets.
DEQP = 'deqp'
# The predicate functions receive a list of types as input and
# determine whether the test should run on the given bot.
class Predicates(object):
@staticmethod
def DEFAULT(x):
# By default, tests run on the chromium.gpu and chromium.gpu.fyi
# waterfalls, but not on the DEQP bots, not on the optional
# tryservers, not on the client.v8.fyi waterfall, nor on the Win
# ANGLE AMD tryserver.
return Types.DEQP not in x and Types.OPTIONAL not in x and \
Types.V8_FYI not in x and Types.WIN_ANGLE_AMD_TRYSERVER not in x
@staticmethod
def FYI_ONLY(x):
# This predicate is more complex than desired because the optional
# tryservers and the Win ANGLE AMD tryserver are considered to be
# on the chromium.gpu.fyi waterfall.
return Types.GPU_FYI in x and Types.DEQP not in x and \
Types.OPTIONAL not in x and \
Types.WIN_ANGLE_AMD_TRYSERVER not in x
@staticmethod
def FYI_AND_OPTIONAL(x):
return Predicates.FYI_ONLY(x) or Types.OPTIONAL in x
@staticmethod
def FYI_AND_OPTIONAL_AND_WIN_ANGLE_AMD(x):
return Predicates.FYI_ONLY(x) or Types.OPTIONAL in x or \
Types.WIN_ANGLE_AMD_TRYSERVER in x
@staticmethod
def FYI_OPTIONAL_AND_V8(x):
return Predicates.FYI_AND_OPTIONAL(x) or Types.V8_FYI in x
@staticmethod
def FYI_OPTIONAL_V8_AND_WIN_ANGLE_AMD(x):
return Predicates.FYI_OPTIONAL_AND_V8(x) or \
Types.WIN_ANGLE_AMD_TRYSERVER in x
@staticmethod
def DEFAULT_PLUS_V8(x):
return Predicates.DEFAULT(x) or Types.V8_FYI in x
@staticmethod
def DEFAULT_AND_OPTIONAL(x):
return Predicates.DEFAULT(x) or Types.OPTIONAL in x
@staticmethod
def DEQP(x):
return Types.DEQP in x
# Most of the bots live in the Chrome-GPU pool as defined here (Google
# employees only, sorry):
# https://chrome-internal.googlesource.com/infradata/config/+/master/configs/
# chromium-swarm/bots.cfg
#
# Some of them, like the Mac Minis and Nexus 5X devices, are shared
# resources and live in the regular Chrome pool.
WATERFALL = {
'name': 'chromium.gpu',
'type': Types.GPU,
'builders': {
'GPU Win Builder' : {},
'GPU Win Builder (dbg)' : {},
'GPU Mac Builder' : {},
'GPU Mac Builder (dbg)' : {},
'GPU Linux Builder' : {},
'GPU Linux Builder (dbg)' : {},
},
'testers': {
'Win7 Release (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': WIN_7_NVIDIA_GEFORCE_610_STABLE_DRIVER,
'os': 'Windows-2008ServerR2-SP1',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'win',
},
'Win7 Debug (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': WIN_7_NVIDIA_GEFORCE_610_STABLE_DRIVER,
'os': 'Windows-2008ServerR2-SP1',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Debug',
'swarming': True,
'os_type': 'win',
},
'Mac Release (Intel)': {
'swarming_dimensions': [
{
'gpu': '8086:0a2e',
'os': 'Mac-10.12.6',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'mac',
},
'Mac Debug (Intel)': {
'swarming_dimensions': [
{
'gpu': '8086:0a2e',
'os': 'Mac-10.12.6',
},
],
'build_config': 'Debug',
'swarming': True,
'os_type': 'mac',
},
'Mac Retina Release (AMD)': {
'swarming_dimensions': [
{
'gpu': '1002:6821',
'hidpi': '1',
'os': 'Mac-10.12.6',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'mac',
},
'Mac Retina Debug (AMD)': {
'swarming_dimensions': [
{
'gpu': '1002:6821',
'hidpi': '1',
'os': 'Mac-10.12.6',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Debug',
'swarming': True,
'os_type': 'mac',
},
'Linux Release (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': '10de:104a',
'os': 'Ubuntu',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'linux',
},
'Linux Debug (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': '10de:104a',
'os': 'Ubuntu',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Debug',
'swarming': True,
'os_type': 'linux',
},
}
}
FYI_WATERFALL = {
'name': 'chromium.gpu.fyi',
'type': Types.GPU_FYI,
'builders': {
'GPU Win Builder' : {},
'GPU Win Builder (dbg)' : {},
'GPU Win x64 Builder' : {},
'GPU Win x64 Builder (dbg)' : {},
'GPU Mac Builder' : {},
'GPU Mac Builder (dbg)' : {},
'GPU Linux Builder' : {},
'GPU Linux Builder (dbg)' : {},
'GPU Linux Ozone Builder' : {},
},
'testers': {
'Win7 Release (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': WIN_7_NVIDIA_GEFORCE_610_STABLE_DRIVER,
'os': 'Windows-2008ServerR2-SP1',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'win',
},
'Win7 Debug (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': WIN_7_NVIDIA_GEFORCE_610_STABLE_DRIVER,
'os': 'Windows-2008ServerR2-SP1',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Debug',
'swarming': True,
'os_type': 'win',
},
'Win7 dEQP Release (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': WIN_7_NVIDIA_GEFORCE_610_STABLE_DRIVER,
'os': 'Windows-2008ServerR2-SP1',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'win',
'type': Types.DEQP,
},
'Win7 Experimental Release (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': WIN_NVIDIA_GEFORCE_610_EXPERIMENTAL_DRIVER,
'os': 'Windows-2008ServerR2-SP1',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'win',
},
'Win10 Release (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': WIN_10_NVIDIA_GEFORCE_610_STABLE_DRIVER,
'os': 'Windows-10',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'win',
},
'Win10 Debug (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': WIN_10_NVIDIA_GEFORCE_610_STABLE_DRIVER,
'os': 'Windows-10',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Debug',
'swarming': True,
'os_type': 'win',
},
'Win7 Release (AMD)': {
'swarming_dimensions': [
{
'gpu': '1002:6613',
'os': 'Windows-2008ServerR2-SP1',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'win',
},
'Win7 Debug (AMD)': {
'swarming_dimensions': [
{
'gpu': '1002:6613',
'os': 'Windows-2008ServerR2-SP1',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Debug',
'swarming': True,
'os_type': 'win',
},
'Win7 dEQP Release (AMD)': {
'swarming_dimensions': [
{
'gpu': '1002:6613',
'os': 'Windows-2008ServerR2-SP1',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'win',
'type': Types.DEQP,
},
'Win10 Release (Intel HD 630)': {
'swarming_dimensions': [
{
'gpu': '8086:5912',
'os': 'Windows-10',
},
],
'build_config': 'Release',
# This bot is a one-off and doesn't have similar slaves in the
# swarming pool.
'swarming': False,
'os_type': 'win',
},
'Win10 Release (NVIDIA Quadro P400)': {
'swarming_dimensions': [
{
'gpu': '10de:1cb3',
'os': 'Windows-10'
},
],
'build_config': 'Release',
# This bot is a one-off and doesn't have similar slaves in the
# swarming pool.
'swarming': False,
'os_type': 'win',
},
'Win7 Release (AMD R7 240)': {
'swarming_dimensions': [
{
'gpu': '1002:6613',
'os': 'Windows-2008ServerR2-SP1',
},
],
'build_config': 'Release',
# This bot is a one-off and doesn't have similar slaves in the
# swarming pool.
'swarming': False,
'os_type': 'win',
},
'Win7 x64 Release (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': WIN_7_NVIDIA_GEFORCE_610_STABLE_DRIVER,
'os': 'Windows-2008ServerR2-SP1',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release_x64',
'swarming': True,
'os_type': 'win',
},
'Win7 x64 Debug (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': WIN_7_NVIDIA_GEFORCE_610_STABLE_DRIVER,
'os': 'Windows-2008ServerR2-SP1',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Debug_x64',
'swarming': True,
'os_type': 'win',
},
'Win7 x64 dEQP Release (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': WIN_7_NVIDIA_GEFORCE_610_STABLE_DRIVER,
'os': 'Windows-2008ServerR2-SP1',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release_x64',
'swarming': True,
'os_type': 'win',
'type': Types.DEQP,
},
'Mac Release (Intel)': {
'swarming_dimensions': [
{
'gpu': '8086:0a2e',
'os': 'Mac-10.12.6',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'mac',
},
'Mac Debug (Intel)': {
'swarming_dimensions': [
{
'gpu': '8086:0a2e',
'os': 'Mac-10.12.6',
},
],
'build_config': 'Debug',
'swarming': True,
'os_type': 'mac',
},
'Mac Pro Release (AMD)': {
'swarming_dimensions': [
{
'gpu': '1002:679e',
'os': 'Mac-10.10'
},
],
'build_config': 'Release',
# This bot is a one-off and doesn't have similar slaves in the
# swarming pool.
'swarming': False,
'os_type': 'mac',
},
'Mac Pro Debug (AMD)': {
'swarming_dimensions': [
{
'gpu': '1002:679e',
'os': 'Mac-10.10'
},
],
'build_config': 'Debug',
# This bot is a one-off and doesn't have similar slaves in the
# swarming pool.
'swarming': False,
'os_type': 'mac',
},
'Mac Retina Release (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': '10de:0fe9',
'hidpi': '1',
'os': 'Mac-10.12.6',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'mac',
},
'Mac Retina Debug (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': '10de:0fe9',
'hidpi': '1',
'os': 'Mac-10.12.6',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Debug',
'swarming': True,
'os_type': 'mac',
},
'Mac Retina Release (AMD)': {
'swarming_dimensions': [
{
'gpu': '1002:6821',
'hidpi': '1',
'os': 'Mac-10.12.6',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'mac',
},
'Mac Retina Debug (AMD)': {
'swarming_dimensions': [
{
'gpu': '1002:6821',
'hidpi': '1',
'os': 'Mac-10.12.6',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Debug',
'swarming': True,
'os_type': 'mac',
},
'Mac Experimental Release (Intel)': {
'swarming_dimensions': [
{
'gpu': '8086:0a2e',
'os': 'Mac-10.12.6'
},
],
'build_config': 'Release',
# This bot is a one-off for testing purposes.
'swarming': False,
'os_type': 'mac',
},
'Mac Experimental Retina Release (AMD)': {
'swarming_dimensions': [
{
'gpu': '1002:6821',
'hidpi': '1',
'os': 'Mac-10.12.6',
},
],
'build_config': 'Release',
# This bot is a one-off for testing purposes.
'swarming': False,
'os_type': 'mac',
},
'Mac Experimental Retina Release (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': '10de:0fe9',
'hidpi': '1',
'os': 'Mac-10.12.6',
},
],
'build_config': 'Release',
# This bot is a one-off for testing purposes.
'swarming': False,
'os_type': 'mac',
},
'Mac GPU ASAN Release': {
# This bot spawns jobs on multiple GPU types.
'swarming_dimensions': [
{
'gpu': '8086:0a2e',
'os': 'Mac-10.12.6',
},
{
'gpu': '1002:6821',
'hidpi': '1',
'os': 'Mac-10.12.6',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'mac',
'is_asan': True,
},
'Mac dEQP Release AMD': {
# This bot spawns jobs on multiple GPU types.
'swarming_dimensions': [
{
'gpu': '1002:6821',
'hidpi': '1',
'os': 'Mac-10.12.6',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'mac',
'type': Types.DEQP,
},
'Mac dEQP Release Intel': {
# This bot spawns jobs on multiple GPU types.
'swarming_dimensions': [
{
'gpu': '8086:0a2e',
'os': 'Mac-10.12.6',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'mac',
'type': Types.DEQP,
},
'Linux Release (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': '10de:104a',
'os': 'Ubuntu',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'linux',
},
'Linux Release (NVIDIA Quadro P400)': {
'swarming_dimensions': [
{
'gpu': '10de:1cb3',
'os': 'Ubuntu'
},
],
'build_config': 'Release',
# This bot is a one-off and doesn't have similar slaves in the
# swarming pool.
'swarming': False,
'os_type': 'linux',
},
'Linux Debug (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': '10de:104a',
'os': 'Ubuntu',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Debug',
'swarming': True,
'os_type': 'linux',
},
'Linux dEQP Release (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': '10de:104a',
'os': 'Ubuntu',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'linux',
'type': Types.DEQP,
},
'Linux Release (Intel HD 630)': {
'swarming_dimensions': [
{
'gpu': '8086:5912',
'os': 'Ubuntu'
},
],
'build_config': 'Release',
# This bot is a one-off and doesn't have similar slaves in the
# swarming pool.
'swarming': False,
'os_type': 'linux',
},
'Linux Release (AMD R7 240)': {
'swarming_dimensions': [
{
'gpu': '1002:6613',
'os': 'Ubuntu'
},
],
'build_config': 'Release',
# This bot is a one-off and doesn't have similar slaves in the
# swarming pool.
'swarming': False,
'os_type': 'linux',
},
'Linux GPU TSAN Release': {
'swarming_dimensions': [
{
'gpu': '10de:104a',
'os': 'Ubuntu',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'linux',
'instrumentation_type': 'tsan',
},
'Linux Ozone (Intel)': {
'swarming_dimensions': [
{
'gpu': '8086:1912',
'os': 'Ubuntu'
},
],
'build_config': 'Release',
# This bot is a one-off and doesn't have similar slaves in the
# swarming pool.
'swarming': False,
'os_type': 'linux',
},
'Android Release (Nexus 5)': {
'swarming_dimensions': [
{
# There are no PCI IDs on Android.
# This is a hack to get the script working.
'gpu': '0000:0000',
'os': 'Android'
},
],
'build_config': 'android-chromium',
# This bot is a one-off and doesn't have similar slaves in the
# swarming pool.
'swarming': False,
'os_type': 'android',
},
'Android Release (Nexus 5X)': {
'swarming_dimensions': [
{
'device_type': 'bullhead',
'device_os': 'M',
'os': 'Android'
},
],
'build_config': 'android-chromium',
'swarming': True,
'os_type': 'android',
},
'Android Release (Nexus 6)': {
'swarming_dimensions': [
{
# There are no PCI IDs on Android.
# This is a hack to get the script working.
'gpu': '0000:0000',
'os': 'Android'
},
],
'build_config': 'android-chromium',
# This bot is a one-off and doesn't have similar slaves in the
# swarming pool.
'swarming': False,
'os_type': 'android',
},
'Android Release (Nexus 6P)': {
'swarming_dimensions': [
{
# There are no PCI IDs on Android.
# This is a hack to get the script working.
'gpu': '0000:0000',
'os': 'Android'
},
],
'build_config': 'android-chromium',
# This bot is a one-off and doesn't have similar slaves in the
# swarming pool.
'swarming': False,
'os_type': 'android',
},
'Android Release (Nexus 9)': {
'swarming_dimensions': [
{
# There are no PCI IDs on Android.
# This is a hack to get the script working.
'gpu': '0000:0000',
'os': 'Android'
},
],
'build_config': 'android-chromium',
# This bot is a one-off and doesn't have similar slaves in the
# swarming pool.
'swarming': False,
'os_type': 'android',
},
'Android Release (NVIDIA Shield TV)': {
'swarming_dimensions': [
{
# There are no PCI IDs on Android.
# This is a hack to get the script working.
'gpu': '0000:0000',
'os': 'Android'
},
],
'build_config': 'android-chromium',
# This bot is a one-off and doesn't have similar slaves in the
# swarming pool.
'swarming': False,
'os_type': 'android',
},
'Android dEQP Release (Nexus 5X)': {
'swarming_dimensions': [
{
'device_type': 'bullhead',
'device_os': 'M',
'os': 'Android'
},
],
'build_config': 'android-chromium',
'swarming': True,
'os_type': 'android',
'type': Types.DEQP,
},
# The following "optional" testers don't actually exist on the
# waterfall. They are present here merely to specify additional
# tests which aren't on the main tryservers. Unfortunately we need
# a completely different (redundant) bot specification to handle
# this.
'Optional Win7 Release (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': WIN_7_NVIDIA_GEFORCE_610_STABLE_DRIVER,
'os': 'Windows-2008ServerR2-SP1',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'win',
'type': Types.OPTIONAL,
},
'Optional Mac Release (Intel)': {
'swarming_dimensions': [
{
'gpu': '8086:0a2e',
'os': 'Mac-10.12.6',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'mac',
'type': Types.OPTIONAL,
},
'Optional Mac Retina Release (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': '10de:0fe9',
'hidpi': '1',
'os': 'Mac-10.12.6',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'mac',
'type': Types.OPTIONAL,
},
'Optional Mac Retina Release (AMD)': {
'swarming_dimensions': [
{
'gpu': '1002:6821',
'hidpi': '1',
'os': 'Mac-10.12.6',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'mac',
'type': Types.OPTIONAL,
},
'Optional Linux Release (NVIDIA)': {
'swarming_dimensions': [
{
'gpu': '10de:104a',
'os': 'Ubuntu',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'linux',
'type': Types.OPTIONAL,
},
# This tryserver doesn't actually exist; it's a separate
# configuration from the Win AMD bot on this waterfall because we
# don't have enough tryserver capacity to run all the tests from
# that bot on win_angle_rel_ng.
'Win7 ANGLE Tryserver (AMD)': {
'swarming_dimensions': [
{
'gpu': '1002:6613',
'os': 'Windows-2008ServerR2-SP1',
'pool': 'Chrome-GPU',
},
],
'build_config': 'Release',
'swarming': True,
'os_type': 'win',
'type': Types.WIN_ANGLE_AMD_TRYSERVER,
},
}
}
# Definition of the client.v8.fyi waterfall's GPU testers.
# NOTE(review): the 'prologue' entries look like hand-maintained builder
# configs (compile targets only, no generated tests); they are consumed
# outside this chunk -- confirm against the emitting code.
V8_FYI_WATERFALL = {
  'name': 'client.v8.fyi',
  'type': Types.V8_FYI,
  'prologue': {
    "V8 Android GN (dbg)": {
      "additional_compile_targets": [
        "chrome_public_apk"
      ],
      "gtest_tests": []
    },
    "V8 Linux GN": {
      "additional_compile_targets": [
        "accessibility_unittests",
        "aura_unittests",
        "browser_tests",
        "cacheinvalidation_unittests",
        "capture_unittests",
        "cast_unittests",
        "cc_unittests",
        "chromedriver_unittests",
        "components_browsertests",
        "components_unittests",
        "content_browsertests",
        "content_unittests",
        "crypto_unittests",
        "dbus_unittests",
        "device_unittests",
        "display_unittests",
        "events_unittests",
        "extensions_browsertests",
        "extensions_unittests",
        "gcm_unit_tests",
        "gfx_unittests",
        "gn_unittests",
        "google_apis_unittests",
        "gpu_ipc_service_unittests",
        "gpu_unittests",
        "interactive_ui_tests",
        "ipc_tests",
        "jingle_unittests",
        "media_unittests",
        "media_blink_unittests",
        "mojo_common_unittests",
        "mojo_public_bindings_unittests",
        "mojo_public_system_unittests",
        "mojo_system_unittests",
        "nacl_loader_unittests",
        "net_unittests",
        "pdf_unittests",
        "ppapi_unittests",
        "printing_unittests",
        "remoting_unittests",
        "sandbox_linux_unittests",
        "skia_unittests",
        "sql_unittests",
        "storage_unittests",
        "sync_integration_tests",
        "ui_base_unittests",
        "ui_touch_selection_unittests",
        "unit_tests",
        "url_unittests",
        "views_unittests",
        "wm_unittests"
      ]
    }
  },
  # Swarmed GPU testers on this waterfall; same schema as the other
  # waterfalls' 'testers' sections in this file.
  'testers': {
    'Win Release (NVIDIA)': {
      'swarming_dimensions': [
        {
          'gpu': WIN_7_NVIDIA_GEFORCE_610_STABLE_DRIVER,
          'os': 'Windows-2008ServerR2-SP1',
          'pool': 'Chrome-GPU',
        },
      ],
      'build_config': 'Release',
      'swarming': True,
      'os_type': 'win',
    },
    'Mac Release (Intel)': {
      'swarming_dimensions': [
        {
          'gpu': '8086:0a2e',
          'os': 'Mac-10.12.6',
        },
      ],
      'build_config': 'Release',
      'swarming': True,
      'os_type': 'mac',
    },
    'Linux Release (NVIDIA)': {
      'swarming_dimensions': [
        {
          'gpu': '10de:104a',
          'os': 'Ubuntu',
          'pool': 'Chrome-GPU',
        },
      ],
      'build_config': 'Release',
      'swarming': True,
      'os_type': 'linux',
    },
    'Linux Release - concurrent marking (NVIDIA)': {
      'swarming_dimensions': [
        {
          'gpu': '10de:104a',
          'os': 'Ubuntu',
          'pool': 'Chrome-GPU',
        },
      ],
      'build_config': 'Release',
      'swarming': True,
      'os_type': 'linux',
    },
    'Android Release (Nexus 5X)': {
      'swarming_dimensions': [
        {
          'device_type': 'bullhead',
          'device_os': 'M',
          'os': 'Android'
        },
      ],
      'build_config': 'android-chromium',
      'swarming': True,
      'os_type': 'android',
    },
  }
}
# gtest definitions shared across waterfalls, consumed by generate_gtest.
# Schema of each entry (as used by the generator functions in this file):
#   tester_configs: constraint dicts; the test runs on a tester when any
#     of them matches (see tester_config_matches_tester).
#   disabled_tester_configs: constraint dicts that veto the test.
#   test: underlying gtest executable when it differs from the dict key
#     (the key then becomes the entry's 'name').
#   args / desktop_args / android_args / linux_args: command line; the
#     platform-specific variants are merged into 'args' conditionally.
#   swarming / desktop_swarming: swarming overrides such as shard counts.
COMMON_GTESTS = {
  'angle_deqp_egl_tests': {
    'tester_configs': [
      {
        'predicate': Predicates.DEQP,
        # Run only on the Win7 Release NVIDIA 32- and 64-bit bots
        # (and trybots) for the time being, at least until more capacity is
        # added.
        # TODO(jmadill): Run on the Linux Release NVIDIA bots.
        'build_configs': ['Release', 'Release_x64'],
        'swarming_dimension_sets': [
          {
            'gpu': NVIDIA_GEFORCE_610_ALL_DRIVERS,
            'os': 'Windows-2008ServerR2-SP1'
          }
        ],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'swarming': {
      'shards': 4,
    },
    'args': [
      '--test-launcher-batch-limit=400'
    ]
  },
  'angle_deqp_gles2_d3d11_tests': {
    'tester_configs': [
      {
        'predicate': Predicates.DEQP,
        'swarming_dimension_sets': [
          # NVIDIA Win 7
          {
            'gpu': NVIDIA_GEFORCE_610_ALL_DRIVERS,
            'os': 'Windows-2008ServerR2-SP1'
          },
          # AMD Win 7
          {
            'gpu': '1002:6613',
            'os': 'Windows-2008ServerR2-SP1'
          },
        ],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'desktop_swarming': {
      'shards': 4,
    },
    'test': 'angle_deqp_gles2_tests',
    'args': [
      '--test-launcher-batch-limit=400',
      '--deqp-egl-display-type=angle-d3d11'
    ]
  },
  'angle_deqp_gles2_gl_tests': {
    'tester_configs': [
      {
        'predicate': Predicates.DEQP,
        'swarming_dimension_sets': [
          # Linux NVIDIA
          {
            'gpu': '10de:104a',
            'os': 'Ubuntu'
          },
          # Mac Intel
          {
            'gpu': '8086:0a2e',
            'os': 'Mac-10.12.6'
          },
          # Mac AMD
          {
            'gpu': '1002:6821',
            'hidpi': '1',
            'os': 'Mac-10.12.6'
          },
        ],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'desktop_swarming': {
      'shards': 4,
    },
    'test': 'angle_deqp_gles2_tests',
    'args': [
      '--test-launcher-batch-limit=400',
      '--deqp-egl-display-type=angle-gl'
    ]
  },
  'angle_deqp_gles2_gles_tests': {
    'tester_configs': [
      {
        'predicate': Predicates.DEQP,
        # Run on Nexus 5X swarmed bots.
        'build_configs': ['android-chromium'],
        'swarming_dimension_sets': [
          # Nexus 5X
          {
            'device_type': 'bullhead',
            'device_os': 'M',
            'os': 'Android'
          }
        ],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'swarming': {
      'shards': 4,
    },
    'test': 'angle_deqp_gles2_tests',
    # Only pass the display type to desktop. The Android runner doesn't support
    # passing args to the executable but only one display type is supported on
    # Android anyways.
    'desktop_args': [
      '--test-launcher-batch-limit=400',
      '--deqp-egl-display-type=angle-gles'
    ],
    'android_args': [
      '--enable-xml-result-parsing',
      '--shard-timeout=500'
    ],
  },
  'angle_deqp_gles3_gles_tests': {
    'tester_configs': [
      {
        'predicate': Predicates.DEQP,
        # Run on Nexus 5X swarmed bots.
        'build_configs': ['android-chromium'],
        'swarming_dimension_sets': [
          # Nexus 5X
          {
            'device_type': 'bullhead',
            'device_os': 'M',
            'os': 'Android'
          }
        ],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'swarming': {
      'shards': 12,
    },
    'test': 'angle_deqp_gles3_tests',
    # Only pass the display type to desktop. The Android runner doesn't support
    # passing args to the executable but only one display type is supported on
    # Android anyways.
    'desktop_args': [
      '--test-launcher-batch-limit=400',
      '--deqp-egl-display-type=angle-gles'
    ],
    'android_args': [
      '--enable-xml-result-parsing',
      '--shard-timeout=500'
    ],
  },
  'angle_deqp_gles3_d3d11_tests': {
    'tester_configs': [
      {
        # TODO(jmadill): Run this on ANGLE roll tryservers.
        'predicate': Predicates.DEQP,
        'swarming_dimension_sets': [
          # NVIDIA Win 7
          {
            'gpu': NVIDIA_GEFORCE_610_ALL_DRIVERS,
            'os': 'Windows-2008ServerR2-SP1'
          },
          # AMD Win 7
          {
            'gpu': '1002:6613',
            'os': 'Windows-2008ServerR2-SP1'
          },
        ],
      }
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'swarming': {
      'shards': 12,
    },
    'test': 'angle_deqp_gles3_tests',
    'args': [
      '--test-launcher-batch-limit=400',
      '--deqp-egl-display-type=angle-d3d11'
    ]
  },
  'angle_deqp_gles3_gl_tests': {
    'tester_configs': [
      {
        # TODO(jmadill): Run this on ANGLE roll tryservers.
        'predicate': Predicates.DEQP,
        'swarming_dimension_sets': [
          # NVIDIA Linux
          {
            'gpu': '10de:104a',
            'os': 'Ubuntu'
          },
          # Mac Intel
          {
            'gpu': '8086:0a2e',
            'os': 'Mac-10.12.6'
          },
          # Mac AMD
          {
            'gpu': '1002:6821',
            'hidpi': '1',
            'os': 'Mac-10.12.6'
          },
        ],
      }
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'swarming': {
      'shards': 12,
    },
    'test': 'angle_deqp_gles3_tests',
    'args': [
      '--test-launcher-batch-limit=400',
      '--deqp-egl-display-type=angle-gl'
    ]
  },
  'angle_deqp_gles31_d3d11_tests': {
    'tester_configs': [
      {
        'predicate': Predicates.DEQP,
        'swarming_dimension_sets': [
          {
            'gpu': NVIDIA_GEFORCE_610_ALL_DRIVERS,
            'os': 'Windows-2008ServerR2-SP1'
          }
        ],
      }
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'swarming': {
      'shards': 6,
    },
    'test': 'angle_deqp_gles31_tests',
    'args': [
      '--test-launcher-batch-limit=400',
      '--deqp-egl-display-type=angle-d3d11'
    ]
  },
  'angle_deqp_gles31_gl_tests': {
    'tester_configs': [
      {
        'predicate': Predicates.DEQP,
        'swarming_dimension_sets': [
          {
            'gpu': NVIDIA_GEFORCE_610_ALL_DRIVERS,
            'os': 'Windows-2008ServerR2-SP1'
          },
          {
            'gpu': '10de:104a',
            'os': 'Ubuntu'
          }
        ],
      }
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'swarming': {
      'shards': 6,
    },
    'test': 'angle_deqp_gles31_tests',
    'args': [
      '--test-launcher-batch-limit=400',
      '--deqp-egl-display-type=angle-gl'
    ]
  },
  # Until we have more capacity, run angle_end2end_tests only on the
  # FYI waterfall, the ANGLE trybots (which mirror the FYI waterfall),
  # and the optional trybots (mainly used during ANGLE rolls).
  'angle_end2end_tests': {
    'tester_configs': [
      {
        'predicate': Predicates.FYI_AND_OPTIONAL_AND_WIN_ANGLE_AMD,
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          # TODO(ynovikov) Investigate why the test breaks on older devices.
          'Android Release (Nexus 5)',
          'Android Release (Nexus 6)',
          'Android Release (Nexus 9)',
        ],
      },
    ],
    'desktop_args': [
      '--use-gpu-in-tests',
      # ANGLE test retries deliberately disabled to prevent flakiness.
      # http://crbug.com/669196
      '--test-launcher-retry-limit=0'
    ]
  },
  # white_box tests should run where end2end tests run
  'angle_white_box_tests': {
    'tester_configs': [
      {
        'predicate': Predicates.FYI_AND_OPTIONAL_AND_WIN_ANGLE_AMD,
        # There are only Windows white box tests for now.
        # Enable on more configs when there will be relevant tests.
        'os_types': ['win'],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'desktop_args': [
      # ANGLE test retries deliberately disabled to prevent flakiness.
      # http://crbug.com/669196
      '--test-launcher-retry-limit=0'
    ]
  },
  'angle_unittests': {
    'desktop_args': [
      '--use-gpu-in-tests',
      # ANGLE test retries deliberately disabled to prevent flakiness.
      # http://crbug.com/669196
      '--test-launcher-retry-limit=0'
    ],
    'linux_args': [ '--no-xvfb' ]
  },
  # Until the media-only tests are extracted from content_unittests,
  # and audio_unittests and content_unittests can be run on the commit
  # queue with --require-audio-hardware-for-testing, run them only on
  # the FYI waterfall.
  #
  # Note that the transition to the Chromium recipe has forced the
  # removal of the --require-audio-hardware-for-testing flag for the
  # time being. See crbug.com/574942.
  'audio_unittests': {
    'tester_configs': [
      {
        'predicate': Predicates.FYI_AND_OPTIONAL,
      }
    ],
    # Don't run these tests on Android.
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
      {
        'os_types': ['android'],
      },
    ],
    'args': ['--use-gpu-in-tests']
  },
  # TODO(kbr): content_unittests is killing the Linux GPU swarming
  # bots. crbug.com/582094 . It's not useful now anyway until audio
  # hardware is deployed on the swarming bots, so stop running it
  # everywhere.
  # 'content_unittests': {},
  'gl_tests': {
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'desktop_args': ['--use-gpu-in-tests']
  },
  'gl_tests_passthrough': {
    'tester_configs': [
      {
        'os_types': ['win'],
      }
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'test': 'gl_tests',
    'desktop_args': [
      '--use-gpu-in-tests',
      '--use-passthrough-cmd-decoder',
    ]
  },
  'gl_unittests': {
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'desktop_args': ['--use-gpu-in-tests'],
    'linux_args': [ '--no-xvfb' ]
  },
  'gpu_unittests': {
    'tester_configs': [
      {
        # Run this on the FYI waterfall and optional tryservers.
        'predicate': Predicates.FYI_AND_OPTIONAL,
        # gpu_unittests is killing the Swarmed Linux GPU bots
        # similarly to how content_unittests was:
        # http://crbug.com/763498 .
        'os_types': ['win', 'mac', 'android'],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
  },
  # The gles2_conform_tests are closed-source and deliberately only run
  # on the FYI waterfall and the optional tryservers.
  'gles2_conform_test': {
    'tester_configs': [
      {
        # Run this on the FYI waterfall and optional tryservers.
        'predicate': Predicates.FYI_AND_OPTIONAL,
      }
    ],
    # Don't run these tests on Android.
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
      {
        'os_types': ['android'],
      },
    ],
    'args': ['--use-gpu-in-tests']
  },
  'gles2_conform_d3d9_test': {
    'tester_configs': [
      {
        # Run this on the FYI waterfall and optional tryservers.
        'predicate': Predicates.FYI_AND_OPTIONAL,
        'os_types': ['win'],
      }
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'args': [
      '--use-gpu-in-tests',
      '--use-angle=d3d9',
    ],
    'test': 'gles2_conform_test',
  },
  'gles2_conform_gl_test': {
    'tester_configs': [
      {
        # Run this on the FYI waterfall and optional tryservers.
        'predicate': Predicates.FYI_AND_OPTIONAL,
        'os_types': ['win'],
      }
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'args': [
      '--use-gpu-in-tests',
      '--use-angle=gl',
      '--disable-gpu-sandbox',
    ],
    'test': 'gles2_conform_test',
  },
  # Face and barcode detection unit tests, which currently only run on
  # Mac OS, and require physical hardware.
  'services_unittests': {
    'tester_configs': [
      {
        # Run this on the FYI waterfall and optional tryservers.
        'predicate': Predicates.FYI_AND_OPTIONAL,
        'os_types': ['mac'],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
      {
        'swarming_dimension_sets': [
          # These tests fail on the Mac Pros.
          {
            'gpu': '1002:679e',
          },
        ],
      },
    ],
    'args': [
      '--gtest_filter=*Detection*',
      '--use-gpu-in-tests'
    ]
  },
  'swiftshader_unittests': {
    'tester_configs': [
      {
        # Run this on the FYI waterfall and optional tryservers.
        'predicate': Predicates.FYI_AND_OPTIONAL,
        'os_types': ['win', 'linux', 'mac'],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
          'Mac Experimental Release (Intel)',
          'Mac Experimental Retina Release (AMD)',
          'Mac Experimental Retina Release (NVIDIA)',
          'Mac Pro Release (AMD)',
        ],
      },
    ],
  },
  'tab_capture_end2end_tests': {
    'tester_configs': [
      {
        'build_configs': ['Release', 'Release_x64'],
        'disabled_instrumentation_types': ['tsan'],
      }
    ],
    # Don't run these tests on Android.
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
        'os_types': ['android'],
      },
    ],
    'args': [
      '--enable-gpu',
      '--test-launcher-bot-mode',
      '--test-launcher-jobs=1',
      '--gtest_filter=CastStreamingApiTestWithPixelOutput.EndToEnd*:' + \
      'TabCaptureApiPixelTest.EndToEnd*'
    ],
    'linux_args': [ '--no-xvfb' ],
    'test': 'browser_tests',
  },
  'video_decode_accelerator_d3d11_unittest': {
    'tester_configs': [
      {
        'os_types': ['win']
      },
    ],
    'args': [
      '--use-angle=d3d11',
      '--use-test-data-path',
      '--test_video_data=test-25fps.h264:320:240:250:258:::1',
    ],
    'test': 'video_decode_accelerator_unittest',
  },
  'video_decode_accelerator_d3d9_unittest': {
    'tester_configs': [
      {
        # Run this on the FYI waterfall and optional tryservers.
        'predicate': Predicates.FYI_ONLY,
        'os_types': ['win']
      },
    ],
    'args': [
      '--use-angle=d3d9',
      '--use-test-data-path',
      '--test_video_data=test-25fps.h264:320:240:250:258:::1',
    ],
    'test': 'video_decode_accelerator_unittest',
  },
  'video_decode_accelerator_gl_unittest': {
    'tester_configs': [
      {
        # Run this on the FYI waterfall and optional tryservers.
        'predicate': Predicates.FYI_ONLY,
        'os_types': ['win']
      },
    ],
    # Windows Intel doesn't have the GL extensions to support this test
    'disabled_tester_configs': [
      {
        'names': [
          'Win10 Release (Intel HD 630)',
        ],
      },
    ],
    'args': [
      '--use-angle=gl',
      '--use-test-data-path',
      '--test_video_data=test-25fps.h264:320:240:250:258:::1',
    ],
    'test': 'video_decode_accelerator_unittest',
  },
}
# This requires a hack because the isolate's name is different than
# the executable's name. On the few non-swarmed testers, this causes
# the executable to not be found. It would be better if the Chromium
# recipe supported running isolates locally. crbug.com/581953
#
# Overrides applied by generate_gtest when a tester is not swarmed.
NON_SWARMED_GTESTS = {
  'tab_capture_end2end_tests': {
    # Single 'swarming' entry. The dict previously declared this key
    # twice (top and bottom) with identical values; in a Python dict
    # literal the later one silently overwrote the earlier one.
    'swarming': {
      'can_use_on_swarming_builders': False,
    },
    # Don't run these tests on Android.
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
        'os_types': ['android'],
      },
    ],
    'test': 'browser_tests',
    'args': [
      '--enable-gpu',
      '--no-xvfb',
      '--test-launcher-jobs=1',
      '--gtest_filter=CastStreamingApiTestWithPixelOutput.EndToEnd*:' + \
      'TabCaptureApiPixelTest.EndToEnd*'
    ],
  }
}
# These tests use Telemetry's new browser_test_runner, which is a much
# simpler harness for correctness testing.
# Entries share the tester_configs / disabled_tester_configs / args
# schema of COMMON_GTESTS above.
# NOTE(review): 'target_name', 'extra_browser_args', 'asan_args',
# 'precommit_args' and 'non_precommit_args' are consumed by the
# isolated-script generator outside this chunk -- confirm semantics there.
TELEMETRY_GPU_INTEGRATION_TESTS = {
  'context_lost': {
    'tester_configs': [
      {
        'predicate': Predicates.DEFAULT_PLUS_V8,
        'disabled_instrumentation_types': ['tsan'],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
  },
  'depth_capture': {
    'tester_configs': [
      {
        'predicate': Predicates.DEFAULT_PLUS_V8,
        'disabled_instrumentation_types': ['tsan'],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
  },
  'gpu_process_launch_tests': {
    'target_name': 'gpu_process',
    'tester_configs': [
      {
        'predicate': Predicates.DEFAULT_PLUS_V8,
        'disabled_instrumentation_types': ['tsan'],
      }
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
  },
  'hardware_accelerated_feature': {
    'tester_configs': [
      {
        'predicate': Predicates.DEFAULT_PLUS_V8,
        'disabled_instrumentation_types': ['tsan'],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
  },
  'info_collection_tests': {
    'target_name': 'info_collection',
    # ${gpu_vendor_id} / ${gpu_device_id} are expanded by
    # substitute_args from the tester's swarming 'gpu' dimension.
    'args': [
      '--expected-vendor-id',
      '${gpu_vendor_id}',
      '--expected-device-id',
      '${gpu_device_id}',
    ],
    'tester_configs': [
      {
        'predicate': Predicates.DEFAULT_AND_OPTIONAL,
        'disabled_instrumentation_types': ['tsan'],
      }
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
          # The Mac ASAN swarming runs on two different GPU types so we can't
          # have one expected vendor ID / device ID
          'Mac GPU ASAN Release',
        ],
      },
    ],
  },
  'maps_pixel_test': {
    'target_name': 'maps',
    'args': [
      '--os-type',
      '${os_type}',
      '--build-revision',
      '${got_revision}',
      '--test-machine-name',
      '${buildername}',
    ],
    'tester_configs': [
      {
        'predicate': Predicates.DEFAULT_PLUS_V8,
        'disabled_instrumentation_types': ['tsan'],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
  },
  'pixel_test': {
    'target_name': 'pixel',
    'args': [
      '--refimg-cloud-storage-bucket',
      'chromium-gpu-archive/reference-images',
      '--os-type',
      '${os_type}',
      '--build-revision',
      '${got_revision}',
      '--test-machine-name',
      '${buildername}',
    ],
    'non_precommit_args': [
      '--upload-refimg-to-cloud-storage',
    ],
    'precommit_args': [
      '--download-refimg-from-cloud-storage',
    ],
    'tester_configs': [
      {
        'predicate': Predicates.DEFAULT_PLUS_V8,
        'disabled_instrumentation_types': ['tsan'],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
  },
  'screenshot_sync': {
    'tester_configs': [
      {
        'predicate': Predicates.DEFAULT_PLUS_V8,
        'disabled_instrumentation_types': ['tsan'],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
  },
  'trace_test': {
    'tester_configs': [
      {
        'predicate': Predicates.DEFAULT_PLUS_V8,
        'disabled_instrumentation_types': ['tsan'],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
  },
  'webgl_conformance': {
    'tester_configs': [
      {
        'predicate': Predicates.DEFAULT_PLUS_V8,
        'disabled_instrumentation_types': ['tsan'],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'asan_args': ['--is-asan'],
    'android_swarming': {
      # On desktop platforms these don't take very long (~7 minutes),
      # but on Android they take ~30 minutes and we want to shard them
      # when sharding is available -- specifically on the Nexus 5X
      # bots, which are currently the only Android configuration on
      # the waterfalls where these tests are swarmed. If we had to
      # restrict the sharding to certain Android devices, then we'd
      # need some way to apply these Swarming parameters only to a
      # subset of machines, like the way the tester_configs work.
      'shards': 6,
    },
    'android_args': [
      # The current working directory when run via isolate is
      # out/Debug or out/Release. Reference this file relatively to
      # it.
      '--read-abbreviated-json-results-from=' + \
      '../../content/test/data/gpu/webgl_conformance_tests_output.json',
    ],
  },
  'webgl_conformance_d3d9_tests': {
    'tester_configs': [
      {
        # Run this on the FYI waterfall and optional tryservers.
        'predicate': Predicates.FYI_AND_OPTIONAL,
        'os_types': ['win'],
        'disabled_instrumentation_types': ['tsan'],
      }
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'target_name': 'webgl_conformance',
    'extra_browser_args': [
      '--use-angle=d3d9',
    ],
    'asan_args': ['--is-asan'],
    'swarming': {
      'shards': 2,
    },
  },
  'webgl_conformance_gl_tests': {
    'tester_configs': [
      {
        # Run this on the FYI waterfall and optional tryservers.
        'predicate': Predicates.FYI_AND_OPTIONAL,
        'os_types': ['win'],
        'disabled_instrumentation_types': ['tsan'],
      }
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
      {
        'swarming_dimension_sets': [
          # crbug.com/555545 and crbug.com/649824:
          # Disable webgl_conformance_gl_tests on some Win/AMD cards.
          # Always fails on older cards, flaky on newer cards.
          # Note that these must match the GPUs exactly; wildcard
          # matches (i.e., only device ID) aren't supported!
          {
            'gpu': '1002:6779',
            'os': 'Windows-2008ServerR2-SP1'
          },
          {
            'gpu': '1002:6613',
            'os': 'Windows-2008ServerR2-SP1'
          },
          # BUG 590951: Disable webgl_conformance_gl_tests on Win/Intel
          {
            'gpu': '8086:041a',
            'os': 'Windows-2008ServerR2-SP1'
          },
          {
            'gpu': '8086:0412',
            'os': 'Windows-2008ServerR2-SP1'
          },
        ],
      },
    ],
    'target_name': 'webgl_conformance',
    'extra_browser_args': [
      '--use-angle=gl',
    ],
    'asan_args': ['--is-asan'],
    'swarming': {
      'shards': 2,
    },
  },
  'webgl_conformance_d3d11_passthrough': {
    'tester_configs': [
      {
        # Run this on the FYI waterfall, optional tryservers, and Win
        # ANGLE AMD tryserver.
        'predicate': Predicates.FYI_AND_OPTIONAL_AND_WIN_ANGLE_AMD,
        'os_types': ['win'],
        'disabled_instrumentation_types': ['tsan'],
      }
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'target_name': 'webgl_conformance',
    'extra_browser_args': [
      '--use-angle=d3d11',
      '--use-passthrough-cmd-decoder',
    ],
    'asan_args': ['--is-asan'],
    'swarming': {
      'shards': 2,
    },
  },
  'webgl_conformance_gl_passthrough': {
    'tester_configs': [
      {
        # Run this on the FYI waterfall and optional tryservers.
        'predicate': Predicates.FYI_AND_OPTIONAL,
        'os_types': ['linux'],
        'disabled_instrumentation_types': ['tsan'],
      }
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'target_name': 'webgl_conformance',
    'extra_browser_args': [
      '--use-gl=angle',
      '--use-angle=gl',
      '--use-passthrough-cmd-decoder',
    ],
    'asan_args': ['--is-asan'],
    'swarming': {
      'shards': 2,
    },
  },
  'webgl2_conformance_tests': {
    'tester_configs': [
      {
        # The WebGL 2.0 conformance tests take over an hour to run on
        # the Debug bots, which is too long.
        'build_configs': ['Release', 'Release_x64'],
        'predicate': Predicates.FYI_OPTIONAL_V8_AND_WIN_ANGLE_AMD,
        'disabled_instrumentation_types': ['tsan'],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
          # http://crbug.com/599451: this test is currently too slow
          # to run on x64 in Debug mode. Need to shard the tests.
          'Win7 x64 Debug (NVIDIA)',
          # The Mac NVIDIA Retina bots don't have the capacity to run
          # this test suite on mac_optional_gpu_tests_rel.
          'Optional Mac Retina Release (NVIDIA)',
        ],
        # Don't run these tests on Android yet.
        'os_types': ['android'],
      },
    ],
    'target_name': 'webgl_conformance',
    'args': [
      '--webgl-conformance-version=2.0.1',
      # The current working directory when run via isolate is
      # out/Debug or out/Release. Reference this file relatively to
      # it.
      '--read-abbreviated-json-results-from=' + \
      '../../content/test/data/gpu/webgl2_conformance_tests_output.json',
    ],
    'asan_args': ['--is-asan'],
    'swarming': {
      # These tests currently take about an hour and fifteen minutes
      # to run. Split them into roughly 5-minute shards.
      'shards': 15,
    },
  },
  'webgl2_conformance_gl_passthrough_tests': {
    'tester_configs': [
      {
        # The WebGL 2.0 conformance tests take over an hour to run on
        # the Debug bots, which is too long.
        'build_configs': ['Release'],
        'predicate': Predicates.FYI_ONLY,
        # Only run on the NVIDIA Release and Intel Release Linux bots.
        'swarming_dimension_sets': [
          {
            'gpu': '10de:104a',
            'os': 'Ubuntu'
          },
          {
            'gpu': '8086:0412',
            'os': 'Ubuntu'
          },
          {
            'gpu': '8086:1912',
            'os': 'Ubuntu'
          },
        ],
        'disabled_instrumentation_types': ['tsan'],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'target_name': 'webgl_conformance',
    'extra_browser_args': [
      '--use-gl=angle',
      '--use-angle=gl',
      '--use-passthrough-cmd-decoder',
    ],
    'args': [
      '--webgl-conformance-version=2.0.1',
      # The current working directory when run via isolate is
      # out/Debug or out/Release. Reference this file relatively to
      # it.
      '--read-abbreviated-json-results-from=' + \
      '../../content/test/data/gpu/webgl2_conformance_tests_output.json',
    ],
    'asan_args': ['--is-asan'],
    'swarming': {
      # These tests currently take about an hour and fifteen minutes
      # to run serially.
      'shards': 20,
    },
  },
  'webgl2_conformance_gl_tests': {
    'tester_configs': [
      {
        # The WebGL 2.0 conformance tests take over an hour to run on
        # the Debug bots, which is too long.
        'build_configs': ['Release'],
        'predicate': Predicates.FYI_ONLY,
        # Only run on the NVIDIA Release Windows bots.
        'swarming_dimension_sets': [
          {
            'gpu': NVIDIA_GEFORCE_610_ALL_DRIVERS,
            'os': 'Windows-2008ServerR2-SP1'
          },
        ],
        'disabled_instrumentation_types': ['tsan'],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'target_name': 'webgl_conformance',
    'extra_browser_args': [
      '--use-angle=gl',
    ],
    'args': [
      '--webgl-conformance-version=2.0.1',
      # The current working directory when run via isolate is
      # out/Debug or out/Release. Reference this file relatively to
      # it.
      '--read-abbreviated-json-results-from=' + \
      '../../content/test/data/gpu/webgl2_conformance_tests_output.json',
    ],
    'asan_args': ['--is-asan'],
    'swarming': {
      # These tests currently take about an hour and fifteen minutes
      # to run serially.
      'shards': 20,
    },
  },
  'webgl2_conformance_d3d11_passthrough_tests': {
    'tester_configs': [
      {
        # The WebGL 2.0 conformance tests take over an hour to run on
        # the Debug bots, which is too long.
        'build_configs': ['Release'],
        'predicate': Predicates.FYI_ONLY,
        # Only run on the NVIDIA Release Windows bots.
        'swarming_dimension_sets': [
          {
            'gpu': NVIDIA_GEFORCE_610_ALL_DRIVERS,
            'os': 'Windows-2008ServerR2-SP1'
          },
        ],
        'disabled_instrumentation_types': ['tsan'],
      },
    ],
    'target_name': 'webgl_conformance',
    'extra_browser_args': [
      '--use-angle=d3d11',
      '--use-passthrough-cmd-decoder',
    ],
    'args': [
      '--webgl-conformance-version=2.0.1',
      # The current working directory when run via isolate is
      # out/Debug or out/Release. Reference this file relatively to
      # it.
      '--read-abbreviated-json-results-from=' + \
      '../../content/test/data/gpu/webgl2_conformance_tests_output.json',
    ],
    'asan_args': ['--is-asan'],
    'swarming': {
      # These tests currently take about an hour and fifteen minutes
      # to run. Split them into roughly 5-minute shards.
      'shards': 20,
    },
  },
}
# These isolated tests don't use telemetry. They need to be placed in the
# isolated_scripts section of the generated json.
# Entries share the tester_configs / disabled_tester_configs / args
# schema of COMMON_GTESTS above.
NON_TELEMETRY_ISOLATED_SCRIPT_TESTS = {
  # We run angle_perftests on the ANGLE CQ to ensure the tests don't crash.
  'angle_perftests': {
    'tester_configs': [
      {
        'predicate': Predicates.FYI_AND_OPTIONAL,
        # Run on the Win/Linux Release NVIDIA bots.
        'build_configs': ['Release'],
        'swarming_dimension_sets': [
          {
            'gpu': NVIDIA_GEFORCE_610_ALL_DRIVERS,
            'os': 'Windows-2008ServerR2-SP1'
          },
          {
            'gpu': '10de:104a',
            'os': 'Ubuntu'
          }
        ],
      },
    ],
    'disabled_tester_configs': [
      {
        'names': [
          'Linux Ozone (Intel)',
        ],
      },
    ],
    'args': [
      # Tell the tests to exit after one frame for faster iteration.
      '--one-frame-only',
    ],
  },
}
def substitute_args(tester_config, args):
  """Expands ${...} placeholders in |args| from |tester_config|.

  Supported substitutions are os_type, gpu_vendor_id and gpu_device_id.
  The vendor/device IDs default to '0' when the tester's first swarming
  dimension set has no PCI-style 'gpu' entry (e.g. Android bots).
  """
  values = {
      'os_type': tester_config['os_type'],
      'gpu_vendor_id': '0',
      'gpu_device_id': '0',
  }
  primary_dims = tester_config['swarming_dimensions'][0]
  if 'gpu' in primary_dims:
    # Strip any '-<driver version>' suffix, then split 'vendor:device'.
    ids = primary_dims['gpu'].split('-')[0].split(':')
    values['gpu_vendor_id'] = ids[0]
    values['gpu_device_id'] = ids[1]
  return [string.Template(a).safe_substitute(values) for a in args]
def matches_swarming_dimensions(tester_config, dimension_sets):
  """Returns True if any dict in |dimension_sets| matches a tester dimension.

  A requested dimension set matches one of the tester's
  swarming_dimensions dicts when every requested key is present and its
  value is equal, except that a requested value ending in '*' is treated
  as a prefix wildcard.
  """
  for dimensions in dimension_sets:
    for cur_dims in tester_config['swarming_dimensions']:
      match = True
      # items() instead of the Python-2-only iteritems() so the script
      # also runs under Python 3; behavior is identical.
      for key, value in dimensions.items():
        if key not in cur_dims:
          match = False
        elif value.endswith('*'):
          if not cur_dims[key].startswith(value[0:-1]):
            match = False
        elif cur_dims[key] != value:
          match = False
      if match:
        return True
  return False
def is_android(tester_config):
  """Returns True when the tester's os_type is 'android'."""
  return 'android' == tester_config['os_type']
def is_linux(tester_config):
  """Returns True when the tester's os_type is 'linux'."""
  return 'linux' == tester_config['os_type']
def is_asan(tester_config):
  """Returns the tester's 'is_asan' flag; absent means False."""
  if 'is_asan' in tester_config:
    return tester_config['is_asan']
  return False
def get_tester_type(tester_config):
  """Returns a list describing the type of this tester.

  The list holds the bot's own 'type' tag when it has one, followed by
  the type of its parent waterfall; predicates match against it.
  """
  own_type = [tester_config['type']] if 'type' in tester_config else []
  return own_type + [tester_config['parent']['type']]
def tester_config_matches_tester(tester_name, tester_config, tc,
check_waterfall):
if check_waterfall:
if not tc.get('predicate', Predicates.DEFAULT)(
get_tester_type(tester_config)):
return False
if 'names' in tc:
# Give priority to matching the tester_name.
if tester_name in tc['names']:
return True
if not tester_name in tc['names']:
return False
if 'os_types' in tc:
if not tester_config['os_type'] in tc['os_types']:
return False
if 'instrumentation_type' in tester_config:
if 'disabled_instrumentation_types' in tc:
if tester_config['instrumentation_type'] in \
tc['disabled_instrumentation_types']:
return False
if 'build_configs' in tc:
if not tester_config['build_config'] in tc['build_configs']:
return False
if 'swarming_dimension_sets' in tc:
if not matches_swarming_dimensions(tester_config,
tc['swarming_dimension_sets']):
return False
return True
def should_run_on_tester(tester_name, tester_config, test_config):
  """Decides whether |test_config| should run on the named tester.

  A match in 'disabled_tester_configs' vetoes the test outright. With
  'tester_configs' present, the test runs only when at least one of them
  matches (waterfall predicate included); without it, the test runs by
  default subject to the predicate check on an empty constraint.
  """
  for disabled in test_config.get('disabled_tester_configs', []):
    if tester_config_matches_tester(tester_name, tester_config, disabled,
                                    False):
      return False
  if 'tester_configs' not in test_config:
    # Run nearly all tests by default, but let
    # tester_config_matches_tester filter out undesired ones, such as
    # tests that should only run on the Optional bots.
    return tester_config_matches_tester(tester_name, tester_config, {}, True)
  return any(
      tester_config_matches_tester(tester_name, tester_config, tc, True)
      for tc in test_config['tester_configs'])
def remove_tester_configs_from_result(result):
  """Strips generator-only bookkeeping keys from |result| in place.

  'tester_configs' and 'disabled_tester_configs' only control which bots
  a test runs on; they must not leak into the generated JSON. pop() with
  a default replaces the redundant membership-test-then-pop pattern.
  """
  result.pop('tester_configs', None)
  result.pop('disabled_tester_configs', None)
def generate_gtest(tester_name, tester_config, test, test_config):
  """Builds the JSON-ready gtest entry for one tester, or returns None.

  |test| is the dictionary key naming the test; |test_config| is its
  value (COMMON_GTESTS schema). Returns a deep copy with swarming
  dimensions attached, the platform-conditional args merged into 'args',
  and the generator-only bookkeeping keys removed. Returns None when the
  test is not supposed to run on this tester.
  """
  if not should_run_on_tester(tester_name, tester_config, test_config):
    return None
  # Deep-copy so mutations below never touch the shared config dicts.
  result = copy.deepcopy(test_config)
  # When the isolate name ('test') differs from the dict key, the key
  # becomes the displayed 'name'.
  if 'test' in result:
    result['name'] = test
  else:
    result['test'] = test
  if (not tester_config['swarming']) and test in NON_SWARMED_GTESTS:
    # Need to override this result.
    result = copy.deepcopy(NON_SWARMED_GTESTS[test])
    result['name'] = test
  else:
    if not 'swarming' in result:
      result['swarming'] = {}
    result['swarming'].update({
      'can_use_on_swarming_builders': tester_config['swarming'],
      'dimension_sets': tester_config['swarming_dimensions']
    })
    if is_android(tester_config):
      # Integrate with the unified logcat system.
      result['swarming'].update({
        'cipd_packages': [
          {
            'cipd_package': 'infra/tools/luci/logdog/butler/${platform}',
            'location': 'bin',
            'revision': 'git_revision:ff387eadf445b24c935f1cf7d6ddd279f8a6b04c'
          }
        ],
        'output_links': [
          {
            'link': [
              'https://luci-logdog.appspot.com/v/?s',
              '=android%2Fswarming%2Flogcats%2F',
              '${TASK_ID}%2F%2B%2Funified_logcats'
            ],
            'name': 'shard #${SHARD_INDEX} logcats'
          }
        ]
      })

  # Merges result[key] into result['args'] when fn(tester_config) holds,
  # then always drops the conditional key from the output.
  def add_conditional_args(key, fn):
    if key in result:
      if fn(tester_config):
        if not 'args' in result:
          result['args'] = []
        result['args'] += result[key]
      # Don't put the conditional args in the JSON.
      result.pop(key)

  add_conditional_args('desktop_args', lambda cfg: not is_android(cfg))
  add_conditional_args('linux_args', is_linux)
  add_conditional_args('android_args', is_android)

  if 'desktop_swarming' in result:
    if not is_android(tester_config):
      result['swarming'].update(result['desktop_swarming'])
    # Don't put the desktop_swarming in the JSON.
    result.pop('desktop_swarming')
  # Remove the tester_configs and disabled_tester_configs, if present,
  # from the result.
  remove_tester_configs_from_result(result)
  # This flag only has an effect on the Linux bots that run tests
  # locally (as opposed to via Swarming), which are only those couple
  # on the chromium.gpu.fyi waterfall. Still, there is no harm in
  # specifying it everywhere.
  result['use_xvfb'] = False
  return result
def generate_gtests(tester_name, tester_config, test_dictionary):
  # Produce the list of gtest entries for one tester.
  #
  # The relative ordering of some of the tests is important to
  # minimize differences compared to the handwritten JSON files, since
  # Python's sorts are stable and there are some tests with the same
  # key (see gles2_conform_d3d9_test and similar variants). Avoid
  # losing the order by avoiding coalescing the dictionaries into one.
  gtests = []
  # sorted(d.items()) yields the same pairs as sorted(d.iteritems()) on
  # Python 2, but unlike iteritems() it also works under Python 3.
  for test_name, test_config in sorted(test_dictionary.items()):
    test = generate_gtest(tester_name, tester_config,
                          test_name, test_config)
    if test:
      # generate_gtest may veto the test generation on this platform.
      gtests.append(test)
  return gtests
def generate_isolated_test(tester_name, tester_config, test, test_config,
                           extra_browser_args, isolate_name,
                           override_compile_targets, prefix_args):
  # Build the isolated-script JSON entry for one test on one tester, or
  # return None when the test's tester_configs veto it.
  if not should_run_on_tester(tester_name, tester_config, test_config):
    return None
  test_args = ['-v']
  # Fold browser flags from both the caller and the test config into a
  # single --extra-browser-args flag.
  extra_browser_args_string = ""
  if extra_browser_args != None:
    extra_browser_args_string += ' '.join(extra_browser_args)
  if 'extra_browser_args' in test_config:
    extra_browser_args_string += ' ' + ' '.join(
      test_config['extra_browser_args'])
  if extra_browser_args_string != "":
    test_args.append('--extra-browser-args=' + extra_browser_args_string)
  if 'args' in test_config:
    test_args.extend(substitute_args(tester_config, test_config['args']))
  # Platform/sanitizer-conditional argument lists.
  if 'desktop_args' in test_config and not is_android(tester_config):
    test_args.extend(substitute_args(tester_config,
                                     test_config['desktop_args']))
  if 'android_args' in test_config and is_android(tester_config):
    test_args.extend(substitute_args(tester_config,
                                     test_config['android_args']))
  if 'asan_args' in test_config and is_asan(tester_config):
    test_args.extend(substitute_args(tester_config,
                                     test_config['asan_args']))
  # The step name must end in 'test' or 'tests' in order for the
  # results to automatically show up on the flakiness dashboard.
  # (At least, this was true some time ago.) Continue to use this
  # naming convention for the time being to minimize changes.
  step_name = test
  if not (step_name.endswith('test') or step_name.endswith('tests')):
    step_name = '%s_tests' % step_name
  # Prepend GPU-specific flags.
  swarming = {
    'can_use_on_swarming_builders': tester_config['swarming'],
    'dimension_sets': tester_config['swarming_dimensions']
  }
  # Test-level swarming settings override the tester-level defaults.
  if 'swarming' in test_config:
    swarming.update(test_config['swarming'])
  if 'android_swarming' in test_config and is_android(tester_config):
    swarming.update(test_config['android_swarming'])
  result = {
    'args': prefix_args + test_args,
    'isolate_name': isolate_name,
    'name': step_name,
    'swarming': swarming,
  }
  if override_compile_targets != None:
    result['override_compile_targets'] = override_compile_targets
  # Pass through commit-queue-only / CI-only argument lists verbatim.
  if 'non_precommit_args' in test_config:
    result['non_precommit_args'] = test_config['non_precommit_args']
  if 'precommit_args' in test_config:
    result['precommit_args'] = test_config['precommit_args']
  return result
def generate_telemetry_test(tester_name, tester_config,
                            test, test_config):
  """Build the isolated-script entry for one Telemetry-based GPU test."""
  browser_flags = ['--enable-logging=stderr', '--js-flags=--expose-gc']
  # The benchmark may run under a different target name than the step.
  benchmark = test_config.get('target_name') or test
  browser_arg = '--browser=%s' % tester_config['build_config'].lower()
  # --passthrough displays more of the logging in Telemetry when run
  # via typ, in particular some of the warnings about tests being
  # expected to fail, but passing.
  harness_args = [benchmark, '--show-stdout', browser_arg, '--passthrough']
  return generate_isolated_test(tester_name, tester_config, test,
                                test_config, browser_flags,
                                'telemetry_gpu_integration_test',
                                ['telemetry_gpu_integration_test'],
                                harness_args)
def generate_telemetry_tests(tester_name, tester_config,
                             test_dictionary):
  # Produce the sorted list of Telemetry isolated-script entries for one
  # tester; entries vetoed for this tester are dropped.
  isolated_scripts = []
  # items() replaces the Python-2-only iteritems(); sorted() output is
  # identical on both Python versions.
  for test_name, test_config in sorted(test_dictionary.items()):
    test = generate_telemetry_test(
      tester_name, tester_config, test_name, test_config)
    if test:
      isolated_scripts.append(test)
  return isolated_scripts
def generate_non_telemetry_isolated_test(tester_name, tester_config,
                                         test, test_config):
  """Build an isolated-script entry whose isolate shares the test's name."""
  return generate_isolated_test(tester_name, tester_config, test, test_config,
                                extra_browser_args=None,
                                isolate_name=test,
                                override_compile_targets=None,
                                prefix_args=[])
def generate_non_telemetry_isolated_tests(tester_name, tester_config,
                                          test_dictionary):
  # Produce the sorted list of non-Telemetry isolated-script entries for
  # one tester; entries vetoed for this tester are dropped.
  isolated_scripts = []
  # items() replaces the Python-2-only iteritems(); sorted() output is
  # identical on both Python versions.
  for test_name, test_config in sorted(test_dictionary.items()):
    test = generate_non_telemetry_isolated_test(
      tester_name, tester_config, test_name, test_config)
    if test:
      isolated_scripts.append(test)
  return isolated_scripts
def install_parent_links(waterfall):
  # Make the testers point back to the top-level waterfall so that we
  # can ask about its properties when determining whether a given test
  # should run on a given waterfall.
  #
  # items() replaces the Python-2-only iteritems(); behavior is
  # identical and the code also runs under Python 3.
  for name, config in waterfall.get('testers', {}).items():
    config['parent'] = waterfall
def generate_all_tests(waterfall, filename):
  # Emit the complete JSON description of one waterfall into
  # testing/buildbot/<filename>: prologue and builder entries are copied
  # through verbatim; tester entries get generated test lists.
  tests = {}
  for builder, config in waterfall.get('prologue', {}).iteritems():
    tests[builder] = config
  for builder, config in waterfall.get('builders', {}).iteritems():
    tests[builder] = config
  for name, config in waterfall['testers'].iteritems():
    gtests = generate_gtests(name, config, COMMON_GTESTS)
    isolated_scripts = \
      generate_telemetry_tests(
        name, config, TELEMETRY_GPU_INTEGRATION_TESTS) + \
      generate_non_telemetry_isolated_tests(name, config,
                                            NON_TELEMETRY_ISOLATED_SCRIPT_TESTS)
    tests[name] = {
      'gtest_tests': sorted(gtests, key=lambda x: x['test']),
      'isolated_scripts': sorted(isolated_scripts, key=lambda x: x['name'])
    }
  # The 'AAAAA' keys sort first, so with sort_keys=True below the
  # do-not-edit banner lands at the top of the generated file.
  tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {}
  tests['AAAAA2 See generate_buildbot_json.py to make changes'] = {}
  # NOTE(review): 'wb' plus json.dump/str writes is Python-2 behavior;
  # under Python 3 this mode would need to be 'w'.
  with open(os.path.join(SRC_DIR, 'testing', 'buildbot', filename), 'wb') as fp:
    json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
    fp.write('\n')
def main():
  """Regenerate every waterfall's buildbot JSON file; returns exit code 0."""
  waterfalls = [
    (FYI_WATERFALL, 'chromium.gpu.fyi.json'),
    (WATERFALL, 'chromium.gpu.json'),
    (V8_FYI_WATERFALL, 'client.v8.fyi.json'),
  ]
  # Parent links must be installed on every waterfall before any file is
  # generated, since generation consults tester->waterfall back-pointers.
  for waterfall, _ in waterfalls:
    install_parent_links(waterfall)
  for waterfall, filename in waterfalls:
    generate_all_tests(waterfall, filename)
  return 0
# Script entry point: regenerate all waterfall JSON files and exit with
# main()'s status code.
if __name__ == "__main__":
  sys.exit(main())
| [
"jacob-chen@iotwrt.com"
] | jacob-chen@iotwrt.com |
42eefd3a6d0d2cec8f98979cc4dc82356db3b8bb | e780a5bd72f98ca2513c993d64a85b08578166a6 | /buildout-cache/eggs/Zope2-2.13.26-py2.7.egg/Testing/ZopeTestCase/zopedoctest/testWarningsTest.py | 8dd208bceaed33f910d7d572212d8302ed3bc1e6 | [] | no_license | vedantc98/Plone-test | 023246597ffe848e2a49b9f65742ff49127b190b | 9fd520fc78481e2c0b9b7ec427821e7f961c777e | refs/heads/master | 2021-03-30T22:14:33.368739 | 2018-03-11T19:22:58 | 2018-03-11T19:22:58 | 124,671,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | ##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Example doctest
"""
from unittest import TestSuite
from Testing.ZopeTestCase import ZopeDocFileSuite
def test_suite():
    """Assemble the suite running the WarningsTest.txt doctest file."""
    suite = TestSuite()
    suite.addTest(ZopeDocFileSuite('WarningsTest.txt'))
    return suite
| [
"vedantc98@gmail.com"
] | vedantc98@gmail.com |
615ad3d1352d00f1e5667b7258d5c581fde14184 | 89670ba42a5087a3965cadb678a9f87b6d8286cf | /huati/migrations/0011_auto_20171111_1211.py | 2e77a6d316d0ae1c993e9a7bf58b897a1c3513c4 | [] | no_license | qimengmeng/django_lei_zhihu | f56dd08229bf66e183f3f5b82145d5e183a4d161 | 296a7ee1435efcb2492e0ca078a1d9b5c153e41e | refs/heads/master | 2020-04-08T15:11:35.696655 | 2018-01-06T11:43:08 | 2018-01-06T11:43:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-11 04:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: redeclare Hua.fenlei as a
    # many-to-many to Huafen with an explicit reverse accessor name.

    dependencies = [
        ('huati', '0010_auto_20171111_1202'),
    ]

    operations = [
        migrations.AlterField(
            model_name='hua',
            name='fenlei',
            # related_name='fenlei' is the reverse accessor from Huafen.
            field=models.ManyToManyField(related_name='fenlei', to='huati.Huafen'),
        ),
    ]
| [
"leileili126@163.com"
] | leileili126@163.com |
8b58710838901f702b0d87c2013b13497abe0572 | 6bae32e5ad8f198e31d6da864b0a9f44d54c5ec6 | /src/monteur/egginfo/write.py | e427ef7edb14d7221532778e5b91c5b7fc054297 | [] | no_license | thefunny42/Zeam-Setup | 42dcce867a947fb82e6b17cebd47f92285187a9e | 0c27945b87b0150ee462b493cf646111822e8867 | refs/heads/master | 2020-04-15T15:37:40.153075 | 2013-03-15T10:42:36 | 2013-03-15T10:42:36 | 445,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,474 | py |
import logging
import os
from monteur.utils import create_directory
logger = logging.getLogger('monteur')
def write_pkg_info(path, package):
    """Write a PKG-INFO metadata file for ``package`` into ``path``.

    Optional fields (summary, author, author-email, license) are only
    emitted when they have a truthy value; Platform always appears and
    defaults to ``UNKNOWN``.
    """
    def write_field(pkg_info, key, value):
        # Skip empty/None optional metadata.
        if value:
            pkg_info.write('%s: %s\n' % (key, value))

    # 'with' guarantees the file is closed even if a write raises
    # (the original leaked the handle on error).
    with open(os.path.join(path, 'PKG-INFO'), 'w') as pkg_info:
        pkg_info.write('Metadata-Version: 1.0\n')
        pkg_info.write('Name: %s\n' % package.name)
        pkg_info.write('Version: %s\n' % package.version)
        write_field(pkg_info, 'Summary', package.summary)
        write_field(pkg_info, 'Author', package.author)
        write_field(pkg_info, 'Author-email', package.author_email)
        write_field(pkg_info, 'License', package.license)
        pkg_info.write('Platform: %s\n' % (package.platform or 'UNKNOWN'))
def write_requires(path, package):
    """Write ``requires.txt`` listing base requirements and extras.

    Nothing is written when the package has no base requirements — in
    that case extras are skipped too.
    NOTE(review): an extras-only package therefore produces no
    requires.txt at all; confirm that is intended.
    """
    requirements = package.requirements
    if requirements:
        # 'with' closes the file even if formatting a requirement raises.
        with open(os.path.join(path, 'requires.txt'), 'w') as output:
            for requirement in requirements:
                output.write(str(requirement) + '\n')
            for extra, extra_requirements in package.extras.items():
                # Two newlines separate each [extra] section from the
                # entries above it.
                output.write('\n\n[%s]\n' % extra)
                for requirement in extra_requirements:
                    output.write(str(requirement) + '\n')
def write_missing_setuptool_files(path, package):
    """Create the placeholder files setuptools tooling expects.

    Each placeholder just contains a single newline. ``package`` is
    accepted for writer-signature uniformity but unused.
    """
    for filename in ('dependency_links.txt', 'not-zip-safe'):
        # 'with' closes each file deterministically (the original relied
        # on explicit close and leaked on error).
        with open(os.path.join(path, filename), 'w') as placeholder:
            placeholder.write('\n')
def write_entry_points(path, package):
    """Write ``entry_points.txt`` in INI style, one section per group.

    Nothing is written when the package declares no entry points.
    """
    if package.entry_points:
        lines = []
        for section, entries in package.entry_points.items():
            lines.append('[%s]\n' % section)
            for name, module in entries.items():
                lines.append('%s = %s\n' % (name, module))
            # Blank line terminates each section.
            lines.append('\n')
        # Build the whole payload first, then write under 'with' so the
        # file handle cannot leak.
        with open(os.path.join(path, 'entry_points.txt'), 'w') as output:
            output.write(''.join(lines))
def write_egg_info(package, writers=(write_pkg_info,
                                     write_missing_setuptool_files,
                                     write_entry_points,
                                     write_requires), package_path=None):
    """Create an EGG-INFO directory for ``package`` and run each writer.

    ``writers`` is any iterable of callables taking (egg_info_dir,
    package); the default is a tuple rather than a list so the shared
    default can never be mutated across calls. ``package_path`` defaults
    to ``package.path``.
    """
    if package_path is None:
        package_path = package.path
    logger.info('Writing EGG-INFO in %s for %s' % (package_path, package.name))
    path = os.path.join(package_path, 'EGG-INFO')
    create_directory(path)
    for writer in writers:
        writer(path, package)
| [
"thefunny@gmail.com"
] | thefunny@gmail.com |
96411aeaf2d788d02c078f263691bf88afb267b8 | 657c80336bce1cc6158cd349ce208c5e680a4d0d | /pyglet/canvas/cocoa.py | cd44673204ca093754a7e6678941b655c1cfa070 | [
"BSD-3-Clause"
] | permissive | Xinmudotmoe/pyglet | b37628618647bf3b1e3d7db28202a5e14c60450c | 144257c365ca85528c6a4c5bed8141e683d7a9b6 | refs/heads/master | 2021-05-29T22:05:40.676643 | 2015-10-24T05:55:49 | 2015-10-24T05:55:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,972 | py | """
"""
from ctypes import *
from ctypes import util
from pyglet import app
from .base import Display, Screen, ScreenMode, Canvas
from pyglet.libs.darwin.cocoapy import *
class CocoaDisplay(Display):
    """Display backed by Quartz; enumerates the active CGDirectDisplays."""

    def get_screens(self):
        MAX_DISPLAYS = 256
        display_ids = (CGDirectDisplayID * MAX_DISPLAYS)()
        active_count = c_uint32()
        quartz.CGGetActiveDisplayList(
            MAX_DISPLAYS, display_ids, byref(active_count))
        # Only the first `active_count` slots of the array are valid.
        return [CocoaScreen(self, display_id)
                for display_id in list(display_ids)[:active_count.value]]
class CocoaScreen(Screen):
    # One physical display, identified by its Quartz CGDirectDisplayID.
    # Wraps Quartz mode enumeration/switching and display capture.

    def __init__(self, display, displayID):
        bounds = quartz.CGDisplayBounds(displayID)
        # FIX ME:
        # Probably need to convert the origin coordinates depending on context:
        # http://www.cocoabuilder.com/archive/cocoa/233492-ns-cg-rect-conversion-and-screen-coordinates.html
        x, y = bounds.origin.x, bounds.origin.y
        width, height = bounds.size.width, bounds.size.height
        super().__init__(
            display, int(x), int(y), int(width), int(height))
        self._cg_display_id = displayID
        # Save the default mode so we can restore to it.
        self._default_mode = self.get_mode()

    # FIX ME:
    # This method is needed to get multi-monitor support working properly.
    # However the NSScreens.screens() message currently sends out a warning:
    # "*** -[NSLock unlock]: lock (<NSLock: 0x...> '(null)') unlocked when not locked"
    # on Snow Leopard and apparently causes python to crash on Lion.
    #
    # def get_nsscreen(self):
    #     """Returns the NSScreen instance that matches our CGDirectDisplayID."""
    #     NSScreen = ObjCClass('NSScreen')
    #     # Get a list of all currently active NSScreens and then search through
    #     # them until we find one that matches our CGDisplayID.
    #     screen_array = NSScreen.screens()
    #     count = screen_array.count()
    #     for i in range(count):
    #         nsscreen = screen_array.objectAtIndex_(i)
    #         screenInfo = nsscreen.deviceDescription()
    #         displayID = screenInfo.objectForKey_(get_NSString('NSScreenNumber'))
    #         displayID = displayID.intValue()
    #         if displayID == self._cg_display_id:
    #             return nsscreen
    #     return None

    def get_matching_configs(self, template):
        # A canvas with no NSView is enough for config matching.
        canvas = CocoaCanvas(self.display, self, None)
        return template.match(canvas)

    def get_modes(self):
        # Copy the mode list, wrap each entry, then release the CFArray.
        cgmodes = c_void_p(
            quartz.CGDisplayCopyAllDisplayModes(self._cg_display_id, None))
        modes = [CocoaScreenMode(self, cgmode)
                 for cgmode in cfarray_to_list(cgmodes)]
        cf.CFRelease(cgmodes)
        return modes

    def get_mode(self):
        # Copy (not borrow) the current mode; CocoaScreenMode retains it,
        # so the local reference is released here.
        cgmode = c_void_p(quartz.CGDisplayCopyDisplayMode(self._cg_display_id))
        mode = CocoaScreenMode(self, cgmode)
        quartz.CGDisplayModeRelease(cgmode)
        return mode

    def set_mode(self, mode):
        assert mode.screen is self
        # Capture the display before switching so other apps don't react.
        quartz.CGDisplayCapture(self._cg_display_id)
        quartz.CGDisplaySetDisplayMode(self._cg_display_id, mode.cgmode, None)
        self.width = mode.width
        self.height = mode.height

    def restore_mode(self):
        # Return to the mode recorded at construction and release capture.
        quartz.CGDisplaySetDisplayMode(
            self._cg_display_id, self._default_mode.cgmode, None)
        quartz.CGDisplayRelease(self._cg_display_id)

    def capture_display(self):
        quartz.CGDisplayCapture(self._cg_display_id)

    def release_display(self):
        quartz.CGDisplayRelease(self._cg_display_id)
class CocoaScreenMode(ScreenMode):
    """Wrapper around a Quartz CGDisplayMode, retained for our lifetime."""

    def __init__(self, screen, cgmode):
        super().__init__(screen)
        # Retain the mode so it stays valid until __del__ releases it.
        quartz.CGDisplayModeRetain(cgmode)
        self.cgmode = cgmode
        self.width = int(quartz.CGDisplayModeGetWidth(cgmode))
        self.height = int(quartz.CGDisplayModeGetHeight(cgmode))
        self.depth = self.getBitsPerPixel(cgmode)
        self.rate = quartz.CGDisplayModeGetRefreshRate(cgmode)

    def __del__(self):
        quartz.CGDisplayModeRelease(self.cgmode)
        self.cgmode = None

    def getBitsPerPixel(self, cgmode):
        # Pixel-encoding strings from
        # /System/Library/Frameworks/IOKit.framework/Headers/graphics/IOGraphicsTypes.h
        depth_by_encoding = {
            "PPPPPPPP": 8,                           # IO8BitIndexedPixels
            "-RRRRRGGGGGBBBBB": 16,                  # IO16BitDirectPixels
            "--------RRRRRRRRGGGGGGGGBBBBBBBB": 32,  # IO32BitDirectPixels
        }
        cfstring = c_void_p(quartz.CGDisplayModeCopyPixelEncoding(cgmode))
        encoding = cfstring_to_string(cfstring)
        cf.CFRelease(cfstring)
        # Unknown encodings report a depth of 0, as before.
        return depth_by_encoding.get(encoding, 0)
class CocoaCanvas(Canvas):
    # Drawing surface tied to a CocoaScreen and (optionally) an NSView.

    def __init__(self, display, screen, nsview):
        super().__init__(display)
        # nsview may be None when the canvas is only used for config
        # matching (see CocoaScreen.get_matching_configs).
        self.screen = screen
        self.nsview = nsview
| [
"leif.theden@gmail.com"
] | leif.theden@gmail.com |
6a4d8a465d39c66733cc610bd65c7d3bd8d6ee32 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp/sblp_ut=3.5_rd=1_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=41/sched.py | e32c511497a8def6c0ab223964299fde320c376c | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | -S 0 -X RUN -Q 0 -L 2 99 400
-S 0 -X RUN -Q 0 -L 2 67 200
-S 0 -X RUN -Q 0 -L 2 67 200
-S 1 -X RUN -Q 0 -L 2 44 250
-S 1 -X RUN -Q 0 -L 2 43 200
-S 1 -X RUN -Q 0 -L 2 43 250
-S 2 -X RUN -Q 1 -L 1 39 250
-S 2 -X RUN -Q 1 -L 1 39 400
-S 2 -X RUN -Q 1 -L 1 35 200
-S 2 -X RUN -Q 1 -L 1 35 250
-S 2 -X RUN -Q 1 -L 1 29 300
-S 3 -X RUN -Q 2 -L 1 28 175
-S 3 -X RUN -Q 2 -L 1 28 175
-S 3 -X RUN -Q 2 -L 1 26 250
-S 3 -X RUN -Q 2 -L 1 21 125
-S 3 -X RUN -Q 2 -L 1 20 100
-S 4 -X RUN -Q 3 -L 1 19 200
-S 4 -X RUN -Q 3 -L 1 18 175
-S 4 -X RUN -Q 3 -L 1 17 150
-S 4 -X RUN -Q 3 -L 1 15 125
-S 4 -X RUN -Q 3 -L 1 13 100
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
5d0c80d4877726e75d3f2a31ffb416c83d048c44 | 92f69f1f33f6b3aa29dc4f3ccce7d4a06eb24bdf | /deploy/creepageDistanceModel/models_creepage.py | 6ae75beb36e53ae310c65a43b5f31be4a4f18fdf | [
"MIT"
] | permissive | carlsummer/lcnn | 5d0b4c81e3b626e0380fdd36ad5685f3a6b9eb8f | b7ad7fa5502243ac50ca15a355e0001c5992d050 | refs/heads/master | 2023-06-21T05:45:44.910052 | 2021-07-29T00:55:33 | 2021-07-29T00:55:33 | 384,020,516 | 0 | 0 | MIT | 2021-07-08T06:13:42 | 2021-07-08T06:13:41 | null | UTF-8 | Python | false | false | 17,619 | py | import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import uuid
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .config import M
# Width/height the input image is normalized to before inference.
NORMALIZATION_WIDTH = 64
NORMALIZATION_HEIGHT = 512
# Maximum pixel value (8-bit images).
PIXS_MAX_VALUE = 255.0
# Data-type tags ("tb"/"lr" — presumably top-bottom vs. left-right
# orientation; confirm against the dataset loader).
TB_DATATYPE = "tb"
LR_DATATYPE = "lr"
# Pixel tolerance used when scoring accuracy.
ACC_PX_THRESH=16
# Random seed.
RANDOM_SEED = 1024

__all__ = ["HourglassNet", "hg"]
class Bottleneck2D(nn.Module):
    """Pre-activation 2-D bottleneck residual block (BN -> ReLU -> conv, x3).

    The output has ``planes * expansion`` channels; when the input does
    not already match, callers pass ``downsample`` to project it before
    the residual addition.
    """

    expansion = 2

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck2D, self).__init__()
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 2, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x
        # Three pre-activation stages: 1x1 reduce, 3x3, 1x1 expand.
        y = self.conv1(self.relu(self.bn1(x)))
        y = self.conv2(self.relu(self.bn2(y)))
        y = self.conv3(self.relu(self.bn3(y)))
        if self.downsample is not None:
            identity = self.downsample(x)
        return y + identity
class Hourglass(nn.Module):
    # One recursive hourglass: at each depth level, keep a skip branch at
    # the current resolution, max-pool down, recurse, then upsample and
    # add the two branches back together.

    def __init__(self, block, num_blocks, planes, depth):
        super(Hourglass, self).__init__()
        self.depth = depth
        self.block = block
        self.hg = self._make_hour_glass(block, num_blocks, planes, depth)

    def _make_residual(self, block, num_blocks, planes):
        # Chain of `num_blocks` residual blocks at constant width.
        layers = []
        for i in range(0, num_blocks):
            layers.append(block(planes * block.expansion, planes))
        return nn.Sequential(*layers)

    def _make_hour_glass(self, block, num_blocks, planes, depth):
        # Each level owns 3 residual chains (skip, down, up); the
        # innermost level (i == 0) gets a 4th chain as the bottleneck.
        hg = []
        for i in range(depth):
            res = []
            for j in range(3):
                res.append(self._make_residual(block, num_blocks, planes))
            if i == 0:
                res.append(self._make_residual(block, num_blocks, planes))
            hg.append(nn.ModuleList(res))
        return nn.ModuleList(hg)

    def _hour_glass_forward(self, n, x):
        up1 = self.hg[n - 1][0](x)           # skip branch, full resolution
        low1 = F.max_pool2d(x, 2, stride=2)  # downsample by 2
        low1 = self.hg[n - 1][1](low1)
        if n > 1:
            low2 = self._hour_glass_forward(n - 1, low1)  # recurse deeper
        else:
            low2 = self.hg[n - 1][3](low1)   # innermost bottleneck chain
        low3 = self.hg[n - 1][2](low2)
        up2 = F.interpolate(low3, scale_factor=2)  # upsample back
        out = up1 + up2
        return out

    def forward(self, x):
        return self._hour_glass_forward(self.depth, x)
class HourglassNet(nn.Module):
    """Hourglass model from Newell et al ECCV 2016"""

    def __init__(self, block, head, depth, num_stacks, num_blocks, num_classes):
        super(HourglassNet, self).__init__()
        self.inplanes = 64
        self.num_feats = 128
        self.num_stacks = num_stacks
        # Stem: 7x7 stride-2 conv, residual blocks and a max-pool bring
        # the input down to 1/4 resolution before the hourglass stacks.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_residual(block, self.inplanes, 1)
        self.layer2 = self._make_residual(block, self.inplanes, 1)
        self.layer3 = self._make_residual(block, self.num_feats, 1)
        self.maxpool = nn.MaxPool2d(2, stride=2)
        # build hourglass modules
        ch = self.num_feats * block.expansion
        # vpts = []
        hg, res, fc, score, fc_, score_ = [], [], [], [], [], []
        for i in range(num_stacks):
            hg.append(Hourglass(block, num_blocks, self.num_feats, depth))
            res.append(self._make_residual(block, self.num_feats, num_blocks))
            fc.append(self._make_fc(ch, ch))
            # `head` maps features to per-stack prediction channels.
            score.append(head(ch, num_classes))
            # vpts.append(VptsHead(ch))
            # vpts.append(nn.Linear(ch, 9))
            # score.append(nn.Conv2d(ch, num_classes, kernel_size=1))
            # score[i].bias.data[0] += 4.6
            # score[i].bias.data[2] += 4.6
            if i < num_stacks - 1:
                # 1x1 convs that feed this stack's output into the next.
                fc_.append(nn.Conv2d(ch, ch, kernel_size=1))
                score_.append(nn.Conv2d(num_classes, ch, kernel_size=1))
        self.hg = nn.ModuleList(hg)
        self.res = nn.ModuleList(res)
        self.fc = nn.ModuleList(fc)
        self.score = nn.ModuleList(score)
        # self.vpts = nn.ModuleList(vpts)
        self.fc_ = nn.ModuleList(fc_)
        self.score_ = nn.ModuleList(score_)

    def _make_residual(self, block, planes, blocks, stride=1):
        # Residual chain; adds a 1x1 projection shortcut when the channel
        # count or stride changes. Note: mutates self.inplanes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                )
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_fc(self, inplanes, outplanes):
        # 1x1 conv + BN followed by the shared ReLU.
        bn = nn.BatchNorm2d(inplanes)
        conv = nn.Conv2d(inplanes, outplanes, kernel_size=1)
        return nn.Sequential(conv, bn, self.relu)

    def forward(self, x):
        out = []
        # out_vps = []
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.maxpool(x)
        x = self.layer2(x)
        x = self.layer3(x)
        for i in range(self.num_stacks):
            y = self.hg[i](x)
            y = self.res[i](y)
            y = self.fc[i](y)
            score = self.score[i](y)
            # pre_vpts = F.adaptive_avg_pool2d(x, (1, 1))
            # pre_vpts = pre_vpts.reshape(-1, 256)
            # vpts = self.vpts[i](x)
            out.append(score)
            # out_vps.append(vpts)
            if i < self.num_stacks - 1:
                # Intermediate supervision: project this stack's features
                # and score back into the next stack's input.
                fc_ = self.fc_[i](y)
                score_ = self.score_[i](score)
                x = x + fc_ + score_
        # Per-stack scores newest-first, plus the last feature map.
        return out[::-1], y  # , out_vps[::-1]
def hg(**kwargs):
    """Construct a stacked-hourglass ``HourglassNet`` from keyword config.

    Required keys: depth, num_stacks, num_blocks, num_classes.
    Optional ``head``: factory mapping (in_channels, out_channels) to a
    prediction-head module; defaults to a 1x1 convolution.
    """
    model = HourglassNet(
        Bottleneck2D,
        # Bug fix: the original default referenced nn.Conv2D, which does
        # not exist in torch.nn (correct name: nn.Conv2d) and raised
        # AttributeError whenever callers omitted 'head'.
        head=kwargs.get("head", lambda c_in, c_out: nn.Conv2d(c_in, c_out, 1)),
        depth=kwargs["depth"],
        num_stacks=kwargs["num_stacks"],
        num_blocks=kwargs["num_blocks"],
        num_classes=kwargs["num_classes"],
    )
    return model
FEATURE_DIM = 8
class LineVectorizer(nn.Module):
    """Line-of-interest head on top of the hourglass backbone.

    Samples candidate line segments from predicted junctions, pools
    backbone features along each segment, scores every candidate with a
    small MLP, and returns the top M.n_out_line lines per image.
    """

    def __init__(self, backbone):
        super().__init__()
        self.backbone = backbone
        # Interpolation weights for M.n_pts0 evenly spaced points per line.
        lambda_ = torch.linspace(0, 1, M.n_pts0)[:, None]
        self.register_buffer("lambda_", lambda_)
        self.do_static_sampling = M.n_stc_posl + M.n_stc_negl > 0
        # 1x1 conv reducing backbone features to M.dim_loi channels.
        self.fc1 = nn.Conv2d(256, M.dim_loi, 1)
        scale_factor = M.n_pts0 // M.n_pts1
        self.pooling = nn.MaxPool1d(scale_factor, scale_factor)
        # Classifier over pooled line features + FEATURE_DIM geometry.
        self.fc2 = nn.Sequential(
            nn.Linear(M.dim_loi * M.n_pts1 + FEATURE_DIM, M.dim_fc),
            nn.ReLU(inplace=True),
            nn.Linear(M.dim_fc, M.dim_fc),
            nn.ReLU(inplace=True),
            nn.Linear(M.dim_fc, 1),
        )

    def forward(self, image,junc,jtyp,Lpos):
        # NOTE(review): only heat-map index i = 0 is sampled below, i.e.
        # this path effectively assumes batch size 1 — confirm with callers.
        result = self.backbone(image)
        h = result["preds"]
        x = self.fc1(result["feature"])
        n_batch, n_channel, row, col = x.shape
        xs, ys, fs, ps, idx, = [], [], [], [], [0]
        i = 0
        p, label, feat = self.sample_lines(
            junc,jtyp,Lpos, h["jmap"][i], h["joff"][i]
        )
        # print("p.shape:", p.shape)
        ys.append(label)
        ps.append(p)
        fs.append(feat)
        # Interpolate M.n_pts0 points between the two endpoints of every
        # candidate line (0.5 shift moves to pixel-center coordinates).
        p = p[:, 0:1, :] * self.lambda_ + p[:, 1:2, :] * (1 - self.lambda_) - 0.5
        p = p.reshape(-1, 2)  # [N_LINE x N_POINT, 2_XY]
        px, py = p[:, 1].contiguous(), p[:, 0].contiguous()
        # Clamp to the quarter-resolution feature-map bounds.
        px0 = px.floor().clamp(min=0, max=int(NORMALIZATION_WIDTH / 4)-1)
        py0 = py.floor().clamp(min=0, max=int(NORMALIZATION_HEIGHT / 4)-1)
        px1 = (px0 + 1).clamp(min=0, max=int(NORMALIZATION_WIDTH / 4)-1)
        py1 = (py0 + 1).clamp(min=0, max=int(NORMALIZATION_HEIGHT / 4)-1)
        px0l, py0l, px1l, py1l = px0.long(), py0.long(), px1.long(), py1.long()
        # Bilinear sampling of features at every interpolated point.
        # xp: [N_LINE, N_CHANNEL, N_POINT]
        xp = (
            (
                x[i, :, py0l, px0l] * (px1 - px) * (py1 - py)
                + x[i, :, py0l, px1l] * (px - px0) * (py1 - py)
                + x[i, :, py1l, px0l] * (px1 - px) * (py - py0)
                + x[i, :, py1l, px1l] * (px - px0) * (py - py0)
            )
            .reshape(n_channel, -1, M.n_pts0)
            .permute(1, 0, 2)
        )
        xp = self.pooling(xp)
        xs.append(xp)
        idx.append(idx[-1] + xp.shape[0])
        x, y = torch.cat(xs), torch.cat(ys)
        f = torch.cat(fs)
        x = x.reshape(-1, M.n_pts1 * M.dim_loi)
        # Concatenate pooled features with the geometric feature vector.
        x = torch.cat([x, f], 1)
        x = self.fc2(x).flatten()
        p = torch.cat(ps)
        s = torch.sigmoid(x)
        b = s > 0.5
        lines = []
        score = []
        for i in range(n_batch):
            p0 = p[idx[i] : idx[i + 1]]
            s0 = s[idx[i] : idx[i + 1]]
            mask = b[idx[i] : idx[i + 1]]
            p0 = p0[mask]
            s0 = s0[mask]
            if len(p0) == 0:
                # No candidate passed the 0.5 threshold: emit zeros.
                lines.append(torch.zeros([1, M.n_out_line, 2, 2], device=p.device))
                score.append(torch.zeros([1, M.n_out_line], device=p.device))
            else:
                v, arg = torch.sort(s0,descending=True)
                # arg = torch.argsort(s0, descending=True)
                p0, s0 = p0[arg], s0[arg]
                # Modulo indexing repeats entries to pad up to n_out_line.
                lines.append(p0[None, torch.arange(M.n_out_line) % len(p0)])
                score.append(s0[None, torch.arange(M.n_out_line) % len(s0)])
        return torch.cat(lines), torch.cat(score)

    def sample_lines(self, junc,jtyp,Lpos, jmap, joff):
        # Pick the top-K junction candidates from the heat map, match them
        # against ground-truth junctions, and enumerate candidate lines
        # between every pair, labeling each via the Lpos adjacency matrix.
        with torch.no_grad():
            n_type = jmap.shape[0]
            jmap = non_maximum_suppression(jmap).reshape(n_type, -1)
            # jmap = jmap.reshape(n_type, -1)
            joff = joff.reshape(n_type, 2, -1)
            max_K = M.n_dyn_junc // n_type
            N = len(junc)
            # K = number of junctions above threshold, capped; at least 2
            # so a line can always be formed.
            K = min(int((jmap > M.eval_junc_thres).float().sum().item()), max_K)
            if K < 2:
                K = 2
            device = jmap.device
            # index: [N_TYPE, K]
            score, index = torch.topk(jmap, k=K)
            # Recover sub-pixel (y, x) from flat index + predicted offset.
            y = (index / int(NORMALIZATION_WIDTH / 4)).float() + torch.gather(joff[:, 0], 1, index) + 0.5
            x = (index % int(NORMALIZATION_WIDTH / 4)).float() + torch.gather(joff[:, 1], 1, index) + 0.5
            # xy: [N_TYPE, K, 2]
            xy = torch.cat([y[..., None], x[..., None]], dim=-1)
            xy_ = xy[..., None, :]
            del x, y, index
            # Match each detected junction to its nearest ground-truth one.
            # dist: [N_TYPE, K, N]
            dist = torch.sum((xy_ - junc) ** 2, -1)
            cost, match = torch.min(dist, -1)
            # xy: [N_TYPE * K, 2]
            # match: [N_TYPE, K]
            # Invalidate matches with the wrong junction type or that are
            # farther than 1.5 px (index N == "unmatched").
            for t in range(n_type):
                match[t, jtyp[match[t]] != t] = N
            match[cost > 1.5 * 1.5] = N
            match = match.flatten()
            # Enumerate all ordered junction pairs, then keep u < v.
            _ = torch.arange(n_type * K, device=device)
            u, v = torch.meshgrid(_, _)
            u, v = u.flatten(), v.flatten()
            up, vp = match[u], match[v]
            label = Lpos[up, vp]
            c = u < v
            # sample lines
            u, v, label = u[c], v[c], label[c]
            xy = xy.reshape(n_type * K, 2)
            xyu, xyv = xy[u], xy[v]
            # Unit direction vector between the two endpoints.
            u2v = xyu - xyv
            u2v /= torch.sqrt((u2v ** 2).sum(-1, keepdim=True)).clamp(min=1e-6)
            # Geometric features (FEATURE_DIM = 8): normalized endpoint
            # coords, direction, and per-endpoint matched flags.
            feat = torch.cat(
                [
                    xyu / torch.tensor([int(NORMALIZATION_HEIGHT / 4),int(NORMALIZATION_WIDTH / 4)]).to(device) * M.use_cood,
                    xyv / torch.tensor([int(NORMALIZATION_HEIGHT / 4),int(NORMALIZATION_WIDTH / 4)]).to(device) * M.use_cood,
                    u2v * M.use_slop,
                    (u[:, None] > K).float(),
                    (v[:, None] > K).float(),
                ],
                1,
            )
            line = torch.cat([xyu[:, None], xyv[:, None]], 1)
            return line, label.float(), feat
def non_maximum_suppression(a):
    """Keep only the local maxima of heat map ``a`` in each 3x3 window.

    ``a`` is a (C, H, W) tensor; every pixel that is not the maximum of
    its 3x3 neighbourhood is zeroed, thinning junction heat maps to
    isolated peaks before top-k sampling. Ties all survive.

    (Two dead commented-out interpolate/unpool variants equivalent to
    this implementation were removed.)
    """
    # Max-pool with padding=1 preserves spatial size; comparing against
    # the pooled map flags exactly the local maxima.
    pooled = F.max_pool2d(a.unsqueeze(0), 3, stride=1, padding=1).squeeze(0)
    keep = (a == pooled).float().clamp(min=0.0)
    return a * keep
class Bottleneck1D(nn.Module):
    """1-D pre-activation bottleneck with an identity skip connection.

    Requires ``inplanes == outplanes``, since the input is added back to
    the transformed signal unchanged.
    """

    def __init__(self, inplanes, outplanes):
        super(Bottleneck1D, self).__init__()
        hidden = outplanes // 2
        # BN -> ReLU -> conv, three times: 1x reduce, 3x, 1x expand.
        stages = [
            nn.BatchNorm1d(inplanes),
            nn.ReLU(inplace=True),
            nn.Conv1d(inplanes, hidden, kernel_size=1),
            nn.BatchNorm1d(hidden),
            nn.ReLU(inplace=True),
            nn.Conv1d(hidden, hidden, kernel_size=3, padding=1),
            nn.BatchNorm1d(hidden),
            nn.ReLU(inplace=True),
            nn.Conv1d(hidden, outplanes, kernel_size=1),
        ]
        self.op = nn.Sequential(*stages)

    def forward(self, x):
        return x + self.op(x)
class MultitaskHead(nn.Module):
    """Parallel 3x3 -> ReLU -> 1x1 conv heads, one per output group.

    Group sizes come from M.head_size; forward concatenates every head's
    output along the channel axis, so total channels equal num_class.
    """

    def __init__(self, input_channels, num_class):
        super(MultitaskHead, self).__init__()
        hidden = int(input_channels / 4)
        self.heads = nn.ModuleList(
            nn.Sequential(
                nn.Conv2d(input_channels, hidden, kernel_size=3, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(hidden, output_channels, kernel_size=1),
            )
            for output_channels in sum(M.head_size, [])
        )
        # Sanity check: the configured groups must add up to num_class.
        assert num_class == sum(sum(M.head_size, []))

    def forward(self, x):
        outputs = [head(x) for head in self.heads]
        return torch.cat(outputs, dim=1)
class MultitaskLearner(nn.Module):
    # Wraps the hourglass backbone and splits its raw output channels
    # into junction heat maps (jmap) and sub-pixel offsets (joff).

    def __init__(self, backbone):
        super(MultitaskLearner, self).__init__()
        self.backbone = backbone
        head_size = M.head_size
        self.num_class = sum(sum(head_size, []))
        # Cumulative channel offsets separating the head groups.
        self.head_off = np.cumsum([sum(h) for h in head_size])

    def forward(self, image):
        outputs, feature = self.backbone(image)
        result = {"feature": feature}
        batch, channel, row, col = outputs[0].shape
        n_jtyp = 1 # batch_size
        # NOTE(review): n_jtyp looks like the number of junction types,
        # not the batch size as the comment above says — confirm.
        offset = self.head_off
        # Only the first (newest) stack output is used for predictions.
        output=outputs[0]
        output = output.transpose(0, 1).reshape([-1, batch, row, col]).contiguous()
        jmap = output[0 : offset[0]].reshape(n_jtyp, 2, batch, row, col)
        joff = output[offset[1] : offset[2]].reshape(n_jtyp, 2, batch, row, col)
        result["preds"] = {
            # Softmax over the 2 jmap channels; keep the foreground prob.
            "jmap": jmap.permute(2, 0, 1, 3, 4).softmax(2)[:, :, 1],
            # Sigmoid-centered offsets in (-0.5, 0.5).
            "joff": joff.permute(2, 0, 1, 3, 4).sigmoid() - 0.5,
        }
        return result
def pline(x1, y1, x2, y2, x, y):
    """Squared distance from point (x, y) to the infinite line through
    (x1, y1) and (x2, y2).

    The projection parameter is not clamped to [0, 1], so this measures
    line (not segment) distance; degenerate zero-length lines are
    guarded by the max(1e-9, ...) denominator.
    """
    dir_x = x2 - x1
    dir_y = y2 - y1
    length_sq = dir_x * dir_x + dir_y * dir_y
    t = ((x - x1) * dir_x + (y - y1) * dir_y) / max(1e-9, float(length_sq))
    err_x = x1 + t * dir_x - x
    err_y = y1 + t * dir_y - y
    return err_x * err_x + err_y * err_y
def plambda(x1, y1, x2, y2, x, y):
    """Projection parameter of (x, y) onto the line (x1, y1) -> (x2, y2).

    0 maps to the first endpoint and 1 to the second; values outside
    [0, 1] fall beyond the segment. Zero-length segments are guarded by
    the max(1e-9, ...) denominator.
    """
    dir_x = x2 - x1
    dir_y = y2 - y1
    length_sq = dir_x * dir_x + dir_y * dir_y
    dot = (x - x1) * dir_x + (y - y1) * dir_y
    return dot / max(1e-9, float(length_sq))
def postprocess(lines, scores, threshold=0.01, tol=1e9, do_clip=False):
    """Greedily suppress near-duplicate line segments.

    ``lines`` is an iterable of (p, q) endpoint pairs (numpy arrays of
    shape (2,)), ``scores`` the parallel confidences. Segments are
    visited in order; each candidate is trimmed against every already
    kept segment that lies within ``threshold`` of its supporting line,
    and dropped entirely when nothing of it survives. With the default
    ``tol=1e9`` any overlapping collinear segment fully covers the
    candidate. Returns (kept_lines, kept_scores) as numpy arrays.

    NOTE(review): ``do_clip`` is accepted but currently unused.

    Fix: restored the final return statement, which had stray residue
    fused onto it.
    """
    nlines, nscores = [], []
    for (p, q), score in zip(lines, scores):
        # [start, end] is the surviving portion of p->q in parameter space.
        start, end = 0, 1
        for a, b in nlines:
            # Skip the overlap test unless the two segments lie within
            # `threshold` of each other's supporting lines.
            if (
                min(
                    max(pline(*p, *q, *a), pline(*p, *q, *b)),
                    max(pline(*a, *b, *p), pline(*a, *b, *q)),
                )
                > threshold ** 2
            ):
                continue
            # Project the kept segment's endpoints onto p->q and widen
            # the covered interval by tol on both sides.
            lambda_a = plambda(*p, *q, *a)
            lambda_b = plambda(*p, *q, *b)
            if lambda_a > lambda_b:
                lambda_a, lambda_b = lambda_b, lambda_a
            lambda_a -= tol
            lambda_b += tol
            # case 1: skip (if not do_clip)
            if start < lambda_a and lambda_b < end:
                continue
            # not intersect
            if lambda_b < start or lambda_a > end:
                continue
            # cover
            if lambda_a <= start and end <= lambda_b:
                start = 10
                break
            # case 2 & 3: clip the overlapping end.
            if lambda_a <= start and start <= lambda_b:
                start = lambda_b
            if lambda_a <= end and end <= lambda_b:
                end = lambda_a
            if start >= end:
                break
        if start >= end:
            # Nothing of the candidate survived; drop it.
            continue
        nlines.append(np.array([p + (q - p) * start, p + (q - p) * end]))
        nscores.append(score)
    return np.array(nlines), np.array(nscores)
"zengxh@chint.com"
] | zengxh@chint.com |
f14b9015107fd96344bbe692fb7204178aa721d2 | 3d0ae7c8693463faa11bacad8e6ea9d0d70b9eb1 | /nlp/src/chunkerScorer.py | 57c8e2a06f10c06d1990556f4473081946c9d041 | [] | no_license | stefie10/slu_hri | a76f79094bd1740676fec5d889411ba3b1d9dc26 | 50753379953e1ff822162eeab094cffe4a30f3e1 | refs/heads/master | 2022-12-14T01:07:51.522258 | 2020-08-31T00:50:12 | 2020-08-31T00:50:12 | 291,386,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,720 | py | from routeDirectionCorpusReader import readSession
from chunker import makeTagger, tokenize
class ConfusionMatrix:
    """Accumulates binary-classification counts (TP/FP/TN/FN) and derives
    the usual summary metrics.  Counts are floats so ratios divide cleanly
    under Python 2 as well."""

    def __init__(self):
        self.TP = 0.0  # true positives
        self.FP = 0.0  # false positives
        self.TN = 0.0  # true negatives
        self.FN = 0.0  # false negatives

    @property
    def numberOfExamples(self):
        """Total number of observations recorded so far."""
        return self.TP + self.FP + self.TN + self.FN

    @property
    def accuracy(self):
        """Fraction of all examples classified correctly.

        Bug fix: the numerator previously used TP + FP (everything
        *predicted* positive) instead of TP + TN (everything correct).
        """
        if self.numberOfExamples == 0:
            return 0.0
        return (self.TP + self.TN) / self.numberOfExamples

    @property
    def precision(self):
        """TP / (TP + FP); 0.0 when nothing positive was predicted.

        Bug fix: previously raised ZeroDivisionError when examples existed
        but TP + FP == 0 (e.g. only TN/FN recorded).
        """
        if self.TP + self.FP == 0:
            return 0.0
        return float(self.TP) / (self.TP + self.FP)

    @property
    def recall(self):
        """TP / (TP + FN); 0.0 when no positive examples were seen."""
        if self.TP + self.FN == 0:
            return 0.0
        return float(self.TP) / (self.TP + self.FN)

    @property
    def f1(self):
        """Harmonic mean of precision and recall; 0.0 when both are 0
        (previously a ZeroDivisionError)."""
        p = self.precision
        r = self.recall
        if p + r == 0:
            return 0.0
        return 2.0 * p * r / (p + r)
def findMatch(testAnnotation, groundTruthAnnotations, matchFunction):
    """Return (index, annotation) of the first ground-truth annotation for
    which matchFunction(testAnnotation, annotation) is truthy, or
    (None, None) when nothing matches."""
    hits = ((i, candidate)
            for i, candidate in enumerate(groundTruthAnnotations)
            if matchFunction(testAnnotation, candidate))
    return next(hits, (None, None))
def ppMatch(x, y):
    """True when two PP annotations agree on both the spatial-relation
    range and the landmark range."""
    same_relation = x.spatialRelation.range == y.spatialRelation.range
    same_landmark = x.landmark.range == y.landmark.range
    return same_relation and same_landmark
def npMatch(x, y):
    """True when two NP annotations cover the same landmark range."""
    lhs = x.landmark.range
    rhs = y.landmark.range
    return lhs == rhs
def score(groundTruthSessions, testSessions):
    """Compare chunker output against hand annotations and print P/R/F1.

    For each instruction, every test annotation is matched by landmark
    range (npMatch) against the ground-truth annotations: a match counts
    as TP, an unmatched test annotation as FP, and a ground-truth
    annotation never matched as FN.  (Python 2 module: print statements.)
    """
    tagger = makeTagger()
    cm = ConfusionMatrix()
    for groundTruth in groundTruthSessions:
        testSession = testSessions[groundTruth]
        for instructionIdx, instruction in enumerate(groundTruth.routeInstructions):
            groundTruthAnnotations = groundTruth.routeAnnotations[instructionIdx]
            indexes, tokens = tokenize(instruction)
            print "tokens", tokens
            tags = tagger.tag(tokens)
            print " ".join(["%s/%s" % (word, tag)
                            for word, tag in tags])
            # One flag per ground-truth annotation: matched by any test
            # annotation yet?
            matchedIndexes = [False for g in groundTruthAnnotations]
            if len(groundTruthAnnotations) != 0:
                print "considering", groundTruth.key, "instruction", instructionIdx
                for testAnnotation in testSession.routeAnnotations[instructionIdx]:
                    idx, groundTruthMatch = findMatch(testAnnotation,
                                                      groundTruthAnnotations,
                                                      npMatch)
                    if groundTruthMatch is None:
                        print "fp", testAnnotation
                        cm.FP += 1
                    else:
                        print "tp", testAnnotation
                        print "\tmatched", groundTruthMatch
                        cm.TP += 1
                        matchedIndexes[idx] = True
                # Ground-truth annotations that no test annotation matched.
                for i, hasMatch in enumerate(matchedIndexes):
                    if not hasMatch:
                        cm.FN += 1
                        print "fn", groundTruthAnnotations[i]
            #else:
                # what to do with true negatives
    print "precision", cm.precision
    print "recall", cm.recall
    print "f1", cm.f1
if __name__ == "__main__":
    # Score the regexp chunker's annotations against the hand annotations
    # ("stefie10") read from the same spreadsheet.
    fname = "data/Direction understanding subjects Floor 1 (Final).ods"
    #fname = "data/Direction understanding subjects Floor 1.ods"
    groundTruthSessions = readSession(fname, "stefie10")
    testSessions = readSession(fname, "regexp_chunker")
    score(groundTruthSessions, testSessions)
| [
"stefie10@alum.mit.edu"
] | stefie10@alum.mit.edu |
b4bdb3c591598f57390240cf90a2ad80f0bde29b | 60e34c75afec810f4b1c2c82495d8d3017f32d33 | /09概率组合数学/03jump.py | b9ec6a464063c03077318da1157013605b35d5c4 | [] | no_license | ares5221/Data-Structures-and-Algorithms | af97c6b34b810c37f152af595846870a7b9b304b | 7c51eee0c375136f995cc063ffc60d33a520d748 | refs/heads/master | 2021-07-17T21:18:46.556958 | 2018-12-03T07:30:13 | 2018-12-03T07:30:13 | 144,227,642 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
'''
给定非负整数数组,初始时在数组起始位置放置一机器人,数组的每个元素表示在当前
位置机器人最大能够跳跃的数目。它的目的是用最少的步数到达数组末端。例如:给定
数组A=[2,3,1,1,2],最少跳步数目是2,对应的跳法是:232。
不能简单使用贪心。
初始步数step赋值为0;
记当前步的控制范围是[i,j],则用k遍历i到j
计算A[k]+k的最大值,记做j2;
step++;继续遍历[j+1,j2]
'''
def Jump(a, n):
if n == 1:
return 0
step, i, j = 0, 0, 0
while j < n:
step += 1
j2 = j
for k in range(i, j + 1):
j2 = max(j2, k + a[k])
if j2 >= n - 1:
return step
i = j + 1
j = j2
if j < i:
return -1
return step
if __name__ == '__main__':
    # Demo: minimum jump count for a sample array.
    sa = [2, 3, 1, 1, 2, 4, 1, 1, 6, 1, 7]
    res = Jump(sa, len(sa))
    print('最小步数', res)  # label means "minimum number of steps"
| [
"674361437@qq.com"
] | 674361437@qq.com |
b4632e7af892ed9695871d7307a6e394648aaa00 | 49c4d5ddda86f05c15587c13cda11f9a40e4c4f1 | /yggdrasil/metaschema/datatypes/ContainerMetaschemaType.py | 8878fb0740b5e018c8d83165dd2bda0b017ec49a | [
"BSD-3-Clause"
] | permissive | ritviksahajpal/yggdrasil | 816c314db9fa48d5e8effbe498c014c7efd063ec | 777549413719918ba208d73018da4df678a1754e | refs/heads/master | 2020-05-17T12:58:44.339879 | 2019-04-24T21:21:56 | 2019-04-24T21:21:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,876 | py | from yggdrasil.metaschema.datatypes import (
get_type_class, complete_typedef, encode_data, encode_data_readable)
from yggdrasil.metaschema.datatypes.MetaschemaType import MetaschemaType
class ContainerMetaschemaType(MetaschemaType):
    r"""Type associated with a container of subtypes.

    Abstract base: subclasses set _container_type/_json_type/_json_property
    and implement _iterate/_assign/_has_element for their concrete
    container (presumably list/dict-like — confirm against subclasses).
    """

    name = 'container'
    description = 'A container of other types.'
    python_types = []
    _container_type = None   # callable producing an empty container
    _json_type = None
    _json_property = None    # JSON-schema property holding element typedefs

    def __init__(self, *args, **kwargs):
        # Per-instance container of element type objects, filled lazily by
        # update_typedef.
        self._typecls = self._container_type()
        super(ContainerMetaschemaType, self).__init__(*args, **kwargs)

    @classmethod
    def _iterate(cls, container):
        r"""Iterate over the contents of the container. Each element returned
        should be a tuple including an index and a value.

        Args:
            container (obj): Object to be iterated over.

        Returns:
            iterator: Iterator over elements in the container.

        """
        raise NotImplementedError("This must be overwritten by the subclass.")

    @classmethod
    def _assign(cls, container, index, value):
        r"""Assign an element in the container to the specified value.

        Args:
            container (obj): Object that element will be assigned to.
            index (obj): Index in the container object where element will be
                assigned.
            value (obj): Value that will be assigned to the element in the
                container object.

        """
        raise NotImplementedError("This must be overwritten by the subclass.")

    @classmethod
    def _has_element(cls, container, index):
        r"""Check to see if an index is in the container.

        Args:
            container (obj): Object that should be checked for index.
            index (obj): Index that should be checked for.

        Returns:
            bool: True if the index is in the container.

        """
        raise NotImplementedError("This must be overwritten by the subclass.")

    @classmethod
    def _get_element(cls, container, index, default):
        r"""Get an element from the container if it exists, otherwise return
        the default.

        Args:
            container (obj): Object that should be returned from.
            index (obj): Index of element that should be returned.
            default (obj): Default that should be returned if the index is not
                in the container.

        Returns:
            object: Container contents at specified element.

        """
        out = default
        if cls._has_element(container, index):
            out = container[index]
        return out

    @classmethod
    def encode_data(cls, obj, typedef):
        r"""Encode an object's data.

        Args:
            obj (object): Object to encode.
            typedef (dict): Type definition that should be used to encode the
                object.

        Returns:
            string: Encoded object.

        """
        container = cls._container_type()
        for k, v in cls._iterate(obj):
            # Use the per-element typedef when the schema provides one.
            vtypedef = None
            if cls._json_property in typedef:
                vtypedef = cls._get_element(typedef[cls._json_property], k, None)
            vbytes = encode_data(v, typedef=vtypedef)
            cls._assign(container, k, vbytes)
        return container

    @classmethod
    def encode_data_readable(cls, obj, typedef):
        r"""Encode an object's data in a readable format.

        Args:
            obj (object): Object to encode.
            typedef (dict): Type definition that should be used to encode the
                object.

        Returns:
            string: Encoded object.

        """
        container = cls._container_type()
        for k, v in cls._iterate(obj):
            if cls._json_property in typedef:
                vtypedef = cls._get_element(typedef[cls._json_property], k, None)
            else:
                vtypedef = None
            vbytes = encode_data_readable(v, typedef=vtypedef)
            cls._assign(container, k, vbytes)
        return container

    @classmethod
    def decode_data(cls, obj, typedef):
        r"""Decode an object.

        Args:
            obj (string): Encoded object to decode.
            typedef (dict): Type definition that should be used to decode the
                object.

        Returns:
            object: Decoded object.

        """
        container = cls._container_type()
        for k, v in cls._iterate(obj):
            # NOTE(review): unlike encode_data, this assumes
            # typedef[cls._json_property] exists — confirm callers guarantee it.
            vtypedef = cls._get_element(typedef[cls._json_property], k, {})
            vcls = get_type_class(vtypedef['type'])
            cls._assign(container, k, vcls.decode_data(v, vtypedef))
        return container

    @classmethod
    def extract_typedef(cls, metadata):
        r"""Extract the minimum typedef required for this type from the provided
        metadata.

        Args:
            metadata (dict): Message metadata.

        Returns:
            dict: Encoded type definition with unncessary properties removed.

        """
        out = super(ContainerMetaschemaType, cls).extract_typedef(metadata)
        if cls._json_property in out:
            contents = out[cls._json_property]
            if isinstance(contents, cls.python_types):
                # Recursively minimize each element's typedef.
                for k, v in cls._iterate(contents):
                    if 'type' in v:
                        vcls = get_type_class(v['type'])
                        cls._assign(contents, k, vcls.extract_typedef(v))
                out[cls._json_property] = contents
        return out

    def update_typedef(self, **kwargs):
        r"""Update the current typedef with new values.

        Args:
            **kwargs: All keyword arguments are considered to be new type
                definitions. If they are a valid definition property, they
                will be copied to the typedef associated with the instance.

        Returns:
            dict: A dictionary of keyword arguments that were not added to the
                type definition.

        """
        # NOTE: `map` shadows the builtin here; kept for API stability.
        map = kwargs.get(self._json_property, None)
        map_out = self._container_type()
        if isinstance(map, self.python_types):
            for k, v in self._iterate(map):
                v_typedef = complete_typedef(v)
                if self._has_element(self._typecls, k):
                    # Existing element type: update it; leftovers go to map_out.
                    self._assign(map_out, k,
                                 self._typecls[k].update_typedef(**v_typedef))
                else:
                    # New element: instantiate its type class.
                    self._assign(self._typecls, k,
                                 get_type_class(v_typedef['type'])(**v_typedef))
                self._assign(map, k, self._typecls[k]._typedef)
            kwargs[self._json_property] = map
        out = super(ContainerMetaschemaType, self).update_typedef(**kwargs)
        if map_out:
            out[self._json_property] = map_out
        return out
| [
"langmm.astro@gmail.com"
] | langmm.astro@gmail.com |
87bfbb8dd54b0bf8bbc656327512c7e6baf6580c | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/monitor/azure-monitor-query/samples/sample_batch_query.py | 50d45f0e0b24bde23a6296c242aa30f492e9df3a | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 2,506 | py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
FILE: sample_batch_query.py
DESCRIPTION:
This sample demonstrates querying multiple queries in a batch.
USAGE:
python sample_batch_query.py
Set the environment variables with your own values before running the sample:
1) LOGS_WORKSPACE_ID - The The first (primary) workspace ID.
This example uses DefaultAzureCredential, which requests a token from Azure Active Directory.
For more information on DefaultAzureCredential, see https://docs.microsoft.com/python/api/overview/azure/identity-readme?view=azure-python#defaultazurecredential.
**Note** - Although this example uses pandas to print the response, it's optional and
isn't a required package for querying. Alternatively, native Python can be used as well.
"""
from datetime import datetime, timedelta, timezone
import os
import pandas as pd
from azure.monitor.query import LogsQueryClient, LogsBatchQuery, LogsQueryStatus
from azure.identity import DefaultAzureCredential
# Authenticate via Azure AD and build the Logs query client.
credential = DefaultAzureCredential()
client = LogsQueryClient(credential)

# [START send_query_batch]
# Three queries in one batch: a valid one, a deliberately bad one (to show
# FAILURE handling) and one that returns a partial result.
requests = [
    LogsBatchQuery(
        query="AzureActivity | summarize count()",
        timespan=timedelta(hours=1),
        workspace_id= os.environ['LOGS_WORKSPACE_ID']
    ),
    LogsBatchQuery(
        query= """bad query""",
        timespan=timedelta(days=1),
        workspace_id= os.environ['LOGS_WORKSPACE_ID']
    ),
    LogsBatchQuery(
        query= """let Weight = 92233720368547758;
        range x from 1 to 3 step 1
        | summarize percentilesw(x, Weight * 100, 50)""",
        workspace_id= os.environ['LOGS_WORKSPACE_ID'],
        timespan=(datetime(2021, 6, 2, tzinfo=timezone.utc), datetime(2021, 6, 5, tzinfo=timezone.utc)), # (start, end)
        include_statistics=True
    ),
]
results = client.query_batch(requests)

# Results come back per-query; dispatch on each result's status.
for res in results:
    if res.status == LogsQueryStatus.FAILURE:
        # this will be a LogsQueryError
        print(res)
    elif res.status == LogsQueryStatus.PARTIAL:
        ## this will be a LogsQueryPartialResult
        print(res.partial_error)
        for table in res.partial_data:
            df = pd.DataFrame(table.rows, columns=table.columns)
            print(df)
    elif res.status == LogsQueryStatus.SUCCESS:
        ## this will be a LogsQueryResult
        table = res.tables[0]
        df = pd.DataFrame(table.rows, columns=table.columns)
        print(df)
# [END send_query_batch]
| [
"noreply@github.com"
] | openapi-env-test.noreply@github.com |
241673ed21e518b4c60725b37b18b79c68d3dc85 | 0dc8ddc02b9efc07f16ccd0e15cda4eb9c773763 | /fjfundo/mensalidades/tests/test_model_fundo.py | 7087a90a029e79b37a813fdc4912d09e3934746d | [] | no_license | juniorcarvalho/fjfundo | 04a8913e945101c7e47b6be1663af03c47149445 | 44e3e15c69e8648d7330859f9edf9e62655fe8f6 | refs/heads/master | 2020-09-17T15:34:31.674213 | 2016-10-27T16:19:34 | 2016-10-27T16:19:34 | 66,687,455 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | from django.test import TestCase
from datetime import date
from fjfundo.mensalidades.models import Fundo
class FundoModelTest(TestCase):
    """Model-level tests for Fundo: object creation and its string form."""

    def setUp(self):
        # A single fund fixture shared by both tests.
        fundo_kwargs = dict(
            nome_fundo='fundo de formatura',
            data_inicial=date(2016, 1, 1),
            data_final=date(2016, 12, 31),
            cnpj='00000000000000',
        )
        self.fundo = Fundo.objects.create(**fundo_kwargs)

    def test_create(self):
        self.assertTrue(Fundo.objects.exists())

    def test_str(self):
        self.assertEqual('fundo de formatura', str(self.fundo))
| [
"joseadolfojr@gmail.com"
] | joseadolfojr@gmail.com |
945f254e902ef50da52591ba46d2709f73a6b1b0 | e5d74b142a03d7cccc4acd5fdcdc2af7c47dd728 | /dynamo_engine/fields.py | e1b34a3012e7b72e586163ca66c3de019507e5aa | [
"MIT"
] | permissive | eshandas/dynamo_engine | 6f10d33a0d118dbb6bae3d896690698fd63428bd | a6c245ae2618459370ee68e43b4b824ba454f0b4 | refs/heads/master | 2021-01-25T04:22:05.655529 | 2017-06-06T17:58:20 | 2017-06-06T17:58:20 | 93,432,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | from base.fields import (
BaseField,
STRING,
)
# Names of the field classes exported by this module, grouped by role
# (key fields vs. plain attribute fields).
AVAILABLE_KEY_FIELDS = (
    'HashKeyField', 'RangeKeyField', )

AVAILABLE_FIELDS = (
    'StringField', )
class HashKeyField(BaseField):
    # Partition ("HASH") key field; the attribute type is supplied at
    # construction time.
    KEY_TYPE = 'HASH'

    def __init__(self, type):
        # `type` shadows the builtin but is kept for API compatibility.
        self.ATTRIBUTE_TYPE = type
class RangeKeyField(BaseField):
    # Sort ("RANGE") key field; the attribute type is supplied at
    # construction time.
    KEY_TYPE = 'RANGE'

    def __init__(self, type):
        # `type` shadows the builtin but is kept for API compatibility.
        self.ATTRIBUTE_TYPE = type
class StringField(BaseField):
    # Plain string attribute, using the STRING constant from base.fields.
    ATTRIBUTE_TYPE = STRING
| [
"eshandasnit@gmail.com"
] | eshandasnit@gmail.com |
c19a50a6473c9a473814ca3bfb3b895e067a35e1 | 88e8e28b58092d5ba051582930c156872b9565a5 | /ABC/ABC.py | 4969b12f1de51c97fc10514845edcb0e14a9e838 | [] | no_license | dorahero/crawlers | b8a4a1c2592e817b365d56a87bee021d29598810 | 88e134fdd2493330622848f931638aabd6c906fe | refs/heads/master | 2023-02-19T07:54:54.945144 | 2021-01-23T09:13:42 | 2021-01-23T09:13:42 | 276,884,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,222 | py | import requests
import json
import os
from bs4 import BeautifulSoup
import re
ss = requests.session()
url = 'https://www.abccar.com.tw/abcapi/car/GetSearchCarBrand'
useragent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'
headers = {'User-Agent': useragent}
res_1 = requests.post(url, headers=headers)
json_c = json.loads(res_1.text)
print(json_c)
BrandID = {j['BrandID']: j['Name'] for j in json_c}
print(BrandID)
# useragent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'
# headers = {'User-Agent': useragent}
# data = {'BrandID': '75'}
# res = requests.post(url, headers=headers, json=data)
# json_c = json.loads(res.text)
# print(json_c)
# SeriesID = []
# for j in json_c:
# SeriesID.append(j['SeriesID'])
# print(SeriesID)
count = 0
img_url = []
cid = set()
cars = 0
for f, brand in enumerate(BrandID):
ss.cookies.clear()
print(f, brand, BrandID[brand])
url = 'https://www.abccar.com.tw//abcapi/search'
useragent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'
data = {'brand': '{}'.format(brand),
'tab': '1',
'SearchType': '1',
'BrandKey': '{}'.format(BrandID[brand][0].upper())
}
headers = {'User-Agent': useragent}
res_2 = requests.post(url, headers=headers, data=data)
json_c = json.loads(res_2.text)
page_num = int(json_c['Total'])
cars += page_num
print(page_num, '輛車')
print(int(page_num / 40) + 1, '總頁數')
for t in range(int(page_num / 40) + 1):
print(t+1, 'page')
data = {'brand': '{}'.format(brand),
'tab': '1',
'SearchType': '1',
'BrandKey': '{}'.format(BrandID[brand][0].upper()),
'page': '{}'.format(t + 1),
'series': '0',
'category': '0',
'SeriesGroup': '0',
'Dealer': '0'
}
headers = {'User-Agent': useragent}
res_3 = ss.post(url, headers=headers, data=data)
try:
json_c = json.loads(res_3.text)
soup = BeautifulSoup(json_c['List'], 'html.parser')
car_id = soup.select('a[class="abc-article__link"]')
print(len(car_id), '幾輛車')
for c in car_id:
cid.add(str(c['car-id']) + '_' + BrandID[brand])
count += 1
except Exception as e:
print(e)
print(cid)
print(len(cid))
print(cars)
cid_dict = {}
for b in BrandID:
tmp = []
for c in cid:
if c[8:] == BrandID[b]:
tmp.append(c[:7])
cid_dict[BrandID[b]] = tmp
print(cid_dict)
with open('./abc_kind.txt', 'w', encoding='utf-8') as f:
f.write(str(cid_dict))
# cars_num = 0
# for c in cid:
# url = 'https://www.abccar.com.tw/car/{}?car_source=index-top-dealer'.format(c)
# useragent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'
# headers = {'User-Agent': useragent}
#
# res = requests.get(url, headers=headers)
# soup = BeautifulSoup(res.text, 'html.parser')
# r = re.compile(r'<[^>]*>')
# try:
# j = r.sub('', str(soup.select('script[type="application/ld+json"]')[0]))
# # 轉換非法字元
# json_car = json.loads(j)
# img_url.append(json_car['image'])
# cars_num += len(json_car['image'])
# except Exception as e:
# with open('./jsonerror.txt', 'a', encoding='utf-8') as f:
# f.write(str(e) + str(c) + '\n')
#
# print(cars_num)
# for x in range(len(soup.select('script'))):
# j = r.sub('', str(soup.select('script')[x]))
# try:
# json_car = json.loads(j)
# if 'makesOffer' not in j:
# continue
# else:
# img_url.append(json_car['makesOffer']['itemOffered']['image'])
# count += 1
# print(count)
# break
# except Exception as e:
# print(e)
# print("Not json")
# url = 'https://www.abccar.com.tw/abcapi/car/GetCarModelBrandSeriesCategory'
# useragent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'
# headers = {'User-Agent': useragent}
# res = requests.get(url, headers=headers)
# json_c = json.loads(res.text)
# count = 0
# for j in json_c:
# if 'SeriesID' in j:
# if j['SeriesID'] in SeriesID:
# count += 1
# print(j)
# print(count)
# print(75, 610, 3347, 1411044)
#
# url = 'https://www.abccar.com.tw/car/1411388?car_source=index-top-dealer'
# useragent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'
# headers = {'User-Agent': useragent}
#
# res = requests.get(url, headers=headers)
# soup = BeautifulSoup(res.text, 'html.parser')
# print(len(soup.select('script')))
# r = re.compile(r'<[^>]*>')
# j = r.sub('', str(soup.select('script')[17]))
#
# json_car = json.loads(j)
# print(json_car['makesOffer']['itemOffered']['image'])
| [
"dorahero2727@gmail.com"
] | dorahero2727@gmail.com |
8a6cfdd608e14d3cbb4a1ff59922abf1df55eaa9 | db0e8aa3a92a30c9b1cc8da03725e951ff64f3f1 | /lenv/bin/easy_install-3.6 | 774c4f2f4024d4031c1909df56980d5946b48840 | [
"BSD-3-Clause"
] | permissive | shrey-c/DataLeakageDjango | ffeef61caa347520747fc70cf3f7f8b84a9610cf | a827c5a09e5501921f9fb97b656755671238dd63 | refs/heads/master | 2022-11-30T03:30:12.313025 | 2020-07-12T06:47:44 | 2020-07-12T06:47:44 | 242,569,637 | 6 | 1 | BSD-3-Clause | 2022-11-22T05:20:22 | 2020-02-23T18:33:04 | Python | UTF-8 | Python | false | false | 263 | 6 | #!/home/shreyansh/DataLeakageDjango/lenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # setuptools-generated console-script shim.  Windows launchers invoke
    # this as "easy_install-script.pyw" or ".exe"; strip that suffix so
    # argv[0] shows the plain command name before delegating to main().
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"shreyansh.chheda@gmail.com"
] | shreyansh.chheda@gmail.com |
82eff230df94326a35032a2aab368a7418a28af3 | 10e3e350526641bedc7455b545463324020b8f4f | /gs12/api/serializers.py | e97d84e60c148a3482345f18134b7c33afc70382 | [] | no_license | shivamdattapurkayastha99/django-rest-studentdetail | da31566d6f289490f0b30a670df5325a44ad6523 | 2fac55908cd50e43c8c0feaee3e32e942cc28fb2 | refs/heads/master | 2023-02-18T01:23:39.433530 | 2021-01-15T17:53:38 | 2021-01-15T17:53:38 | 329,984,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | from rest_framework import serializers
from .models import Student
class StudentSerializer(serializers.ModelSerializer):
    # DRF serializer exposing a Student's id, name, roll number and city.
    class Meta:
        model=Student
        fields=['id','name','roll','city']
| [
"shivamdatta465@gmail.com"
] | shivamdatta465@gmail.com |
b0e90db28ecf6add940a8671196edd2086bb23bb | 6679ab23bf4f0100eb07cf13be21a8c1b1ae4c1f | /Python_Team_Notes/Graph_Theory/Topology_Sort.py | 80c74e3505ee3644db096c34335d5e856ada6d2a | [] | no_license | gimquokka/problem-solving | 1c77e0ad1828fa93ebba360dcf774e38e157d7b6 | f3c661241d3e41adee330d19db3a66e20d23cf50 | refs/heads/master | 2023-06-28T10:19:07.230366 | 2021-07-29T11:29:26 | 2021-07-29T11:29:26 | 365,461,737 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,909 | py | """
- 위상 정렬(Topology Sort)
: 방향 그래프의 모든 노드를 '방향성에 거스르지 않도록 순서대로 나열하는 것
- 전략
1. 진입차수가 0인 노드를 큐에 넣는다
2. 큐가 빌 때까지 다음의 과정을 반복한다.
(1) 큐에서 원소를 꺼내 해당 노드에서 출발하는 간선을 그래프에서 제거하낟.
(2) 새롭게 진입차수가 0이 된 노드를 큐에 넣는다.
- Time Complexity
O(V+E) # 모든 간선과 노드를 확인함으로
"""
from collections import deque
import sys
input = sys.stdin.readline  # faster line reads than the builtin input()
# Read the number of nodes (n) and edges (m).
n, m = map(int, input().split())
# Initialize every node's in-degree to 0.
indegree = [0]*(n+1)
# Adjacency list holding each node's outgoing edges.
graph = [[] for _ in range(n+1)]
# Read every directed edge of the graph.
for i in range(m):
    a, b = map(int, input().split())
    graph[a].append(b) # edge: can move from node a to node b
    # Increment b's in-degree.
    indegree[b] += 1
# 위상 정렬 함수
def topology_sort():
    """Kahn's algorithm over the module-level n/graph/indegree tables.

    Returns the nodes 1..n in a topological order.  If the graph contains
    a cycle the queue drains early and the result is shorter than n.
    Note: consumes (mutates) the module-level `indegree` list.
    """
    # Seed with every node of in-degree 0 (node numbering starts at 1).
    ready = deque(v for v in range(1, n + 1) if indegree[v] == 0)
    order = []
    while ready:
        vertex = ready.popleft()
        order.append(vertex)
        # Removing vertex's outgoing edges may free up successors.
        for nxt in graph[vertex]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                ready.append(nxt)
    return order
| [
"gimquokka@gmail.com"
] | gimquokka@gmail.com |
17062f8eac0ac19c8d035d5cc0e5b3e4cdd6a5af | 87227a9153cda47b720227b3e7e1930936550f7c | /matting/torchscript_resnet50_fp32/code/__torch__/torch/nn/modules/conv/___torch_mangle_56.py | 22b1f3c5fcfaad896bb9b6203f0968e4f011fe4c | [] | no_license | gkyAiLab/Style_Transfer_Matting | 488e7a65d262893fc4b42c4c90544e2f9aee64e4 | 2f461fe8c206c5adade09ae29166d238439c09b2 | refs/heads/master | 2023-07-18T12:15:55.625543 | 2021-08-12T07:50:38 | 2021-08-12T07:50:38 | 390,946,717 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | class Conv2d(Module):
__parameters__ = ["weight", "bias", ]
__buffers__ = []
weight : Tensor
bias : Optional[Tensor]
training : bool
transposed : bool
_reversed_padding_repeated_twice : Tuple[int, int, int, int]
out_channels : Final[int] = 24
kernel_size : Final[Tuple[int, int]] = (3, 3)
in_channels : Final[int] = 42
output_padding : Final[Tuple[int, int]] = (0, 0)
padding_mode : Final[str] = "zeros"
stride : Final[Tuple[int, int]] = (1, 1)
dilation : Final[Tuple[int, int]] = (1, 1)
padding : Final[Tuple[int, int]] = (0, 0)
groups : Final[int] = 1
def forward(self: __torch__.torch.nn.modules.conv.___torch_mangle_56.Conv2d,
input: Tensor) -> Tensor:
_0 = (self)._conv_forward(input, self.weight, )
return _0
def _conv_forward(self: __torch__.torch.nn.modules.conv.___torch_mangle_56.Conv2d,
input: Tensor,
weight: Tensor) -> Tensor:
_1 = torch.conv2d(input, weight, self.bias, [1, 1], [0, 0], [1, 1], 1)
return _1
| [
"694813183@qq.com"
] | 694813183@qq.com |
bf2188086aa03fc72778fae06fef0ee0e83af51e | 8a1144dd38388992c7e35a4cc84002e381f2cf1f | /python/django_fundamentals/disappearing_ninjas/apps/disappearingninjas/urls.py | bc2fb6f77ea81d293c029bf67a660d788e235393 | [] | no_license | vin792/dojo_assignments | 18472e868610bacbd0b5141a5322628f4afefb5b | 449b752f92df224285bfd5d03901a3692a98562e | refs/heads/master | 2021-01-20T00:20:09.896742 | 2017-05-26T17:37:09 | 2017-05-26T17:37:09 | 82,735,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | from django.conf.urls import url, include
from . import views
# URL routes for the disappearingninjas app.
urlpatterns = [
    url(r'^$', views.index),
    url(r'^ninjas$', views.display_all),
    # NOTE(review): the `$` anchor sits inside the capture group; it works,
    # but conventionally belongs after it: r'^ninjas/(?P<color>[a-zA-Z]+)$'.
    url(r'^ninjas/(?P<color>[a-zA-Z]+$)', views.turtles),
]
"vin792@gmail.com"
] | vin792@gmail.com |
726806c4033a9b0cacc59d787e6a56e2e4e0ae1c | fd173195d07b5a5ce229a0c1a20ee61884d8c8a1 | /python_practice/Dictionary_programs/10_check_order_of_char_ordered_dict.py | a35adb04cec3b877f1e5ff092c1c41e1fa084e34 | [] | no_license | anitrajpurohit28/PythonPractice | f7e71946144e04b7f9cb9682087e5d4f79839789 | 8b75b67c4c298a135a5f8ab0b3d15bf5738859f1 | refs/heads/master | 2023-04-12T07:04:12.150646 | 2021-04-24T19:52:24 | 2021-04-24T19:52:24 | 293,912,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,072 | py | # 10 Python | Check order of character in string using OrderedDict( )
"""
Input:
string = "engineers rock"
pattern = "er";
Output: true
Explanation:
All 'e' in the input string are before all 'r'.
Input:
string = "engineers rock"
pattern = "egr";
Output: false
Explanation:
There are two 'e' after 'g' in the input string.
Input:
string = "engineers rock"
pattern = "gsr";
Output: false
Explanation:
There are one 'r' before 's' in the input string.
"""
from collections import OrderedDict
def check_order_ordered_dict(input_str, pattern):
    """Return True when, for every adjacent pair of characters in `pattern`,
    ALL occurrences of the earlier character appear in `input_str` before
    all occurrences of the later one.

    Bug fix: the previous implementation walked only the *first* occurrence
    of each character (``OrderedDict.fromkeys``) and looked for `pattern`
    as a subsequence of those keys, so e.g. pattern 'egr' against
    'engineers rock' wrongly returned True even though 'e's occur after the
    'g'.  Now the first and last index of every character are recorded and
    compared, matching the documented expected outputs ('er' -> True,
    'egr' -> False, 'gsr' -> False).  It also crashed (IndexError) on an
    empty pattern; an empty pattern now vacuously returns True.
    """
    first = OrderedDict()  # char -> first index in input_str
    last = OrderedDict()   # char -> last index in input_str
    for idx, ch in enumerate(input_str):
        first.setdefault(ch, idx)
        last[ch] = idx
    # Every pattern character must occur at all.
    if any(ch not in first for ch in pattern):
        return False
    # For adjacent pattern chars a, b: the last a must precede the first b.
    for a, b in zip(pattern, pattern[1:]):
        if last[a] > first[b]:
            return False
    return True
def check_order_naive(input_str, pattern):
    """Pairwise check: for each adjacent pair (x, y) in `pattern`, the last
    occurrence of x in `input_str` must precede the first occurrence of y.

    Bug fix: the original used ``str.rindex``/``str.index``, which raise
    ValueError when a character is absent, so the ``== -1`` guards were
    dead code and a pattern containing a character missing from
    `input_str` crashed instead of returning False.  ``rfind``/``find``
    return -1 and make the absence check work; it is also applied to
    single-character patterns, which previously skipped the loop entirely.
    """
    # A pattern character that never occurs cannot satisfy the ordering.
    if any(input_str.find(ch) == -1 for ch in pattern):
        return False
    for x, y in zip(pattern, pattern[1:]):
        # Last occurrence of x must come before the first occurrence of y.
        if input_str.rfind(x) > input_str.find(y):
            return False
    return True
# --- demo: exercise both implementations on the documented examples ---
input_str = 'engineers rock'
pattern = 'er'
print(f"input_str: {input_str}\n"
      f"pattern: {pattern}\n"
      f"result:")
print("Ordered dict:", check_order_ordered_dict(input_str, pattern))
print("Naive:", check_order_naive(input_str, pattern))
print()

input_str = 'engineers rock'
pattern = 'egr'
print(f"input_str: {input_str}\n"
      f"pattern: {pattern}\n"
      f"result:")
print("Ordered dict:", check_order_ordered_dict(input_str, pattern))
print("Naive:", check_order_naive(input_str, pattern))
print()

input_str = 'engineers rock'
pattern = 'gsr'
print(f"input_str: {input_str}\n"
      f"pattern: {pattern}\n"
      f"result:")
print("Ordered dict:", check_order_ordered_dict(input_str, pattern))
print("Naive:", check_order_naive(input_str, pattern))
print()
| [
"anitrajpurohit28@gmail.com"
] | anitrajpurohit28@gmail.com |
9663c4f2964c0b5f8e7d8cb59cbcaa361449d8cc | f7c0b3bbd9409f76fa8c74c8f19b89cdc1800c4b | /msnmetrosim/controllers/base/__init__.py | dc87ccb66af510afdf4be4d70687004037101dee | [] | no_license | RaenonX/Madison-Metro-Sim | 4ae320d7ebcca6e050562bfc5624165a47867e53 | 24c31a1afd5241feebc38b8ddd10d1f3b7e228ef | refs/heads/master | 2023-02-24T01:14:30.269160 | 2020-12-20T15:51:26 | 2020-12-20T16:07:40 | 297,050,693 | 2 | 2 | null | 2020-11-01T21:31:38 | 2020-09-20T10:20:34 | Jupyter Notebook | UTF-8 | Python | false | false | 158 | py | """Controller base classes."""
from .fromcsv import CSVLoadableController
from .holder import DataListHolder
from .locational import LocationalDataController
| [
"raenonx0710@gmail.com"
] | raenonx0710@gmail.com |
76f4de44efb33da80730e9422dd9244c582aeae6 | 4202a7c678e0ec25ab2065c4c2804b0296f94480 | /VCFS/fix_vcf_headers.py | 89123a315d7c75faaf1eb3c36ebb9db2ec2680e5 | [] | no_license | kaiyaprovost/whole_genome_pipeline | f1c479536560c5b8c68fe3a5ba0917140fbb0793 | 8e605d855c9f0cd6e11e1b73a97260e0d4aa3fae | refs/heads/master | 2023-04-22T20:51:01.344297 | 2023-04-06T19:12:11 | 2023-04-06T19:12:11 | 237,044,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | import sys
import os
import glob
# Patch a single-sample VCF in place: name the sample column after the file
# prefix and insert the GT FORMAT header line.
try:
    filepath = sys.argv[1]
    print("\tFile is: ",filepath)
except:
    # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
    # `except IndexError:` would be the precise guard here.
    print("Filename not given, quitting")
    #filepath = "/Users/kprovost/Documents/Dissertation/CHAPTER2_GENOMES/ASSEMBLY/genomeresequencingFromLucas/for_AMN_245109/sedtest.txt"
    exit()
# Derive the sample name (prefix) from the file name.
split = filepath.split("/")
filename = split[-1]
print(split)
splitfile = filename.split(".")
prefix = splitfile[0]
print(splitfile)
print(prefix)
# Replace the wildcard sample column with the file prefix.
to_replace = "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t*"
replacement ="#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t"+prefix
# Read in the file
with open(filepath, 'r') as file :
    filedata = file.read()
print("read")
# Replace the target string
filedata = filedata.replace(to_replace, replacement)
print("replaced 1")
# Add the GT FORMAT declaration right after the fileformat line.
to_replace = '##fileformat=VCFv4.0\n#CHROM'
replacement = '##fileformat=VCFv4.0\n##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n#CHROM'
# Replace the target string
filedata = filedata.replace(to_replace, replacement)
print("replaced 2")
# Write the file out again
with open(filepath, 'w') as file:
    file.write(filedata)
print("wrote")
"17089935+kaiyaprovost@users.noreply.github.com"
] | 17089935+kaiyaprovost@users.noreply.github.com |
532990c39ecc04b056fad86a473af7e2f6c8637b | 453d2e699d218fdb3bc1e535a707988194ac6717 | /lib/pector/profile.py | cd69c6d93d8fc98a82b0aa1837cceccf54d5701a | [
"MIT"
] | permissive | defgsus/thegame | d54ffcd343c7e1805d2c11e24cd38b02243e73d4 | 38a627d9108f1418b94b08831fd640dd87fbba83 | refs/heads/master | 2023-07-23T06:32:40.297591 | 2022-04-11T12:02:32 | 2022-04-11T12:02:32 | 127,875,178 | 1 | 0 | MIT | 2023-07-06T22:07:07 | 2018-04-03T08:21:31 | Python | UTF-8 | Python | false | false | 6,662 | py | from .vec3 import vec3
import random
def rnd_vec3(mi=-1., ma=1.):
    """Return a vec3 with each component drawn uniformly from [mi, ma]."""
    components = tuple(random.uniform(mi, ma) for _ in range(3))
    return vec3(components)
def nbody_case(nbodies, nframes):
    # Toy O(nbodies^2) n-body integration used as a vec3 profiling workload.
    pos = [rnd_vec3() for i in range(nbodies)]
    imp = [rnd_vec3() for i in range(nbodies)]
    for it in range(nframes):
        # Pairwise attraction: pull each pair together along their
        # (normalized) separation, with strength proportional to distance.
        for i in range(len(pos)):
            for j in range(i+1, len(pos)):
                d = (pos[j] - pos[i])
                l = d.length()
                d /= l
                a = 0.02 * l * d
                imp[i] += a
                imp[j] -= a
        # Integrate positions and damp the impulses.
        for i in range(len(pos)):
            pos[i] += imp[i]
            imp[i] *= 0.99
# TODO: i get
#  File "/usr/lib/python3.4/cProfile.py", line 22, in <module>
#     run.__doc__ = _pyprofile.run.__doc__
#  AttributeError: 'module' object has no attribute 'run'
# without these:
# NOTE(review): no-op stubs that satisfy cProfile's module-level attribute
# copy on this Python build; they are never meant to be called.
def run(): pass
def runctx(): pass
if __name__ == "__main__":
    def print_stats(prof):
        # Sort profile entries by average time per call, descending, and print
        # them as "total time | time per million calls | code object".
        stats = sorted(prof.getstats(), key=lambda t: -t[3]/t[1])
        fmt = "%10s | %20s | %s"
        print(fmt % ("time", "time per M calls", "name"))
        for pe in stats:
            # pe[1] is the call count, pe[3] the cumulative time in seconds.
            print(fmt % (str(round(pe[3],8)), str(pe[3]/pe[1]*1.e+6), pe[0]))
    def do_profile(code):
        # Profile the given statement string and dump per-entry statistics.
        print("------ %s ------" % code)
        import cProfile
        prof = cProfile.Profile()
        prof.run(code)
        print_stats(prof)
    do_profile("nbody_case(nbodies=32, nframes=50)")
"""
------ nbody_case(nbodies=32, nframes=50) ------
time | time per M calls | name
1.293539 | 1293539.0 | <built-in method exec>
1.293508 | 1293507.9999999998 | <code object <module> at 0x7ff78da22ed0, file "<string>", line 1>
1.293499 | 1293499.0 | <code object nbody_case at 0x7ff78d9f34b0, file "/home/defgsus/prog/python/dev/pector/pector/profile.py", line 9>
0.000322 | 321.99999999999994 | <code object <listcomp> at 0x7ff78d9f3390, file "/home/defgsus/prog/python/dev/pector/pector/profile.py", line 10>
0.000255 | 254.99999999999997 | <code object <listcomp> at 0x7ff78d9f3420, file "/home/defgsus/prog/python/dev/pector/pector/profile.py", line 11>
0.36807 | 14.841532258064516 | <code object __sub__ at 0x7ff78d9fe930, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 102>
0.566942 | 11.430282258064516 | <code object _binary_operator at 0x7ff78da01780, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 143>
0.22724 | 9.162903225806451 | <code object __rmul__ at 0x7ff78d9fedb0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 115>
0.239938 | 9.088560606060605 | <code object __iadd__ at 0x7ff78d9fe810, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 98>
0.216015 | 8.710282258064517 | <code object __isub__ at 0x7ff78d9feb70, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 108>
0.000554 | 8.65625 | <code object rnd_vec3 at 0x7ff78da6cf60, file "/home/defgsus/prog/python/dev/pector/pector/profile.py", line 4>
0.520338 | 6.705386597938144 | <code object _binary_operator_inplace at 0x7ff78da01810, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 150>
0.279042 | 5.618596971649485 | <code object __init__ at 0x7ff78d9f38a0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 11>
0.259388 | 5.222857603092784 | <code object set at 0x7ff78da019c0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 163>
0.09277 | 3.7407258064516125 | <code object __itruediv__ at 0x7ff78da01270, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 128>
0.005101 | 3.1881249999999994 | <code object __imul__ at 0x7ff78d9feed0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 118>
0.073217 | 2.9522983870967736 | <code object <listcomp> at 0x7ff78da016f0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 148>
0.05166 | 2.0830645161290318 | <code object length at 0x7ff78da01ae0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 196>
0.207934 | 1.654682327476445 | <code object check_float_sequence at 0x7ff78da0c1e0, file "/home/defgsus/prog/python/dev/pector/pector/tools.py", line 18>
0.030468 | 1.2285483870967742 | <code object <listcomp> at 0x7ff78da01660, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 146>
0.166587 | 0.9418932060792473 | <code object is_number at 0x7ff78da0c0c0, file "/home/defgsus/prog/python/dev/pector/pector/tools.py", line 2>
0.02533 | 0.5100273840206185 | <code object <listcomp> at 0x7ff78da018a0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 187>
0.011345 | 0.4574596774193548 | <built-in method sum>
0.01087 | 0.4383064516129032 | <code object <listcomp> at 0x7ff78da01a50, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 205>
8.2e-05 | 0.42708333333333337 | <code object uniform at 0x7ff78da0c930, file "/usr/lib/python3.4/random.py", line 342>
0.022075 | 0.2904605263157894 | <code object __iter__ at 0x7ff78d9f3e40, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 51>
0.15862 | 0.2854828839854577 | <built-in method len>
0.006613 | 0.2666532258064516 | <built-in method sqrt>
0.031618 | 0.13867543859649123 | <code object __getitem__ at 0x7ff78d9f3ed0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 54>
0.010909 | 0.137739898989899 | <code object <lambda> at 0x7ff78d9fe780, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 99>
0.010068 | 0.13532258064516126 | <code object <lambda> at 0x7ff78da011e0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 129>
0.009718 | 0.13061827956989247 | <code object <lambda> at 0x7ff78d9feae0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 109>
0.009641 | 0.12958333333333336 | <code object <lambda> at 0x7ff78d9fe8a0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 103>
0.009471 | 0.1272983870967742 | <code object <lambda> at 0x7ff78d9fed20, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 116>
0.000546 | 0.11374999999999998 | <code object <lambda> at 0x7ff78d9fee40, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 119>
1.5e-05 | 0.078125 | <method 'random' of '_random.Random' objects>
0.03102 | 0.07660621147463252 | <code object __len__ at 0x7ff78d9f3db0, file "/home/defgsus/prog/python/dev/pector/pector/vec3.py", line 48>
0.0 | 0.0 | <method 'disable' of '_lsprof.Profiler' objects>
""" | [
"s.berke@netzkolchose.de"
] | s.berke@netzkolchose.de |
980e6fd73b204353dd08d97307bfc46b97061467 | 0b4957de738dd05f964ea838016b4b811feca970 | /ultron8/api/depends/get_jwt.py | dbdd7d50c59f3efd6c143787aef51a5f133e840d | [
"MIT",
"Apache-2.0"
] | permissive | bossjones/ultron8 | bdb5db72ba58b80645ae417cdf97287cfadd325d | 09d69c788110becadb9bfaa7b3d2a2046f6b5a1c | refs/heads/master | 2023-01-13T06:52:45.679582 | 2023-01-03T22:25:54 | 2023-01-03T22:25:54 | 187,934,920 | 0 | 0 | Apache-2.0 | 2023-01-03T22:25:56 | 2019-05-22T00:44:03 | Python | UTF-8 | Python | false | false | 1,459 | py | # SOURCE: https://github.com/bergran/fast-api-project-template
import re
from fastapi import Header, HTTPException
from starlette import status
from starlette.requests import Request
# from apps.token.constants.jwt import JWT_REGEX
from ultron8.api import settings
from ultron8.constants.jwt import JWT_REGEX
# it will regex always Authorization header with the header config that you set it or default JWT. If header does not exist or has not ^{header} [A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*$ format then will raise HTTPException and response with status code 400.
def get_jwt(
    request: Request, authorization: str = Header("", alias="Authorization")
) -> str:
    """Validate the Authorization header and return the bare JWT.

    The header must match ``JWT_REGEX`` formatted with the configured auth
    header prefix (e.g. ``JWT <token>``); otherwise an HTTP 400 response is
    raised.  On success the token portion -- everything after the last
    space -- is returned.

    :param request: incoming request (unused here; kept for dependency wiring)
    :param authorization: raw ``Authorization`` header value
    :raises HTTPException: 400 when the header is absent or malformed
    :return: the JWT string without its prefix
    """
    # config = request.state.config
    expected = JWT_REGEX.format(settings.JWT_AUTH_HEADER_PREFIX)
    if re.match(expected, authorization) is None:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Authorization has wrong format",
        )
    *_, token = authorization.split(" ")
    return token
| [
"noreply@github.com"
] | bossjones.noreply@github.com |
b3406f02bf858fa60820c26b9706ea1879d18919 | 9e8a754f62ab172043ca978c0fcce20687377498 | /pper.py | 6b4b8bb5df63a9a4f4dc692e7efdeebd6d1259fd | [
"MIT"
] | permissive | luyang93/ROSALIND | 41a5835ed634668742a7155e162ce14f1875b7a0 | f6e5a099a2c47203a14370cfe97ba41db1ae8429 | refs/heads/master | 2020-04-02T22:40:20.640063 | 2019-02-28T07:53:09 | 2019-02-28T07:53:09 | 154,840,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : pper.py
# @Date : 2019-02-17
# @Author : luyang(luyang@novogene.com)
from math import factorial
def main():
file = 'input/rosalind_pper.txt'
with open(file) as f:
n, k = map(int, f.readline().strip().split())
print(int(factorial(n) / factorial(n - k) % 1000000))
if __name__ == "__main__":
main()
| [
"510426762@qq.com"
] | 510426762@qq.com |
ce413e69ce72c2f5c0aae1812a035fff9118ef11 | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/client/gui/Scaleform/daapi/view/meta/ContactsTreeComponentMeta.py | 80bd939e51a3a9be5b3602defe190b7a9f38487d | [] | no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,413 | py | # 2016.11.19 19:51:17 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/ContactsTreeComponentMeta.py
from gui.Scaleform.framework.entities.BaseDAAPIComponent import BaseDAAPIComponent
class ContactsTreeComponentMeta(BaseDAAPIComponent):
    """
    DO NOT MODIFY!
    Generated with yaml.
    __author__ = 'yaml_processor'
    @extends BaseDAAPIComponent
    """

    def onGroupSelected(self, mainGroup, groupData):
        # Stub called from Flash; concrete subclasses must override.
        self._printOverrideError('onGroupSelected')

    def searchLocalContact(self, flt):
        # Stub: override to search local contacts matching *flt*.
        self._printOverrideError('searchLocalContact')

    def hasDisplayingContacts(self):
        # Stub: override to report whether any contacts are displayed.
        self._printOverrideError('hasDisplayingContacts')

    def as_updateInfoMessageS(self, enableSearchInput, title, message, warn):
        # Forward to the Flash/Scaleform object only once DAAPI is initialised.
        if self._isDAAPIInited():
            return self.flashObject.as_updateInfoMessage(enableSearchInput, title, message, warn)

    def as_getMainDPS(self):
        # Fetch the main data provider from the Flash side, if available.
        if self._isDAAPIInited():
            return self.flashObject.as_getMainDP()

    def as_setInitDataS(self, val):
        # Push initial data to the Flash side, if available.
        if self._isDAAPIInited():
            return self.flashObject.as_setInitData(val)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\Scaleform\daapi\view\meta\ContactsTreeComponentMeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:51:17 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
b5e5a434aefbfbd80790b0c8b37c6fe48d9555e8 | 4910c0f3d03935fc8ee03f1e9dc20dfdb2c7c04b | /Codigos estudiantes por lenguaje/PY/Bryann Valderrama/Matemática/combinatories.py | 39b502005bfe3a301a93155655641484f1b875f2 | [] | no_license | roca12/gpccodes | ab15eeedc0cadc0735651262887b44f1c2e65b93 | aa034a3014c6fb879ec5392c51f9714bdc5b50c2 | refs/heads/master | 2023-02-01T13:49:27.563662 | 2023-01-19T22:50:58 | 2023-01-19T22:50:58 | 270,723,328 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | from sys import stdout
wr = stdout.write
cont1 = 0
cont2 = 0
def combUtil(arr, data, start, end, index, r):
    """Recursively print every r-element combination of arr[start..end].

    *data* is a scratch buffer holding the current partial combination; the
    module-global ``cont1`` counts the combinations printed via ``wr``.
    """
    global cont1
    if index == r:
        # A full combination has been assembled: print it and count it.
        for j in range(r):
            wr(f'{data[j]} ')
        wr('\n')
        cont1 += 1
        return
    i = start
    # The second condition prunes branches that can no longer supply enough
    # remaining elements to complete an r-combination.
    while i <= end and end - i + 1 >= r - index:
        data[index] = arr[i]
        combUtil(arr, data, i+1, end, index+1, r)
        i += 1
def combinationRepetitionUtil(chosen, arr, index, r, start, end):
    """Recursively print every r-combination of arr, allowing repetition.

    *chosen* stores the indices picked so far; the module-global ``cont2``
    counts combinations printed.  Recursing with ``i`` (not ``i + 1``) is
    what permits the same element to be chosen again.
    """
    global cont2
    if index == r:
        for i in range(r):
            wr(f'{arr[chosen[i]]} ')
        wr('\n')
        cont2 += 1
        return
    for i in range(start, end):
        chosen[index] = i
        combinationRepetitionUtil(chosen, arr, index+1, r, i, end)
    return
def printComb(arr, n, r):
    """Print every r-combination (no repetition) of the first n items of arr."""
    scratch = [0] * r
    combUtil(arr, scratch, 0, n - 1, 0, r)
def combinationRepetition(arr, n, r):
    """Print every r-combination with repetition of the first n items of arr."""
    picked = [0] * (r + 1)
    combinationRepetitionUtil(picked, arr, 0, r, 0, n - 1)
# Demo 1: all 3-combinations (no repetition) of a 5-element list.
arrint1 = [1, 2, 3, 4, 5]
r1 = 3
n1 = len(arrint1)
printComb(arrint1, n1, r1)
wr(f'Hay {str(cont1)} Combinaciones Sin Repetición\n')
# Demo 2: all 2-combinations with repetition of a 5-element list.
arrint2 = [1, 2, 3, 4, 5]
r2 = 2
n2 = len(arrint2)
combinationRepetition(arrint2, n2, r2)
wr(f'Hay {str(cont2)} Combinaciones Con Repetición')
| [
"noreply@github.com"
] | roca12.noreply@github.com |
276ae40a2e543da3e05e27e93b5d1815d60013fa | 162e0e4791188bd44f6ce5225ff3b1f0b1aa0b0d | /examples/preprocessing/plot_function_transformer.py | f2793846b5276e252b98eef912dc6bad871fcef9 | [] | no_license | testsleeekGithub/trex | 2af21fa95f9372f153dbe91941a93937480f4e2f | 9d27a9b44d814ede3996a37365d63814214260ae | refs/heads/master | 2020-08-01T11:47:43.926750 | 2019-11-06T06:47:19 | 2019-11-06T06:47:19 | 210,987,245 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,981 | py | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principle component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from mrex.model_selection import train_test_split
from mrex.decomposition import PCA
from mrex.pipeline import make_pipeline
from mrex.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
    """
    This dataset is two lines with a slope ~ 1, where one has
    a y offset of ~100
    """
    # Class 0: a pair of noisy ramps, the second shifted up by ~100.
    offset_pair = np.vstack((
        _generate_vector(),
        _generate_vector() + 100,
    )).T
    # Class 1: two plain noisy ramps with no offset.
    plain_pair = np.vstack((
        _generate_vector(),
        _generate_vector(),
    )).T
    X = np.vstack((offset_pair, plain_pair))
    y = np.hstack((np.zeros(1000), np.ones(1000)))
    return X, y
def all_but_first_column(X):
    """Return *X* without its leading column (a view, not a copy)."""
    trailing = X[:, 1:]
    return trailing
def drop_first_component(X, y):
    """
    Create a pipeline with PCA and the column selector and use it to
    transform the dataset.
    """
    # PCA rotates into principal components; the FunctionTransformer then
    # discards the first (most explanatory) component.
    selector = FunctionTransformer(all_but_first_column)
    pipeline = make_pipeline(PCA(), selector)
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    pipeline.fit(X_train, y_train)
    return pipeline.transform(X_test), y_test
if __name__ == '__main__':
    X, y = generate_dataset()
    lw = 0
    # First figure: the raw 2-D dataset, colored by class label.
    plt.figure()
    plt.scatter(X[:, 0], X[:, 1], c=y, lw=lw)
    plt.figure()
    # Second figure: a fresh dataset with the first principal component
    # dropped; the single remaining component is plotted on one axis.
    X_transformed, y_transformed = drop_first_component(*generate_dataset())
    plt.scatter(
        X_transformed[:, 0],
        np.zeros(len(X_transformed)),
        c=y_transformed,
        lw=lw,
        s=60
    )
    plt.show()
| [
"shkolanovaya@gmail.com"
] | shkolanovaya@gmail.com |
6af4bfdf7815e6a889dc3e4d43982356bec92e10 | 768058e7f347231e06a28879922690c0b6870ed4 | /venv/lib/python3.7/site-packages/numba/tests/npyufunc/test_dufunc.py | 7906221ae4cc9a43b5b7d94b034fe27a77edd570 | [] | no_license | jciech/HeisenbergSpinChains | 58b4238281d8c158b11c6c22dd0da82025fd7284 | e43942bbd09f6675e7e2ff277f8930dc0518d08e | refs/heads/master | 2022-12-18T08:04:08.052966 | 2020-09-29T12:55:00 | 2020-09-29T12:55:00 | 258,476,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,533 | py | import pickle
import numpy as np
from numba import njit, vectorize
from numba.tests.support import MemoryLeakMixin
import unittest
from numba.np.ufunc import dufunc
def pyuadd(a0, a1):
    """Element-wise addition kernel compiled into a DUFunc by the tests below."""
    result = a0 + a1
    return result
class TestDUFunc(MemoryLeakMixin, unittest.TestCase):
    """Tests for dynamically-compiled universal functions (DUFunc)."""

    def nopython_dufunc(self, pyfunc):
        # Wrap *pyfunc* as a DUFunc compiled in nopython mode.
        return dufunc.DUFunc(pyfunc, targetoptions=dict(nopython=True))

    def test_frozen(self):
        # Freezing forbids compiling new loops and cannot be undone.
        duadd = self.nopython_dufunc(pyuadd)
        self.assertFalse(duadd._frozen)
        duadd._frozen = True
        self.assertTrue(duadd._frozen)
        with self.assertRaises(ValueError):
            duadd._frozen = False
        with self.assertRaises(TypeError):
            # No float64 loop was compiled before freezing, so this must fail.
            duadd(np.linspace(0, 1, 10), np.linspace(1, 2, 10))

    def test_scalar(self):
        # Scalar dispatch must agree with the pure-Python kernel.
        duadd = self.nopython_dufunc(pyuadd)
        self.assertEqual(pyuadd(1, 2), duadd(1, 2))

    def test_npm_call(self):
        # Calling a DUFunc with an explicit output array from nopython code.
        duadd = self.nopython_dufunc(pyuadd)
        @njit
        def npmadd(a0, a1, o0):
            duadd(a0, a1, o0)
        X = np.linspace(0, 1.9, 20)
        X0 = X[:10]
        X1 = X[10:]
        out0 = np.zeros(10)
        npmadd(X0, X1, out0)
        np.testing.assert_array_equal(X0 + X1, out0)
        # 2-D inputs of matching shape.
        Y0 = X0.reshape((2, 5))
        Y1 = X1.reshape((2, 5))
        out1 = np.zeros((2, 5))
        npmadd(Y0, Y1, out1)
        np.testing.assert_array_equal(Y0 + Y1, out1)
        # Broadcasting a 1-D operand against a 2-D operand.
        Y2 = X1[:5]
        out2 = np.zeros((2, 5))
        npmadd(Y0, Y2, out2)
        np.testing.assert_array_equal(Y0 + Y2, out2)

    def test_npm_call_implicit_output(self):
        # Same as above but letting the ufunc machinery allocate the output.
        duadd = self.nopython_dufunc(pyuadd)
        @njit
        def npmadd(a0, a1):
            return duadd(a0, a1)
        X = np.linspace(0, 1.9, 20)
        X0 = X[:10]
        X1 = X[10:]
        out0 = npmadd(X0, X1)
        np.testing.assert_array_equal(X0 + X1, out0)
        Y0 = X0.reshape((2, 5))
        Y1 = X1.reshape((2, 5))
        out1 = npmadd(Y0, Y1)
        np.testing.assert_array_equal(Y0 + Y1, out1)
        Y2 = X1[:5]
        out2 = npmadd(Y0, Y2)
        np.testing.assert_array_equal(Y0 + Y2, out2)
        # Scalar call through the nopython wrapper.
        out3 = npmadd(1.0, 2.0)
        self.assertEqual(out3, 3.0)

    def test_ufunc_props(self):
        # A fresh DUFunc mirrors np.ufunc properties; loops are compiled lazily,
        # so ntypes/types only grow after the first call.
        duadd = self.nopython_dufunc(pyuadd)
        self.assertEqual(duadd.nin, 2)
        self.assertEqual(duadd.nout, 1)
        self.assertEqual(duadd.nargs, duadd.nin + duadd.nout)
        self.assertEqual(duadd.ntypes, 0)
        self.assertEqual(duadd.types, [])
        self.assertEqual(duadd.identity, None)
        duadd(1, 2)
        self.assertEqual(duadd.ntypes, 1)
        self.assertEqual(duadd.ntypes, len(duadd.types))
class TestDUFuncPickling(MemoryLeakMixin, unittest.TestCase):
    """Tests that a DUFunc survives a pickle round-trip and stays usable."""

    def check(self, ident, result_type):
        # Round-trip the dufunc through pickle.
        buf = pickle.dumps(ident)
        rebuilt = pickle.loads(buf)
        # Check reconstructed dufunc
        r = rebuilt(123)
        self.assertEqual(123, r)
        self.assertIsInstance(r, result_type)
        # Try to use reconstructed dufunc in @jit
        @njit
        def foo(x):
            return rebuilt(x)
        r = foo(321)
        self.assertEqual(321, r)
        self.assertIsInstance(r, result_type)

    def test_unrestricted(self):
        # No signature given: loops are compiled lazily from argument types.
        @vectorize
        def ident(x1):
            return x1
        self.check(ident, result_type=(int, np.integer))

    def test_restricted(self):
        # Explicit float64 signature: integer input is cast to float.
        @vectorize(["float64(float64)"])
        def ident(x1):
            return x1
        self.check(ident, result_type=float)
| [
"jan@multiply.ai"
] | jan@multiply.ai |
b4bc5a6e014113adc13d24c1e92aa5b0ae1eea08 | 564d6a4d305a8ac6a7e01c761831fb2081c02d0f | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_05_01/aio/operations/_application_gateway_private_endpoint_connections_operations.py | 618cffe13b15ad80c265cba1a816c1af14f74b4a | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | paultaiton/azure-sdk-for-python | 69af4d889bac8012b38f5b7e8108707be679b472 | d435a1a25fd6097454b7fdfbbdefd53e05029160 | refs/heads/master | 2023-01-30T16:15:10.647335 | 2020-11-14T01:09:50 | 2020-11-14T01:09:50 | 283,343,691 | 0 | 0 | MIT | 2020-07-28T22:43:43 | 2020-07-28T22:43:43 | null | UTF-8 | Python | false | false | 21,719 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ApplicationGatewayPrivateEndpointConnectionsOperations:
"""ApplicationGatewayPrivateEndpointConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Store the shared pipeline client, configuration and (de)serializers
        # supplied by the generated service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        connection_name: str,
        **kwargs
    ) -> None:
        """Send the initial DELETE request; long-running polling is handled by begin_delete."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map auth and conflict status codes to the richer azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202/204 are all valid responses for the initial delete call.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        connection_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Deletes the specified private endpoint connection on application gateway.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :param connection_name: The name of the application gateway private endpoint connection.
        :type connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only send the initial request when not resuming from a saved poller.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                application_gateway_name=application_gateway_name,
                connection_name=connection_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete returns no body; only invoke the custom callback, if any.
            if cls:
                return cls(pipeline_response, None, {})
        # Final state of this LRO is obtained from the Location header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}  # type: ignore
    async def _update_initial(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        connection_name: str,
        parameters: "models.ApplicationGatewayPrivateEndpointConnection",
        **kwargs
    ) -> Optional["models.ApplicationGatewayPrivateEndpointConnection"]:
        """Send the initial PUT request; long-running polling is handled by begin_update."""
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["models.ApplicationGatewayPrivateEndpointConnection"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the connection model as the PUT request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ApplicationGatewayPrivateEndpointConnection')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Only a 200 response carries a deserializable body; 202 means accepted.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}  # type: ignore
    async def begin_update(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        connection_name: str,
        parameters: "models.ApplicationGatewayPrivateEndpointConnection",
        **kwargs
    ) -> AsyncLROPoller["models.ApplicationGatewayPrivateEndpointConnection"]:
        """Updates the specified private endpoint connection on application gateway.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :param connection_name: The name of the application gateway private endpoint connection.
        :type connection_name: str
        :param parameters: Parameters supplied to update application gateway private endpoint
         connection operation.
        :type parameters: ~azure.mgmt.network.v2020_05_01.models.ApplicationGatewayPrivateEndpointConnection
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ApplicationGatewayPrivateEndpointConnection or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.ApplicationGatewayPrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ApplicationGatewayPrivateEndpointConnection"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only send the initial PUT when not resuming from a saved poller.
        if cont_token is None:
            raw_result = await self._update_initial(
                resource_group_name=resource_group_name,
                application_gateway_name=application_gateway_name,
                connection_name=connection_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into the connection model.
            deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Final state of this LRO is obtained from the Azure-AsyncOperation header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        application_gateway_name: str,
        connection_name: str,
        **kwargs
    ) -> "models.ApplicationGatewayPrivateEndpointConnection":
        """Gets the specified private endpoint connection on application gateway.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_gateway_name: The name of the application gateway.
        :type application_gateway_name: str
        :param connection_name: The name of the application gateway private endpoint connection.
        :type connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationGatewayPrivateEndpointConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_05_01.models.ApplicationGatewayPrivateEndpointConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ApplicationGatewayPrivateEndpointConnection"]
        # Map auth and conflict status codes to the richer azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
            'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections/{connectionName}'}  # type: ignore
def list(
    self,
    resource_group_name: str,
    application_gateway_name: str,
    **kwargs
) -> AsyncIterable["models.ApplicationGatewayPrivateEndpointConnectionListResult"]:
    """Lists all private endpoint connections on an application gateway.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param application_gateway_name: The name of the application gateway.
    :type application_gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ApplicationGatewayPrivateEndpointConnectionListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.ApplicationGatewayPrivateEndpointConnectionListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Optional response-transform hook supplied by the caller.
    cls = kwargs.pop('cls', None)  # type: ClsType["models.ApplicationGatewayPrivateEndpointConnectionListResult"]
    # Map HTTP status codes onto the azure-core exceptions raised for them;
    # callers may extend/override via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-05-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # First page: build the URL from the operation's metadata template.
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'applicationGatewayName': self._serialize.url("application_gateway_name", application_gateway_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # Subsequent pages: the service hands back a fully-formed next_link URL.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, page items).
        deserialized = self._deserialize('ApplicationGatewayPrivateEndpointConnectionListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; only HTTP 200 is a valid response for this operation.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/privateEndpointConnections'}  # type: ignore
| [
"noreply@github.com"
] | paultaiton.noreply@github.com |
5c192053bb1e71e4c8e3f2ba9558706aecb74d73 | 0f7786e5664e034e77d5529eaafc5a216abcaedb | /resources/lib/pytumblr/models/other/Primary.py | 9e5dfeb2bfbad806a0ce71c9712531a57adfd8c0 | [] | no_license | zoelmd/plugin.video.tumblrv | 38e3acdcdb457c83d1c5acca458d82eeebfd6c65 | 2164a905d5d90469ac1c3b4668ee5b410e6e8541 | refs/heads/master | 2020-03-28T19:46:01.963031 | 2017-08-15T01:36:14 | 2017-08-15T01:36:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | #!/usr/bin/python
class Primary:
    """Minimal value object holding a media ``type`` string from the API payload."""

    def __init__(self, type):
        # Store the value verbatim; no validation or normalization is applied.
        self.type = type
| [
"r00trox"
] | r00trox |
3949bb74efb7e83cbc9afa63dac2123e7d80a210 | d2ff5d87ce0cd69e05d5d1462cedf89b7bb49c14 | /python/old/print_a_lot.py | 12087f4f55a36576d5fc8aac603bc477152ce694 | [] | no_license | kt3k/anarchy-golf-old-solutions | ecc1b9d33508e1a9fd376f10a2b4ef7854e87c91 | 0ad9804af82b5ebb0bf5cb52083b925998e86cb4 | refs/heads/master | 2023-08-18T03:55:50.085429 | 2015-04-25T08:16:08 | 2015-04-25T08:16:08 | 34,560,939 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | for i in range(10**5):print i, | [
"stibium121@gmail.com"
] | stibium121@gmail.com |
0314afce70df3d153a8efc40f409c5cc3922c701 | f4ad721b7158ff2605be6f7e4bde4af6e0e11364 | /vt_manager_kvm/src/python/vt_manager_kvm/controller/drivers/KVMDriver.py | 99551db2acfc0c494510c3c8be8a19887b9f590f | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | ict-felix/stack | 3fb4222a0538c0dbbe351ccc3da1bafa9ca37057 | 583ccacf067b9ae6fc1387e53eaf066b4f3c0ade | refs/heads/master | 2021-01-10T10:16:29.851916 | 2016-06-22T15:11:11 | 2016-06-22T15:11:11 | 51,439,714 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,371 | py | from vt_manager_kvm.controller.drivers.VTDriver import VTDriver
from vt_manager_kvm.models.KVMServer import KVMServer
from vt_manager_kvm.models.KVMVM import KVMVM
from vt_manager_kvm.models.VTServer import VTServer
from vt_manager_kvm.utils.HttpUtils import HttpUtils
import threading
import logging
class KVMDriver(VTDriver):
    """Concrete VT driver for KVM-backed virtualization servers.

    Translates XML-RPC provisioning actions and HTTP POST form data into the
    Django model layer (``KVMServer``/``KVMVM``).  Methods return model
    instances or raise on failure.
    """

    logger = logging.getLogger("KVMDriver")

    # def __init__(self):
    # self.ServerClass = eval('KVMServer')
    # self.VMclass = eval('KVMVM')

    @staticmethod
    def getInstance():
        """Factory used by callers that only know the abstract VTDriver API."""
        return KVMDriver()

    def deleteVM(self, vm):
        """Delete *vm* on the server it belongs to (delegates to the model)."""
        KVMDriver.logger.debug("deleteVM start")
        try:
            vm.Server.get().deleteVM(vm)
        except:
            # NOTE(review): catch-and-re-raise is a no-op; presumably a hook
            # left for logging/cleanup — consider logging the failure here.
            raise

    def getServerAndCreateVM(self,action):
        """Look up the target server of *action* and create the requested VM.

        Returns a ``(server, vm_model)`` tuple.  The callback URL is read from
        the current worker thread (set by the dispatcher) — TODO confirm.
        """
        try:
            Server = KVMServer.objects.get(uuid = action.server.uuid )
            VMmodel = Server.createVM(*KVMDriver.kvmVMtoModel(action.server.virtual_machines[0],threading.currentThread().callBackURL, save = True))
            return Server, VMmodel
        except Exception as e:
            raise e

    @staticmethod
    def createOrUpdateServerFromPOST(request, instance):
        """Create or update a ``KVMServer`` from an HTTP POST form.

        NOTE(review): ``objects.get`` raises ``DoesNotExist`` when no row
        matches, so the ``else`` (create) branch appears unreachable;
        ``objects.filter`` (as used in ``crudServerFromInstance``) was likely
        intended — verify against callers before changing.
        """
        #return KVMServer.constructor(server.getName(),server.getOSType(),server.getOSDistribution(),server.getOSVersion(),server.getAgentURL(),save=True)
        server = KVMServer.objects.get(uuid = instance.getUUID())
        if server:
            return server.updateServer(HttpUtils.getFieldInPost(request,VTServer, "name"),
                HttpUtils.getFieldInPost(request,VTServer, "operatingSystemType"),
                HttpUtils.getFieldInPost(request,VTServer, "operatingSystemDistribution"),
                HttpUtils.getFieldInPost(request,VTServer, "operatingSystemVersion"),
                HttpUtils.getFieldInPost(request,VTServer, "numberOfCPUs"),
                HttpUtils.getFieldInPost(request,VTServer, "CPUFrequency"),
                HttpUtils.getFieldInPost(request,VTServer, "memory"),
                HttpUtils.getFieldInPost(request,VTServer, "discSpaceGB"),
                HttpUtils.getFieldInPost(request,VTServer, "agentURL"),
                save=True)
        else:
            return KVMServer.constructor(HttpUtils.getFieldInPost(request,VTServer, "name"),
                HttpUtils.getFieldInPost(request,VTServer, "operatingSystemType"),
                HttpUtils.getFieldInPost(request,VTServer, "operatingSystemDistribution"),
                HttpUtils.getFieldInPost(request,VTServer, "operatingSystemVersion"),
                HttpUtils.getFieldInPost(request,VTServer, "numberOfCPUs"),
                HttpUtils.getFieldInPost(request,VTServer, "CPUFrequency"),
                HttpUtils.getFieldInPost(request,VTServer, "memory"),
                HttpUtils.getFieldInPost(request,VTServer, "discSpaceGB"),
                HttpUtils.getFieldInPost(request,VTServer, "agentURL"),
                save=True)

    def crudServerFromInstance(self,instance):
        """Create or update a ``KVMServer`` from a deserialized XML instance.

        Uses ``filter`` so 0 matches means create, 1 means update, and more
        than one is treated as a data-integrity error.
        """
        server = KVMServer.objects.filter(uuid = instance.getUUID())
        if len(server)==1:
            server = server[0]
            return server.updateServer(instance.getName(),
                instance.getOSType(),
                instance.getOSDistribution(),
                instance.getOSVersion(),
                instance.getNumberOfCPUs(),
                instance.getCPUFrequency(),
                instance.getMemory(),
                instance.getDiscSpaceGB(),
                instance.getAgentURL(),
                instance.getAgentPassword(),
                save = True)
        elif len(server)==0:
            return KVMServer.constructor(instance.getName(),
                instance.getOSType(),
                instance.getOSDistribution(),
                instance.getOSVersion(),
                instance.getNumberOfCPUs(),
                instance.getCPUFrequency(),
                instance.getMemory(),
                instance.getDiscSpaceGB(),
                instance.getAgentURL(),
                instance.getAgentPassword(),
                save=True)
        else:
            raise Exception("Trying to create a server failed")

    @staticmethod
    def kvmVMtoModel(VMxmlClass, callBackURL, save):
        """Flatten an XML VM description into the positional argument tuple
        expected by ``KVMServer.createVM`` (see ``getServerAndCreateVM``).

        NOTE(review): fields are read from ``xen_configuration`` even though
        this is the KVM driver — presumably the schema reuses the Xen
        structure; confirm against the XML bindings.
        """
        name = VMxmlClass.name
        uuid = VMxmlClass.uuid
        projectId = VMxmlClass.project_id
        projectName = VMxmlClass.project_name
        sliceId = VMxmlClass.slice_id
        sliceName = VMxmlClass.slice_name
        osType = VMxmlClass.operating_system_type
        osVersion = VMxmlClass.operating_system_version
        osDist = VMxmlClass.operating_system_distribution
        memory = VMxmlClass.xen_configuration.memory_mb
        # XXX
        callBackUrl = callBackURL
        hdSetupType = VMxmlClass.xen_configuration.hd_setup_type
        hdOriginPath = VMxmlClass.xen_configuration.hd_origin_path
        virtSetupType = VMxmlClass.xen_configuration.virtualization_setup_type
        return name,uuid,projectId,projectName,sliceId,sliceName,osType,osVersion,osDist,memory,None,None,callBackUrl,hdSetupType,hdOriginPath,virtSetupType,save
| [
"jenkins@localhost"
] | jenkins@localhost |
f6a05657a5c17bf48443b42529171074d24ae2fd | 637ef3599c62d9c186741fbb5cf37961298aa01f | /tests/func/test_grammar.py | dd1998fbcb547affb2647707050afbe569050e12 | [
"MIT",
"Python-2.0"
] | permissive | jwcraftsman/parglare | 7c6d36e42001225b85f3790fa993c5254f5dae98 | 39927c4ce4aabe78d9ad72e51dd2366947abffa0 | refs/heads/master | 2021-09-20T13:41:03.318924 | 2018-08-09T11:16:03 | 2018-08-09T11:16:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,082 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from parglare import Parser, Grammar, Terminal, NonTerminal
from parglare.grammar import ASSOC_LEFT, ASSOC_RIGHT, DEFAULT_PRIORITY
from parglare.exceptions import GrammarError, ParseError
def test_single_terminal():
    """
    Test that grammar may be just a single terminal.
    """
    # String-literal terminal.
    grammar = r"""
    S: A;
    terminals
    A: "a";
    """
    g = Grammar.from_string(grammar)
    parser = Parser(g)
    result = parser.parse('a')
    assert result == 'a'

    # Regex terminal.
    grammar = r"""
    S: A;
    terminals
    A: /\d+/;
    """
    g = Grammar.from_string(grammar)
    parser = Parser(g)
    result = parser.parse('23')
    assert result == '23'


def test_undefined_grammar_symbol():
    "Tests that undefined grammar symbols raises errors."
    # 'id' is referenced but never defined as a rule or terminal.
    grammar = """
    S: A B;
    A: "a" | B;
    B: id;
    """
    with pytest.raises(GrammarError) as e:
        Grammar.from_string(grammar)

    assert 'Unknown symbol' in str(e)
    assert 'id' in str(e)


def test_multiple_terminal_definition():
    # A terminal rule may be defined only once.
    grammar = """
    S: A A;
    terminals
    A: "a";
    A: "b";
    """
    with pytest.raises(GrammarError,
                       match=r'.*Multiple definitions of terminal rule.*'):
        Grammar.from_string(grammar)


def test_reserved_symbol_names():
    """
    Test that reserved symbol names can't be used.
    """
    # EOF, STOP and EMPTY are built-in symbols and may not be redefined.
    grammar = """
    S: EOF "First";
    EOF: "eof";
    """
    with pytest.raises(GrammarError) as e:
        Grammar.from_string(grammar)
    assert 'is reserved' in str(e)

    grammar = """
    S: STOP "First";
    STOP: "stop";
    """
    with pytest.raises(GrammarError) as e:
        Grammar.from_string(grammar)
    assert 'is reserved' in str(e)

    grammar = """
    S: EMPTY "First";
    EMPTY: "stop";
    """
    with pytest.raises(GrammarError) as e:
        Grammar.from_string(grammar)
    assert 'is reserved' in str(e)
def test_assoc_prior():
    """Test that associativity and priority can be defined for productions and
    terminals.
    """
    grammar = """
    E: E '+' E {left, 1};
    E: E '*' E {2, left};
    E: E '^' E {right};
    E: id;
    terminals
    id: /\d+/;
    """
    g = Grammar.from_string(grammar)
    assert g.productions[1].prior == 1
    assert g.productions[1].assoc == ASSOC_LEFT
    assert g.productions[3].assoc == ASSOC_RIGHT
    assert g.productions[3].prior == DEFAULT_PRIORITY
    # NOTE(review): duplicated assertion — the next line probably meant to
    # check a different production (e.g. productions[2].prior == 2).
    assert g.productions[3].prior == DEFAULT_PRIORITY

    # Repeat the same but for alternative keywords "shift" and "reduce"
    grammar = """
    E: E '+' E {reduce, 1};
    E: E '*' E {2, reduce};
    E: E '^' E {shift};
    E: id;
    terminals
    id: /\d+/;
    """
    g = Grammar.from_string(grammar)
    assert g.productions[1].prior == 1
    assert g.productions[1].assoc == ASSOC_LEFT
    assert g.productions[3].assoc == ASSOC_RIGHT
    assert g.productions[3].prior == DEFAULT_PRIORITY
    assert g.productions[3].prior == DEFAULT_PRIORITY


def test_terminal_priority():
    "Terminals might define priority which is used for lexical disambiguation."
    grammar = """
    S: A | B;
    A: 'a' {15};
    B: 'b';
    """
    g = Grammar.from_string(grammar)
    for t in g.terminals:
        if t.name == 'A':
            assert t.prior == 15
        else:
            assert t.prior == DEFAULT_PRIORITY


def test_no_terminal_associavitity():
    "Tests that terminals can't have associativity defined."
    # 'left' inside a terminal's disambiguation block is a syntax error.
    grammar = """
    S: A | B;
    terminals
    A: 'a' {15, left};
    B: 'b';
    """
    with pytest.raises(ParseError) as e:
        Grammar.from_string(grammar)
    assert 'Expected: Prior or dynamic or finish or nofinish or prefer' \
        in str(e)


def test_terminal_empty_body():
    """
    Test that terminals may have empty bodies (when defined using
    recognizers)
    """
    grammar = """
    S: A | B;
    terminals
    A: {15};
    B: ;
    """
    # Recognizers are supplied from code, hence the empty rule bodies.
    g = Grammar.from_string(grammar, recognizers={'B': None, 'A': None})
    a = g.get_terminal('A')
    assert a.prior == 15
    b = g.get_terminal('B')
    assert b.recognizer is None
def test_builtin_grammar_action():
    """
    Builtin actions can be referenced from a grammar.
    """
    grammar = """
    @collect
    Ones: Ones One | One;
    terminals
    One: "1";
    """
    g = Grammar.from_string(grammar)
    ones = g.get_nonterminal('Ones')
    # The @collect annotation must resolve to the builtin collect action.
    from parglare.actions import collect
    assert ones.action == collect

    p = Parser(g)
    result = p.parse('1 1 1 1 1')
    assert result == "1 1 1 1 1".split()


def test_multiple_grammar_action_raises_error():
    """
    If multiple actions are given for the same non-terminal GrammarError
    should be raised.
    """
    grammar = """
    S: Ones;
    @collect
    Ones: Ones One | One;
    @something
    Ones: 'foo';
    terminals
    One: "1";
    """
    # Actions 'collect' and 'something' defined for rule 'Ones'
    with pytest.raises(GrammarError) as e:
        Grammar.from_string(grammar)
    assert 'Multiple' in str(e)


def test_action_override():
    """
    Explicitely provided action in `actions` param overrides default or
    grammar provided.
    """
    grammar = """
    S: Foo Bar;
    @pass_nochange
    Bar: "1" a;
    terminals
    @pass_nochange
    Foo: 'foo';
    a: "a";
    """
    g = Grammar.from_string(grammar)
    p = Parser(g)
    input_str = "foo 1 a"
    result = p.parse(input_str)
    assert result == ["foo", ["1", "a"]]

    # Explicit actions take precedence over the grammar-declared ones.
    actions = {
        "Foo": lambda _, __: "eggs",
        "Bar": lambda _, __: "bar reduce"}
    p = Parser(g, actions=actions)
    result = p.parse(input_str)
    assert result == ["eggs", "bar reduce"]

    # Test with actions call postponing
    p = Parser(g, build_tree=True, actions=actions)
    tree = p.parse(input_str)
    result = p.call_actions(tree)
    assert result == ["eggs", "bar reduce"]
def assignment_in_productions(prods, symbol_name, assgn_name):
    """Return True if any production of *symbol_name* carries *assgn_name*.

    The previous implementation overwrote its result on every production of
    the symbol, so effectively only the *last* matching production was
    checked — an assignment present on an earlier alternative was reported
    as missing.

    :param prods: iterable of grammar productions; each has ``symbol.name``
        and an ``assignments`` mapping.
    :param symbol_name: name of the non-terminal whose productions to inspect.
    :param assgn_name: assignment name (e.g. ``first`` in ``first=...``).
    """
    return any(assgn_name in p.assignments
               for p in prods if p.symbol.name == symbol_name)
def test_assignment_plain():
    """
    Test plain assignment.
    """
    grammar = """
    S: "1" first=some_match "3";
    terminals
    some_match: "2";
    """
    g = Grammar.from_string(grammar)
    assert assignment_in_productions(g.productions, 'S', 'first')

    # Mutable cell so the closure below can record that it ran.
    called = [False]

    def act_s(_, nodes, first):
        called[0] = True
        # A plain assignment passes the matched value through.
        assert first == "2"
        return nodes

    actions = {
        "S": act_s
    }
    p = Parser(g, actions=actions)
    input_str = '1 2 3'
    result = p.parse(input_str)
    assert result == ["1", "2", "3"]
    assert called[0]


def test_assignment_bool():
    """
    Test bool assignment.
    """
    grammar = """
    S: "1" first?=some_match "3";
    terminals
    some_match: "2";
    """
    g = Grammar.from_string(grammar)
    assert assignment_in_productions(g.productions, 'S', 'first')

    called = [False]

    def act_s(_, nodes, first):
        called[0] = True
        # `?=` yields True when the matched element is present.
        assert first is True
        return nodes

    actions = {
        "S": act_s
    }
    p = Parser(g, actions=actions)
    input_str = '1 2 3'
    result = p.parse(input_str)
    assert result == ["1", "2", "3"]
    assert called[0]
def test_assignment_of_repetition():
    """
    Test assignment of repetition.
    """
    grammar = """
    S: "1" first=some_match+ "3";
    terminals
    some_match: "2";
    """
    g = Grammar.from_string(grammar)
    assert assignment_in_productions(g.productions, 'S', 'first')

    called = [False]

    def act_s(_, nodes, first):
        called[0] = True
        # `+` repetition collects all matches into a list.
        assert first == ["2", "2"]
        return nodes

    actions = {
        "S": act_s
    }
    p = Parser(g, actions=actions)
    input_str = '1 2 2 3'
    result = p.parse(input_str)
    assert result == ["1", ["2", "2"], "3"]
    assert called[0]


def test_assignment_of_repetition_with_sep():
    """
    Test assignment of repetition.
    """
    # Same as above but with a comma separator modifier.
    grammar = """
    S: "1" first=some_match+[comma] "3";
    terminals
    some_match: "2";
    comma: ",";
    """
    g = Grammar.from_string(grammar)
    assert assignment_in_productions(g.productions, 'S', 'first')

    called = [False]

    def act_s(_, nodes, first):
        called[0] = True
        assert first == ["2", "2"]
        return nodes

    actions = {
        "S": act_s
    }
    p = Parser(g, actions=actions)
    input_str = '1 2, 2 3'
    result = p.parse(input_str)
    assert result == ["1", ["2", "2"], "3"]
    assert called[0]


def test_multiple_assignment_with_repetitions():
    """
    Test assignment of repetition.
    """
    # Mix a plain repetition assignment with a bool assignment of `*`.
    grammar = """
    S: "1" first=some_match+[comma] second?=some_match* "3";
    terminals
    some_match: "2";
    comma: ",";
    """
    g = Grammar.from_string(grammar)
    assert assignment_in_productions(g.productions, 'S', 'first')
    assert assignment_in_productions(g.productions, 'S', 'second')

    called = [False]

    def act_s(_, nodes, first, second):
        called[0] = True
        assert first == ["2", "2"]
        assert second is True
        return nodes

    actions = {
        "S": act_s
    }
    p = Parser(g, actions=actions)
    input_str = '1 2, 2 2 2 2 3'
    result = p.parse(input_str)
    assert result == ["1", ["2", "2"], ["2", "2", "2"], "3"]
    assert called[0]
def test_case_insensitive_parsing():
    """
    By default parglare is case sensitive. This test parsing without case
    sensitivity.
    """
    grammar = """
    S: "one" "Two" Astart;
    terminals
    Astart: /Aa\w+/;
    """

    g = Grammar.from_string(grammar)
    # By default parsing is case sensitive for both string and regex matches.
    parser = Parser(g)
    with pytest.raises(ParseError):
        parser.parse('One Two Aaa')
    with pytest.raises(ParseError):
        parser.parse('one Two AAa')

    # With ignore_case both inputs must parse without error.
    g = Grammar.from_string(grammar, ignore_case=True)
    parser = Parser(g)
    parser.parse('One Two Aaa')
    parser.parse('one Two AAa')
| [
"igor.dejanovic@gmail.com"
] | igor.dejanovic@gmail.com |
661f174bbea67466701a43643b0ddd608c96077a | 7bf617f77a55d8ec23fa8156c1380b563a5ac7f6 | /CG/Snitch/3D_Scripting_Mac/CS4800/icQTOwn_2.py | 02bef9f467f01e95f2bc0720c53129697138f3f0 | [] | no_license | anyatran/school | c06da0e08b148e3d93aec0e76329579bddaa85d5 | 24bcfd75f4a6fe9595d790808f8fca4f9bf6c7ec | refs/heads/master | 2021-06-17T10:45:47.648361 | 2017-05-26T12:57:23 | 2017-05-26T12:57:23 | 92,509,148 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 42,932 | py | #----------------------------------------------------------------------------------------------
#
# QTown tool
#
# SCRIPT: icQTOwn.mel
#
# AUTHOR: Ingo Clemens
# info@effect-house.de
#
# DESCRIPTION:
# This script creates a city like geometry based on the selected polygons.
# Just simply select some faces of the object to build upon and execute.
#
# VERSIONS:
# 1.3 - Oct. 02, 2009
#
# - added the feature to not be restricted to a planar surface, in order to
# allow for features such as hills
# - added a curved option, to be able to work with cylindrical and
# spherical shapes (disables the detail and well features - sorry :-)!)
# - added a colorize option, which assigns some shaders to the surfaces
# NOTE: WITH THIS OPTION ENABLED IT IS COMMON THAT UNDOING THE TOOL WILL
# NOT WORK CORRECTLY, SO BE SURE TO SAFE PRIOR TO
# EXECUTING! UNDOING AND RERUNNING THE SCRIPT CAN EVENTUALLY CAUSE
# MAYA TO CRASH!
# FIXES:
# - fixed, what caused maya to crash on some machines running windows
# - the well occurance parameter wasn't correctly implemented
# - fixed a bug where some wells have received top rims
#
# 1.1 - Sep. 21, 2009
# 1.0 - Jul. 12, 2009
#
#----------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------
#
# USE AND MODIFY AT YOUR OWN RISK!!
#
#----------------------------------------------------------------------------------------------
import maya.cmds as cmds
import random
import math
def icQTexecute():
    """Entry point: extrude a city-like structure from the selected faces.

    NOTE(review): this file is a partially-converted MEL script; several
    statements below are not valid/working Python (flagged inline).
    Indentation here is reconstructed — the dump lost the original layout.
    """
    # Initialize the variables (MEL-style declarations translated to None).
    sel = None
    sel = cmds.ls(selection=True)
    polyNameArray = None
    polyName = None
    compList = None
    faceNum = None
    dropout = None
    lengthValue = None
    # NOTE(review): cmds.ls returns strings; strings have no .nodeType
    # attribute — this MEL leftover would raise AttributeError.
    if ( sel[0].nodeType != "mesh"):
        cmds.error("Please select some polygons!")
    # NOTE(review): faceNum is None here, so this del slice raises TypeError.
    del faceNum[0:len(faceNum)]
    # Get the numbers of the selected polygons.
    compList = icQTgetSelectionNumbers()
    faceNum = compList.split(":")
    polyNameArray = sel[0].split(".")
    # Create the shaders if necessary.
    if (cmds.checkBox('applyColor', query=True, value=True)):
        icTQcreateColors()
    cmds.progressWindow(title="Constructing",
        progress=0,
        max=(len(faceNum)),
        isInterruptable=True)
    # Go through each selected polygon and extrude it.
    for i, w in enumerate(faceNum):
        # NOTE(review): endProgress=True terminates the window each pass, and
        # "Face " + i concatenates str+int (TypeError) — MEL translation bugs.
        cmds.progressWindow(endProgress=True,progress=i,status=("Face " + i + " of " + (len(faceNum)) + " in Progress"))
        dropout = random.randint(1,10)
        # NOTE(review): passes the random int as the checkBox control name;
        # the original MEL queried the 'dropout' UI control instead.
        if (dropout > (cmds.checkBox(dropout, query=True, value=True)) / 10 + 1):
            # Select the current polygon.
            polyName = polyNameArray[0] + ".f[" + faceNum[i] + "]"
            cmds.select(polyName,replace=True)
            lengthValue = icQTinnerExtrude(polyName, polyNameArray[0], "first")
            icQTfirstLevelExtrude( polyName, lengthValue, polyNameArray[0] )
        # Check if the dialog has been cancelled
        if (cmds.progressWindow(query=True,isCancelled=True)):
            break
    # If any geometry for the top surfaces has been created, combine it with
    # the source geometry.
    if (cmds.objExists('topCubes')):
        cmds.select(polyNameArray[0],replace=True)
        topCubes = cmds.listRelatives('topCubes',children=True)
        for cubes in topCubes:
            cmds.select(polyNameArray[0],allDescendents=True)
        cmds.polyUnite(0,polyUnite=True)
        cmds.constructionHistory()
    cmds.progressWindow(endProgress=True)
    cmds.select(clear=True)
def icQTgetSelectionNumbers():
    """Build a colon-separated string of component indices from the selection.

    NOTE(review): broken MEL conversion. The MEL original parsed component
    index substrings; here ``len(...)`` of the string fragments is used
    instead of the numeric index values, and list/str accumulators are still
    None (every ``faceNum[len(faceNum)] = ...`` and the final concatenation
    would raise). Reconstructed indentation.
    """
    sel = None
    sel = cmds.ls(selection=True)
    selArray=None
    selPart=None
    selStart=None
    selEnd=None
    faceNum=None
    compList=None
    # Find all polygon numbers of the selection.
    for s in sel:
        # If there is a range of polygons selected (e.g. "pPlane1.f[3:7]").
        if ":" in s:
            # Separate the start and end numbers.
            selArray = s.split(":")
            # Find the number of digits of the polygon number.
            selPart=selArray[0].split("[")
            # Define the number for the start of the range.
            selStart = min((len(selArray[0]) - len(selPart[1]) + 1), (len(selArray[0])))
            # Define the number for the end of the range.
            selPart=selArray[1].split("]")
            selEnd = max(1, (len(selPart[0])))
            # Build a list of all numbers in between.
            for i in range(selStart,selEnd):
                # NOTE(review): invalid — index assignment past the end of a
                # list (and faceNum is None); MEL's array append idiom.
                faceNum[len(faceNum)] = i
        # If there is a single polygon listed.
        else:
            selArray = s.split("[")
            faceNum[len(faceNum)] = max(1, (len(selArray[1]) - 1))
    for f in faceNum:
        # NOTE(review): compList starts as None, so this concatenation raises.
        compList = (compList + f + ":")
    return compList
def icQTinnerExtrude( polyName, nodeName, stage ):
    """Extrude *polyName* inward with a random scale/offset and return the
    length of its longest edge (used to size subsequent extrusions).

    NOTE(review): partially-converted MEL — several list accumulators are
    still None and some random-index/float-comparison idioms are invalid in
    Python (flagged inline). Indentation reconstructed.
    """
    # MEL-style declarations translated to None placeholders.
    compList=None
    edgeNum=None
    extrude=None
    bbox1=None
    bbox2=None
    freeX=None
    freeY=None
    localScale=None
    moveVal=None
    moveValList=None
    moveValMul=None
    edgePos=None
    edgeLengthArray=None
    lengthValue=None
    lengthValueList=None
    baseInset=None
    randSign=None
    moveValIndex=None
    #continue = 1
    edgeStart=None
    edgeEnd=None
    edgeLength=None
    # Assign the random scale values.
    scaleRangeLow = cmds.floatFieldGrp(query=True,value1='minScale')
    scaleRangeHi = 1.5
    randSX = random.uniform(scaleRangeLow,scaleRangeHi)
    randSY = random.uniform(scaleRangeLow,scaleRangeHi)
    # Color the selection.
    if (stage == "first"):
        icQTapplyColor(polyName, "street" )
    else:
        # Colorize with a random surface shader.
        colorNames = ["concrete1","concrete2","dark1","dark2","light1","light2"]
        # NOTE(review): randint requires int bounds — 5.9 raises; should be
        # randint(0, 5) (or uniform + int()).
        randColor = random.randint(0,5.9)
        icQTapplyColor(polyName, colorNames[randColor] )
    # Get the initial bounding box of the selection.
    bbox1 = cmds.xform(polyName,query=True,boundingBox=True)
    # Extrude the face and scale it.
    extrude = cmds.polyExtrudeFacet(polyName,constructionHistory=True,keepFacesTogether=False)
    cmds.setAttr((extrude[0] + ".localScale") , randSX, randSY, 1, type="double3")
    # Decrease the local scale until the size is smaller than the initial
    # bounding box (iterative shrink, max 50 steps).
    for i in range(0,50):
        localScale = cmds.getAttr(extrude[0] + ".localScale")
        cmds.setAttr((extrude[0] + ".localScaleX"), (localScale[0] * 0.99))
        cmds.setAttr((extrude[0] + ".localScaleY"), (localScale[1] * 0.99))
        bbox2 = cmds.xform(polyName,query=True,boundingBox=True)
        # Include some headroom space in the calculation.
        freeX = bbox2[3] / 30
        freeY = bbox2[5] / 30
        # If the bounding box is in positive space go for the max values first.
        if (bbox1[3] >= 0):
            # Check for the max values of the bounding box.
            if ((abs(bbox1[3]) > abs(bbox2[3] + freeX))and(abs(bbox1[5]) > abs(bbox2[5] + freeY))):
                # Check for the min values of the bounding box.
                if ((abs(bbox1[0]) > abs(bbox2[0] + freeX))and(abs(bbox1[2]) > abs(bbox2[2] + freeY))):
                    break
        # If the bounding box is in negative space go for the min values first.
        else:
            # Check for the min values of the bounding box.
            if ((abs(bbox1[0]) > abs(bbox2[0] + freeX))and(abs(bbox1[2]) > abs(bbox2[2] + freeY))):
                # Check for the max values of the bounding box.
                if ((abs(bbox1[3]) > abs(bbox2[3] + freeX))and(abs(bbox1[5]) > abs(bbox2[5] + freeY))):
                    break
    # If the surface is non planar scale the height to zero.
    if (not(cmds.checkBox(query=True,value='planarCheck'))):
        if (bbox2[1] != bbox2[4]):
            cmds.scale(1,0,1,polyName,pivot=(0,(bbox2[1] + ((bbox2[4] - bbox2[1]) / 2)),0),relative=True)
    # Define an offset multiplier for each polygon.
    moveValMult = random.uniform(0.1,1)
    # NOTE(review): moveValList is None — index assignment raises.
    moveValList[0] = (abs(bbox1[3]) - abs(bbox2[3])) * moveValMult
    moveValList[1] = (abs(bbox1[5]) - abs(bbox2[5])) * moveValMult
    # Create a random index to choose if the translation is in X or Y.
    # NOTE(review): uniform returns a float, so `moveValIndex == 0` below is
    # almost never true — MEL's rand() int idiom mistranslated.
    moveValIndex = random.uniform(0,1.9)
    # Create a random sign for the translation.
    randSign = random.uniform(-1.9,1.9)
    if (moveValIndex == 0):
        cmds.setAttr((extrude[0] + ".localTranslateX"), (moveValList[0] * randSign))
    else:
        cmds.setAttr((extrude[0] + ".localTranslateY"), (moveValList[1] * randSign))
    # Convert the face to an edge selection.
    cmds.select(cmds.polyListComponentConversion(polyName,fromFace=True,toEdge=True ))
    # Get the numbers of the selected edges.
    compList = icQTgetSelectionNumbers()
    edgeNum = compList.split(":")
    # Find the longest edge in the list.
    for e in edgeNum:
        lengthValue = 0
        # NOTE(review): the edge name is passed as the value of the
        # `translation` flag instead of as the object argument — misuse of
        # cmds.xform from the MEL conversion.
        edgePos = cmds.xform(query=True,worldSpace=True,translation=(nodeName + ".e[" + e + "]"))
        #edgeStart = Vector(edgePos[0],edgePos[1],edgePos[2])
        #edgeEnd = Vector(edgePos[3],edgePos[4],edgePos[5])
        # Calculate the length of the edge (axis-aligned approximation).
        edgeLengthArray = [abs(edgePos[0] - edgePos[3]),abs(edgePos[1]-edgePos[4]),abs(edgePos[2]-edgePos[5])]
        for el in edgeLengthArray:
            if (el > lengthValue):
                lengthValue = el
        # NOTE(review): lengthValueList is None — index assignment raises.
        lengthValueList[len(lengthValueList)] = lengthValue
    # Finally find the longest edge.
    lengthValue = 0
    for v in lengthValueList:
        if (v > lengthValue):
            lengthValue = v
    # Create a base extrusion if the polygon hasn't been offset much
    # (disabled MEL code kept for reference):
    # if (moveValMult < 0.2):
    #
    # baseInset = rand(0.9,0.95)
    # extrude = `polyExtrudeFacet -ch 1 -kft 0 polyName`
    # setAttr (extrude[0] + ".localTranslateZ") (lengthValue / 4)
    # extrude = `polyExtrudeFacet -ch 1 -kft 0 polyName`
    # setAttr (extrude[0] + ".localScaleX") baseInset
    # setAttr (extrude[0] + ".localScaleY") baseInset
    return lengthValue
def icQTfirstLevelExtrude(polyName, lengthValue, nodeName):
    """Raise a 'building' from *polyName*: random-height extrusion plus
    optional stacks, wells, rims, roof cubes, antennas and top details.

    NOTE(review): partially-converted MEL; branch nesting reconstructed from
    the flattened dump — verify against the original MEL before relying on
    the exact else-pairings. Float values are used as list indices in several
    places (raises TypeError); `randSecondLevel == 1`/`== 2` on a uniform
    float is almost never true.
    """
    # MEL-style declarations translated to None placeholders.
    extrude=None
    bbCube=None
    cubeCenterX=None
    cubeCenterZ=None
    cubeSize=None
    randomHeight=None
    randomWellHeight=None
    special=None
    shrinkScale=None
    randSecondLevel=None
    wellsRandom=None
    shrinkRandom=None
    noWells=None
    # Extrude the face with some random height.
    heightMin = cmds.floatFieldGrp(query=True,value1='heightRange')
    heightMax = cmds.floatFieldGrp(query=True,value2='heightRange')
    randomHeight = random.uniform(heightMin,heightMax)
    randomWellHeight = random.uniform(1,1.1)
    # Colorize.
    colorNames = ["brick1","brick2","glass1","glass2",
        "sand1","sand2","dark1","dark2","light1","light2"]
    # NOTE(review): float — used below as a list index (TypeError).
    randColor = random.uniform(0,9.9)
    special = random.uniform(1,10)
    stacksTrue = cmds.checkBox(query=True,value='stacksCheck')
    if (stacksTrue == 1 and special > 1 and special < 2):
        # Stacked (multi-story) building variant.
        icQTstackExtrude ( polyName, (lengthValue * randomHeight) )
        noWells = 1
    else:
        icQTapplyColor(polyName, colorNames[randColor] )
        shrinkRandom = random.uniform(0,12)
        shrinkScale = random.uniform(0.8,0.95)
        extrude = cmds.polyExtrudeFacet(polyName,constructionHistory=True,keepFacesTogether=False)
        cmds.setAttr((extrude[0] + ".localTranslateZ"), (lengthValue * randomHeight))
        if (cmds.checkBox(query=True,value='shrinkCheck') and (shrinkRandom < 2)):
            cmds.setAttr((extrude[0] + ".localScaleX"), shrinkScale)
            cmds.setAttr((extrude[0] + ".localScaleY"), shrinkScale)
            noWells = 1
        #
        # For the vertical walls.
        #
        if (cmds.checkBox(query=True,value='wellsCheck') and (noWells == 0)):
            wellsRandom = random.uniform(0,10)
            if (wellsRandom < cmds.floatFieldGrp(query=True,value1='wellsOccur')):
                # Get the dimensions.
                bbCube = cmds.xform(polyName,query=True,boundingBox=True)
                cubeSize = (abs(bbCube[0] - bbCube[3])) * (abs(bbCube[2] - bbCube[5]))
                cubeCenterX = (bbCube[0] + bbCube[3]) / 2
                cubeCenterZ = (bbCube[2] + bbCube[5]) / 2
                wells = icQTwells ( lengthValue, cubeSize )
                # Get the dimension for the longest edge to decide if the
                # vertical walls need to be rotated.
                if (bbCube[5] - bbCube[2] > bbCube[3] - bbCube[0]):
                    cmds.rotate(0, 90, 0, wells, relative=True)
                # Place the wells at the center of the polygon.
                cmds.xform('wells',ws=True,t=(cubeCenterX, (bbCube[1] - (lengthValue * randomHeight)), cubeCenterZ))
                cmds.makeIdentity('wells',t=True,s=True,a=True)
                # Scale the wells to the height of the extrusion.
                cmds.scale(1, (lengthValue * randomHeight * random.uniform(0.97,1.005)), 1, wells,absolute=True)
                # If not already present create a group and parent the new
                # geometry under it.
                if (not(cmds.objExists('topCubes'))):
                    cmds.createNode('transform',n='topCubes')
                cmds.parent('wells', 'topCubes')
        # Define the color for the top rim.
        # Colorize.
        rimColor = ["rim1","rim2"]
        # NOTE(review): float index again (TypeError when used below).
        randColorRim = random.uniform(0,1.9)
        # Build a rim at the top.
        icQTbuildRim( polyName, rimColor[randColorRim] )
        # Build another level based on random number.
        randSecondLevel = random.uniform(0.2,4)
        if (randSecondLevel < 1 and randomHeight >= 1 and cmds.checkBox(query=True,v='roofCube')):
            icQTroofCube ( polyName, nodeName, rimColor[randColorRim] )
        if (randSecondLevel == 1 and randomHeight >= 2):
            if (cmds.checkBox(query=True,v='stairs')):
                icQTstairStepSecondLevel ( polyName, lengthValue, colorNames[randColor], rimColor[randColorRim] )
                if (randomHeight >= 2 and cmds.checkBox(query=True, v='antenna')):
                    icQTbuildAntenna ( polyName, lengthValue )
            else:
                if (cmds.checkBox(query=True,value='topDetail')):
                    for i in range(0,cmds.radioButtonGrp(query=True,s1='complexity')):
                        icQTtopDetail ( polyName, i )
        else:
            if (cmds.checkBox(query=True,value='topDetail')):
                for i in range(0,cmds.radioButtonGrp(query=True,s1='complexity')):
                    icQTtopDetail ( polyName, i )
        if (randSecondLevel == 2):
            randomTop = random.uniform(-1,1.3)
            if (randomTop > 0):
                icQTsplitSecondLevel ( polyName, nodeName )
                if (randomHeight >= heightMax / 2 and cmds.checkBox(query=True,v='topDetail')):
                    for i in range(0,cmds.radioButtonGrp(query=True,s1='complexity')):
                        icQTtopDetail ( polyName, i )
            elif (randomTop < 0 and cmds.checkBox(query=True, v='topDetail')):
                icQTtopDetail ( polyName, 0 )
            icQTdivideSecondLevel ( polyName, nodeName )
    # Final cleanup: harden normals and delete construction history.
    cmds.select(nodeName,r=True)
    cmds.polySoftEdge(a=False)
    cmds.delete(nodeName,ch=True)
def icQTstackExtrude( polyName, lengthValue ):
    """Extrude *polyName* as a stacked multi-story building: a base, then
    alternating ledge/floor extrusions, finally a rim-colored top polygon.

    NOTE(review): MEL conversion artifacts — `random.uniform` results are
    used as list indices (TypeError) and `range(0, floors)` receives a float
    from `math.ceil` on Python 2 semantics. Indentation reconstructed.
    """
    # MEL-style declarations translated to None placeholders.
    extrude=None
    baseHeight=None
    storyHeight=None
    ledgeHeight=None
    ledgeScale=None
    storyScale=None
    storyLedgeRatio=None
    floors=None
    # Define the base height before the stacking occurs.
    baseHeight = random.uniform(0.05,0.15)
    # The actual base height is a fragment of the overall length.
    baseHeight = baseHeight * lengthValue
    # The remaining extrude length is without the base height.
    lengthValue = lengthValue - baseHeight
    # The story height depends on the scale.
    # Define the ledge scale and story scale.
    ledgeScale = random.uniform(0.9,0.98)
    ledgeUpScale = 1 + (1 - ledgeScale)
    storyScale = random.uniform(0.5,2)
    floors = math.ceil(lengthValue / storyScale)
    storyLedgeRatio = random.uniform(4,11)
    # The story height is 3/4, the ledge height is 1/4.
    storyHeight = lengthValue / floors / storyLedgeRatio
    ledgeHeight = storyHeight
    storyHeight = storyHeight * (storyLedgeRatio - 1)
    # Extrude the base height.
    extrude = cmds.polyExtrudeFacet(polyName, ch=True,kft=False)
    cmds.setAttr((extrude[0] + ".localTranslateZ"),baseHeight)
    # Colorize.
    colorLedge = ["concrete1","concrete2","dark1","dark2","light1","light2"]
    colorFloor = ["brick1","brick2","glass1","glass2","sand1","sand2","light1","light2"]
    # NOTE(review): floats used as list indices below (TypeError).
    randColorLedge = random.uniform(0,5.9)
    randColorFloor = random.uniform(0,7.9)
    icQTapplyColor ( polyName, colorLedge[randColorLedge] )
    icQTapplyColor ( polyName, colorFloor[randColorFloor] )
    # Create the stories.
    for i in range(0,floors):
        icQTapplyColor ( polyName, colorLedge[randColorLedge] )
        # Extrude the face and scale it (ledge pushes slightly outward).
        extrude = cmds.polyExtrudeFacet(polyName,ch=True,kft=False)
        cmds.setAttr((extrude[0] + ".localScale"), ledgeUpScale, ledgeUpScale, 1,type='double3')
        # Extrude for the ledge height.
        extrude = cmds.polyExtrudeFacet(polyName,ch=True,kft=False)
        cmds.setAttr((extrude[0] + ".localTranslateZ"), ledgeHeight)
        # Extrude for the ledge inset.
        extrude = cmds.polyExtrudeFacet(polyName,ch=True,kft=False)
        cmds.setAttr((extrude[0] + ".localScale"), ledgeScale, ledgeScale, 1,type='double3')
        # Extrude for the floor height.
        icQTapplyColor ( polyName, colorFloor[randColorFloor] )
        extrude = cmds.polyExtrudeFacet(polyName,ch=True,kft=False)
        cmds.setAttr ((extrude[0] + ".localTranslateZ"), storyHeight)
    # Colorize the top polygon.
    colorNames = ["rim1","rim2"]
    randColor = random.uniform(0,1.9)
    icQTapplyColor ( polyName, colorNames[randColor] )
def icQTstairStepSecondLevel( polyName, lengthValue, color1, color2 ):
    """Extrude a stair-stepped second level (shrinking boxes) on a face.

    polyName -- the top face to extrude from.
    lengthValue -- base height for the first step; shrinks 20% per step.
    color1 -- shader base name for the step sides.
    color2 -- shader base name for the top rim.
    """
    # query the control by name; the original 'sl=<name>' form was a
    # broken MEL conversion (the name goes first, sl=True queries it)
    maxLevelSteps = cmds.radioButtonGrp('maxLevelSteps', query=True, sl=True)
    # number of steps as an int in [1, maxLevelSteps]; range() needs an int
    levelNum = random.randint(1, maxLevelSteps)
    levelHeight = random.uniform(0.2, 0.8)
    levelScale = random.uniform(0.6, 0.8)
    for i in range(0, levelNum):
        # extrude the face and scale it in
        extrude = cmds.polyExtrudeFacet(polyName, ch=True, kft=False)
        cmds.setAttr((extrude[0] + ".localScale"), levelScale, levelScale, 1, type='double3')
        icQTapplyColor ( polyName, color1 )
        # extrude again and set a height
        extrude = cmds.polyExtrudeFacet(polyName, ch=True, kft=False)
        cmds.setAttr((extrude[0] + ".localTranslateZ"), (lengthValue * levelHeight))
        # each step is 80% of the previous step's height
        lengthValue = lengthValue * 0.8
    # build a rim at the top
    icQTbuildRim ( polyName, color2 )
def icQTbuildAntenna( polyName, lengthValue ):
    """Extrude a thin antenna from the given top face.

    polyName -- the top face to extrude from.
    lengthValue -- base length; the antenna is 0.6-1.5 times this value.
    """
    antennaHeight = random.uniform(0.6, 1.5)
    # extrude the face and scale it down to a thin tip
    extrude = cmds.polyExtrudeFacet(polyName, ch=True, kft=False)
    cmds.setAttr((extrude[0] + ".localScale"), 0.1, 0.1, 1, type='double3')
    # colorize; list indices must be ints, so use randint (the original
    # float from random.uniform raises TypeError as an index)
    colorNames = ["concrete1","concrete2","dark1","dark2","light1","light2"]
    randColor = random.randint(0, 5)
    icQTapplyColor ( polyName, colorNames[randColor] )
    # extrude again and set the antenna height
    extrude = cmds.polyExtrudeFacet(polyName, ch=True, kft=False)
    cmds.setAttr((extrude[0] + ".localTranslateZ"), (lengthValue * antennaHeight))
    cmds.setAttr((extrude[0] + ".localScale"), 0.1, 0.1, 1, type='double3')
def icQTsplitSecondLevel( polyName, nodeName ):
    """Split the top face along a random parallel edge pair and extrude
    one half to form a split second level.

    NOTE(review): this block looks like an unfinished MEL-to-Python
    conversion; several statements appear broken (see inline notes) and
    the nesting below was reconstructed from context -- confirm against
    the original MEL before relying on it.
    """
    extrude=None
    compList=None
    vertList=None
    edgeNum=None
    vertNum=None
    faceNum=None
    edgePos=None
    edgeLengthArray=None
    lengthValue=None
    # NOTE(review): lengthValueList/edgeList are written below with the
    # MEL idiom 'x[len(x)] = ...'; in Python they would need to be lists
    # using .append() -- as written this raises TypeError on None.
    lengthValueList=None
    splitValue=None
    edgeStart=None
    edgeEnd=None
    edgeLength=None
    randomEdge=None
    edgeList=None
    parallel=None
    divisions=None
    #convert the face to an edge selection
    cmds.select(cmds.polyListComponentConversion(polyName,ff=True,te=True))
    #get the numbers of the selected edges
    compList = icQTgetSelectionNumbers()
    edgeNum = compList.split(":")
    #find the edge lengths
    for e in edgeNum:
        lengthValue = 0
        # NOTE(review): the queried object should be a positional argument,
        # i.e. cmds.xform(nodeName + ".e[...]", query=True, ws=True, t=True)
        edgePos = cmds.xform(query=True,ws=True,t=(nodeName + ".e[" + e + "]"))
        edgeLengthArray = [edgePos[0]-edgePos[3],edgePos[1]-edgePos[4],edgePos[2]-edgePos[5]]
        # keep the largest axis delta as the edge length
        for el in edgeLengthArray:
            if (el > lengthValue):
                lengthValue = el
        lengthValueList[len(lengthValueList)] = lengthValue
    #pick an edge from the list that defines the length of the edge pair
    # NOTE(review): random.uniform returns a float; the index use below
    # requires an int (int(...) or random.randint)
    randomEdge = random.uniform(0,(len(lengthValueList) - 1))
    #find the edge pair that is the size of the random edge
    for i in range(0,len(lengthValueList)):
        if (lengthValueList[i] > lengthValueList[randomEdge] - 0.001 and lengthValueList[i] < lengthValueList[randomEdge] + 0.001):
            edgeList[len(edgeList)] = edgeNum[i]
    #if the polygon is nonsquare take the first two edges
    if (len(edgeList) != 2):
        edgeList[0] = edgeNum[0]
        for i in range(1,len(lengthValueList)):
            if (parallel == 0):
                edgeList[1] = edgeNum[i]
                cmds.select((nodeName + ".e[" + edgeList[i] + "]"),r=(nodeName + ".e[" + edgeList[0] + "]"))
                #convert the edge to a vertex selection
                cmds.select(cmds.polyListComponentConversion(fe=True,tv=True))
                #get the vertex selection
                vertList = icQTgetSelectionNumbers()
                vertNum = vertList.split(":")
                #if there are only three verts selected the two edges connect
                #in this case get the next edge; four verts means parallel
                if (len(vertNum) == 4):
                    parallel = 1
        # collect the remaining edges not already in the pair
        for i in range(0,len(edgeNum)):
            if (edgeNum[i] != edgeList[0] and edgeNum[i] != edgeList[1]):
                edgeList[len(edgeList)] = edgeNum[i]
    #generate a split value
    splitValue = random.uniform(0.25,0.75)
    #and split the polygon
    cmds.polySplit(ch=1,s=1,sma=90,ep=[(edgeList[0],splitValue),(edgeList[1],splitValue)])
    #colorize
    colorNames = ["concrete1","concrete2","dark1","dark2","light1","light2"]
    # NOTE(review): float index again -- see note above
    randColor = random.uniform(0,5.9)
    icQTapplyColor ( polyName, colorNames[randColor] )
    #extrude and set a height
    extrude = cmds.polyExtrudeFacet(polyName,ch=1,kft=0)
    cmds.setAttr((extrude[0] + ".localTranslateZ"),(min((lengthValue * splitValue),(lengthValue / 10))))
def icQTdivideSecondLevel( polyName, nodeName ):
    """Subdivide the top face and randomly extrude some of the resulting
    faces to create roof clutter.

    polyName -- the top face to subdivide.
    nodeName -- the transform node owning the face components.
    """
    # subdivide the top; polySmooth's dv flag expects an int division count,
    # so draw an int instead of the original float from random.uniform
    cmds.select(polyName, r=True)
    divisions = random.randint(1, 2)
    cmds.polySmooth(polyName, mth=0, dv=divisions, c=0, kb=1, sl=1, ch=1)
    # get the numbers of the selected faces
    compList = icQTgetSelectionNumbers()
    faceNum = compList.split(":")
    # go through each face and extrude it by random
    for f in faceNum:
        # roughly one in three faces gets extruded
        skip = random.uniform(-2, 4)
        if (skip < 0):
            polyName = nodeName + ".f[" + f + "]"
            lengthValue = icQTinnerExtrude ( polyName, nodeName, "" )
            extrude = cmds.polyExtrudeFacet(polyName, ch=1, kft=0)
            cmds.setAttr((extrude[0] + ".localTranslateZ"), (lengthValue / 2))
def icQTroofCube( polyName, nodeName, color ):
    """Extrude an offset roof cube on top of the given face and cap it
    with a rim.

    polyName -- the top face to extrude from.
    nodeName -- the owning transform (kept for interface compatibility).
    color -- shader base name applied to the cube and rim.
    """
    # query the size range fields by control name; the original passed the
    # name as the flag value, which is a broken MEL conversion
    sizeMin = cmds.floatFieldGrp('sizeRange', query=True, v1=True)
    sizeMax = cmds.floatFieldGrp('sizeRange', query=True, v2=True)
    randSX = random.uniform(sizeMin, sizeMax)
    randSY = random.uniform(sizeMin, sizeMax)
    # get the initial bounding box of the selection
    bbox1 = cmds.xform(polyName, query=True, bb=True)
    # extrude the face and scale it down
    extrude = cmds.polyExtrudeFacet(polyName, ch=1, kft=0)
    cmds.setAttr((extrude[0] + ".localScale"), randSX, randSY, 1, type='double3')
    # get the new bounding box size
    bbox2 = cmds.xform(polyName, query=True, bb=True)
    icQTapplyColor ( polyName, color )
    # the available offset in X/Y is the bounding box shrinkage, scaled
    # by a random multiplier (moveValList must be a real list -- the
    # original indexed into None)
    moveValMult = random.uniform(0.6, 0.9)
    moveValList = [
        (abs(bbox1[3]) - abs(bbox2[3])) * moveValMult,
        (abs(bbox1[5]) - abs(bbox2[5])) * moveValMult,
    ]
    # choose randomly whether the translation is in X or Y; an int is
    # needed both for the == 0 test and for the list index below
    moveValIndex = random.randint(0, 1)
    # random sign/magnitude for the translation
    randSign = random.uniform(-1.9, 1.9)
    if (moveValIndex == 0):
        cmds.setAttr((extrude[0] + ".localTranslateX"), (moveValList[0] * randSign))
    else:
        cmds.setAttr((extrude[0] + ".localTranslateY"), (moveValList[1] * randSign))
    # extrude and set a height
    extrude = cmds.polyExtrudeFacet(polyName, ch=1, kft=0)
    cmds.setAttr((extrude[0] + ".localTranslateZ"), (abs(moveValList[moveValIndex]) * 0.5))
    # build a rim at the top
    icQTbuildRim ( polyName, color )
def icQTtopDetail( topPoly, scaleHeight ):
    """Place a random detail object (cube, L-shape or triplet) on a roof face.

    topPoly -- the roof face to decorate.
    scaleHeight -- extra height factor; each unit adds 10% of the user height.
    """
    bbFloor = cmds.xform(topPoly, query=True, bb=True)
    floorSize = (abs(bbFloor[0] - bbFloor[3])) * (abs(bbFloor[2] - bbFloor[5]))
    floorCenterX = (bbFloor[0] + bbFloor[3]) / 2
    floorCenterZ = (bbFloor[2] + bbFloor[5]) / 2
    # pick the detail type; truncate to an int so the == comparisons below
    # can actually match (a raw float from uniform almost never equals 1)
    randType = int(random.uniform(0, 3.5))
    if (randType == 1):
        roofCube = icQTsingleCube(0)
        # colorize; list indices must be ints
        colorNames = ["concrete1","concrete2","dark1","dark2","light1","light2"]
        randColor = random.randint(0, 5)
        icQTapplyColor ( roofCube, colorNames[randColor] )
    elif (randType == 2):
        roofCube = icQTlShape()
    elif (randType == 3):
        roofCube = icQTtriplet()
    else:
        # randType 0: no detail for this face
        return
    # place the cube at the center of the selected polygon
    cmds.xform(roofCube, ws=True, t=(floorCenterX, bbFloor[4], floorCenterZ))
    # and get its dimensions
    bbCube = cmds.xform(roofCube, query=True, bb=True)
    cubeSize = (abs(bbCube[0] - bbCube[3])) * (abs(bbCube[2] - bbCube[5]))
    # query the detail height field by control name and vary it +/-20%
    userHeight = cmds.floatFieldGrp('detailHeight', query=True, v1=True)
    userHeight = random.uniform(userHeight * 0.8, userHeight * 1.2)
    if (scaleHeight > 0):
        height = userHeight + (userHeight / 10 * scaleHeight)
    else:
        height = userHeight
    # scale and position the cube; in cmds.scale the objects come after
    # the xyz values (the original passed the object first, which fails)
    scaleMax = math.sqrt(floorSize / cubeSize)
    randSizeX = random.uniform(scaleMax / 2, scaleMax)
    randSizeZ = random.uniform(randSizeX / 2, randSizeX)
    cmds.scale(randSizeX, height, randSizeZ, roofCube, a=True)
    # freeze the transforms and get the new size
    cmds.makeIdentity(roofCube, t=1, s=1, a=1)
    bbCube = cmds.xform(roofCube, query=True, bb=True)
    # calculate the moving range in X and Z
    moveRangeX = bbFloor[3] - bbCube[3]
    moveRangeZ = bbFloor[5] - bbCube[5]
    randMoveX = random.uniform(moveRangeX * -1, moveRangeX)
    randMoveZ = random.uniform(moveRangeZ * -1, moveRangeZ)
    cmds.move(randMoveX, 0, randMoveZ, roofCube, r=True)
    # if the size is too big reduce its scale, at most ten times
    bbCube = cmds.xform(roofCube, query=True, bb=True)
    for i in range(0, 10):
        if ((bbCube[0] < bbFloor[0]) or (bbCube[3] > bbFloor[3]) or (bbCube[2] < bbFloor[2]) or (bbCube[5] > bbFloor[5])):
            # move it back a bit and scale it down
            cmds.move(((randMoveX / 10) * -1), 0, ((randMoveZ / 10) * -1), roofCube, r=True)
            cmds.scale(0.9, 1, 0.9, roofCube)
            cmds.makeIdentity(roofCube, s=1, a=1)
            # get the new bounding box
            bbCube = cmds.xform(roofCube, query=True, bb=True)
    if ((bbCube[0] < bbFloor[0]) or (bbCube[3] > bbFloor[3]) or (bbCube[2] < bbFloor[2]) or (bbCube[5] > bbFloor[5])):
        # if it is still too big delete it
        cmds.delete(roofCube)
    # if not already present create a group for the roof details;
    # createNode takes the node type as a positional argument
    if (not(cmds.objExists('topCubes'))):
        cmds.createNode('transform', n='topCubes')
    # only parent when the cube survived the size check above (the
    # original nested one objExists call inside the other)
    if (cmds.objExists(roofCube) and cmds.objExists('topCubes')):
        cmds.parent(roofCube, 'topCubes')
def icQTsingleCube(wells):
    """Create a unit cube with an open bottom and its pivot at the base.

    A rim is occasionally added on top, but never for well cubes.
    Returns the transform name of the new cube.
    """
    cubeName = cmds.polyCube(ch=0)[0]
    # remove the bottom face so the cube is open underneath
    cmds.delete(cubeName + ".f[3]")
    # lift the cube so its base sits on the origin, freeze, then re-pivot
    cmds.move(0, 0.5, 0, cubeName, a=True)
    cmds.makeIdentity(cubeName, t=1, a=1)
    cmds.xform(cubeName, ws=True, piv=(0, 0, 0))
    # roughly two out of five cubes get a rim on the top face
    rimChance = random.uniform(0, 5)
    if rimChance < 2 and wells == 0:
        icQTbuildRim(cubeName + ".f[1]", "")
    return cubeName
def icQTlShape():
    """Create an L-shaped roof detail with the pivot centered at its base.

    Returns the transform name of the new object.
    """
    # create a 2x2 subdivided cube
    roofCube = cmds.polyCube(sx=2, sz=2, ch=0)
    # delete the bottom faces
    cmds.delete(roofCube[0] + ".f[8:11]")
    # move the first two faces along their normals; the component is a
    # positional argument (the original was missing the comma and tried
    # to call an int)
    cmds.polyMoveFacet(roofCube[0] + ".f[0:1]", ltz=random.randint(0, 2))
    # extrude one face to form the L
    extrude = cmds.polyExtrudeFacet((roofCube[0] + ".f[13]"), ch=1, kft=0)
    cmds.setAttr((extrude[0] + ".localTranslateZ"), (random.uniform(0.5, 2)))
    # delete construction history on the object (cmds.DeleteHistory takes
    # no object argument; cmds.delete(ch=True) is the explicit form)
    cmds.delete(roofCube[0], ch=True)
    # colorize; list indices must be ints
    colorNames = ["concrete1","concrete2","dark1","dark2","light1","light2"]
    randColor = random.randint(0, 5)
    icQTapplyColor ( roofCube[0], colorNames[randColor] )
    # move the cube so that its pivot is centered at the bottom
    cmds.select(cl=True)
    cmds.move(0, 0.5, 0, roofCube[0], a=True)
    cmds.makeIdentity(roofCube[0], t=1, a=1)
    cmds.select(roofCube[0])
    cmds.CenterPivot()
    cmds.xform(roofCube[0], r=True, piv=(0, -0.5, 0))
    # save the position of the object
    worldPos = cmds.xform(roofCube[0], query=True, ws=True, rp=True)
    # create a new locator and move it to the position of the object
    zero = cmds.spaceLocator()
    cmds.move(worldPos[0], worldPos[1], worldPos[2], zero[0], a=True)
    # parent the object to the locator
    cmds.parent(roofCube[0], zero[0])
    # move the locator back to the world origin
    cmds.move(0, 0, 0, zero[0], a=True)
    # unparent the object and delete the locator
    cmds.parent(roofCube[0], w=True)
    cmds.delete(zero[0])
    # freeze the position transformation of the object explicitly
    cmds.makeIdentity(roofCube[0], a=1, t=1, r=0, s=0)
    # move the object back to the old position
    cmds.move(worldPos[0], worldPos[1], worldPos[2], roofCube[0], a=True)
    # random orientation in 90 degree steps; cmds.rotate takes the xyz
    # values first and needs r=True for a relative rotation
    randRot = random.randint(0, 10)
    for i in range(0, randRot):
        cmds.rotate(0, 90, 0, roofCube[0], r=True, os=True)
    return roofCube[0]
def icQTtriplet():
    """Create three parallel stretched cubes combined into one roof detail.

    Returns the transform name of the combined object.
    """
    # combineList must be a real list -- the original indexed into None
    combineList = []
    roofCube = icQTsingleCube(0)
    moveVal = random.uniform(1.5, 3)
    # colorize; list indices must be ints
    colorNames = ["concrete1","concrete2","dark1","dark2","light1","light2"]
    randColor = random.randint(0, 5)
    icQTapplyColor( roofCube, colorNames[randColor] )
    # stretch the first cube along Z and store it; in cmds.scale the
    # object comes after the xyz values
    cmds.scale(1, 1, random.randint(2, 4), roofCube)
    combineList.append(roofCube)
    rim = random.randint(0, 5)
    if (rim < 2):
        # build a rim at the top
        icQTbuildRim ( roofCube + ".f[1]", "" )
    # duplicate the first cube and offset to +x (the object is positional;
    # rc is a boolean flag, the original passed the object as its value)
    duplicate = cmds.duplicate(roofCube, rr=True, rc=True)
    combineList.append(duplicate[0])
    cmds.move(moveVal, 0, 0, duplicate[0], a=True)
    # duplicate a second time and offset to -x
    duplicate = cmds.duplicate(roofCube, rr=True, rc=True)
    combineList.append(duplicate[0])
    cmds.move((moveVal * -1), 0, 0, duplicate[0], a=True)
    # combine the three cubes
    triplet = cmds.polyUnite(combineList[0], combineList[1], combineList[2], ch=0)
    # delete the leftover transform nodes of the source cubes
    for i in range(1, 3):
        if (cmds.objExists(combineList[i])):
            cmds.delete(combineList[i])
    # random orientation in 90 degree steps (relative rotation)
    randRot = random.randint(0, 10)
    for i in range(0, randRot):
        cmds.rotate(0, 90, 0, triplet[0], r=True, os=True)
    return triplet[0]
def icQTwells( length, size ):
    """Create a row of small 'well' cubes spread over a roof area.

    length -- length of the roof edge the wells are spread along.
    size -- roof area; used to derive the width of the row.
    Returns the transform name of the combined object.
    """
    # combineList must be a real list -- the original indexed into None
    # and used the MEL function size() instead of len()
    combineList = []
    roofCube = icQTsingleCube(1)
    wellSpace = random.randint(2, 6)
    # number of wells as an int so it can drive range() below
    wellCount = random.randint(3, 9)
    # colorize; list indices must be ints
    colorNames = ["concrete1","concrete2","dark1","dark2","light1","light2"]
    randColor = random.randint(0, 5)
    icQTapplyColor( roofCube, colorNames[randColor] )
    # calculate the width from the given length and size
    width = size / length
    # scale the first cube
    cmds.scale((length / wellCount / wellSpace), 1, 1, roofCube)
    # store the first cube
    combineList.append(roofCube)
    # position it at the -x end of the length
    cmds.move((length / -2), 0, 0, roofCube, a=True)
    # duplicate the first cube and offset along +x
    for i in range(1, wellCount):
        duplicate = cmds.duplicate(roofCube, rr=True, rc=True)
        combineList.append(duplicate[0])
        cmds.move((length / wellCount * i), 0, 0, duplicate[0], r=True)
    # combine the cubes; cmds.polyUnite accepts a list of objects, so
    # there is no need to build and eval a MEL command string
    triplet = cmds.polyUnite(combineList, ch=0)
    # delete the leftover transform nodes of the source cubes
    for i in range(1, len(combineList)):
        if (cmds.objExists(combineList[i])):
            cmds.delete(combineList[i])
    # scale the wells for the width
    cmds.scale(1, 1, (width * 1.05), triplet[0], a=True)
    return triplet[0]
def icQTbuildRim( polyName, color ):
    """Extrude a small inset rim on the given face.

    polyName -- the face to extrude the rim from.
    color -- shader base name to apply; when empty a random rim color is used.
    """
    if (color == ""):
        # pick one of the two rim colors; list indices must be ints,
        # so use randint instead of the original float from uniform
        colorNames = ["rim1","rim2"]
        randColor = random.randint(0, 1)
        icQTapplyColor ( polyName, colorNames[randColor] )
    else:
        icQTapplyColor ( polyName, color )
    # inset the face slightly ...
    extrude = cmds.polyExtrudeFacet(polyName, ch=1, kft=0)
    cmds.setAttr((extrude[0] + ".localScale"), 0.95, 0.95, 1, type='double3')
    # ... and push it down to form the rim
    extrude = cmds.polyExtrudeFacet(polyName, ch=1, kft=0)
    cmds.setAttr((extrude[0] + ".localTranslateZ"), -0.1)
def icTQcreateColors():
    """Create the lambert shaders and shading groups used by the generator.

    Each entry in colorNames gets a '<name>_SH' shader and a '<name>_SG'
    shading group; colors are read as consecutive RGB triples from
    baseColors.
    """
    baseColors = [0.6,0.6,0.6,0.7,0.7,0.7,
                  0.4,0.4,0.4,0.6,0.38,0.32,
                  0.47,0.23,0.17,0.36,0.48,0.55,
                  0.2,0.24,0.32,0.73,0.66,0.38,
                  0.6,0.51,0.34,
                  0.25,0.25,0.25,0.12,0.12,0.12,
                  0.85,0.85,0.85,0.96,0.96,0.96,
                  0.3,0.3,0.3,0.5,0.5,0.5]
    colorNames = ["street","concrete1","concrete2",
                  "brick1","brick2","glass1","glass2",
                  "sand1","sand2","dark1","dark2","light1","light2",
                  "rim1","rim2"]
    # 'add' is the extra offset into baseColors; together with the loop
    # index it advances by 3 per color (one RGB triple). It must start at
    # 0 -- the original initialized it to None, breaking the arithmetic.
    add = 0
    for i in range(0, len(colorNames)):
        if (not(cmds.objExists(colorNames[i] + "_SH"))):
            # shadingNode takes the node type positionally; asShader is a
            # boolean flag (the original passed 'lambert' as its value)
            cityShader = cmds.shadingNode('lambert', asShader=True, n=(colorNames[i] + "_SH"))
            citySG = cmds.sets(r=1, nss=1, em=True, n=(colorNames[i] + "_SG"))
            cmds.connectAttr((cityShader + ".outColor"), (citySG + ".surfaceShader"), f=True)
            cmds.setAttr((colorNames[i] + "_SH.color"), baseColors[i + add], baseColors[i + 1 + add], baseColors[i + 2 + add])
        # advance even when the shader already exists so colors stay aligned
        add = add + 2
def icQTapplyColor( polyName, color ):
    """Assign the '<color>_SG' shading group to polyName when the
    'Colorize' checkbox is enabled.

    The original fused the checkbox value flag with the objExists test;
    query the 'applyColor' checkbox by name and test the shading group
    separately.
    """
    if (cmds.checkBox('applyColor', query=True, v=True) and cmds.objExists(color + "_SG")):
        cmds.sets(polyName, e=True, forceElement=(color + "_SG"))
def icQTaddToSet(polyName, type):
    """Add the given polygons to the object set named '<type>_set',
    creating the (empty) set first if it does not exist yet.
    """
    setName = type + "_set"
    if not cmds.objExists(setName):
        # remember the current selection before clearing it
        previousSelection = cmds.ls(sl=True)
        # clear any selection so that an empty set is created
        cmds.select(cl=True)
        cmds.sets(n=setName)
        cmds.select(cl=True)
        # restore the previous selection item by item
        for item in previousSelection:
            cmds.select(item, add=True)
    # add the polygons to the set
    cmds.sets(polyName, e=True, fe=setName)
def icQTcurvedStateChange():
    """Enable/disable the detail-related controls depending on the
    'Curved Surface' checkbox state."""
    curved = cmds.checkBox('curvedCheck', query=True, v=True)
    enabled = 0 if curved else 1
    # curved surfaces cannot carry top details or wells, and must be
    # generated on a planar-like surface
    if curved:
        cmds.checkBox('topDetail', e=True, v=0)
        cmds.checkBox('wellsCheck', e=True, v=0)
        cmds.checkBox('planarCheck', e=True, v=1)
    else:
        cmds.checkBox('topDetail', e=True, v=1)
    cmds.checkBox('topDetail', e=True, en=enabled)
    cmds.radioButtonGrp('complexity', e=True, en=enabled)
    cmds.floatFieldGrp('detailHeight', e=True, en=enabled)
    cmds.checkBox('wellsCheck', e=True, en=enabled)
    cmds.checkBox('planarCheck', e=True, en=enabled)
def icQTown ():
    """Build and show the QTown main window with all generator options."""
    #declare a new window name
    win = "icQtown"
    #is the window already open?
    if (cmds.window(win,exists=True)):
        cmds.deleteUI(win)
    cmds.window(win,t="QTown v1.3",w=380,h=530)
    if (cmds.windowPref(win,exists=True)):
        cmds.windowPref(win,e=True,wh=(380, 530))
    #create the form
    form = cmds.formLayout("parentForm")
    #create the frames in the form
    generalFrame = cmds.frameLayout('generalFrame',l="General",w=380,h=190,li=3,mw=3,mh=3,bs="etchedIn")
    cmds.setParent(form)
    additionalFrame = cmds.frameLayout('additionalFrame',l="Additional",w=380,h=190,li=3,mw=3,mh=3,bs="etchedIn")
    cmds.setParent(form)
    #create the buttons
    # NOTE(review): 'icQTexecute' is not defined in this part of the
    # script; presumably it exists elsewhere in the full file -- verify.
    bExecute = cmds.button('executeButton',l="Build QTown",h=30,en=1,c="icQTexecute")
    #position the elements
    cmds.formLayout(form,e=True,
                    af=[(generalFrame, "top", 3),(generalFrame, "left", 3),(generalFrame, "right", 3),
                        (additionalFrame, "left", 3),(additionalFrame, "right", 3),(bExecute, "bottom", 3),
                        (bExecute, "left", 3),(bExecute, "right", 3)],
                    ac=[(additionalFrame, "top", 3, generalFrame),(additionalFrame, "bottom", 3, bExecute)],
                    an=[(generalFrame, "bottom"),(bExecute, "top")])
    #create the elements for the general section
    cmds.setParent(generalFrame)
    cmds.columnLayout()
    cmds.floatFieldGrp('dropout', l="Dropout %",w=180,cw2=(120, 60),
                       ct2=("left", "left"),co2=( 0, 0),cl2=("left", "left"),
                       pre=0,v1=10)
    cmds.floatFieldGrp('heightRange',l="Height Range",w=280,nf=2,cw3=( 120, 80, 80),
                       ct3=("left", "left", "left"), co3=(0, 0, 0), cl3=("left", "left", "left"),
                       pre=2,v1=0.2,v2=3)
    cmds.floatFieldGrp('minScale',l="Min Scale",w=180,cw2=(120, 60),
                       ct2=("left", "left"), co2=(0, 0),cl2=("left", "left"),
                       v1=0.5)
    cmds.checkBox('planarCheck',l="Planar Surface",v=1)
    cmds.text(l="",h=10)
    # pass a callable so the handler runs when the checkbox is toggled;
    # the original called icQTcurvedStateChange() immediately instead
    cmds.checkBox('curvedCheck',l="Curved Surface",v=0,cc=lambda *args: icQTcurvedStateChange())
    cmds.text(l="",h=10)
    cmds.checkBox('applyColor',l="Colorize",v=0)
    cmds.text(l="",h=10)
    cmds.setParent(additionalFrame)
    cmds.columnLayout()
    cmds.rowColumnLayout(nc=2,cw=[(1, 100),(2, 280)])
    cmds.checkBox('stairs',l="Stairs",v=1)
    # name the group 'maxLevelSteps' so icQTstairStepSecondLevel can
    # query it by name (the original group was anonymous)
    cmds.radioButtonGrp('maxLevelSteps',nrb=4,l="Max Level Steps",h=20,cl5=["left", "left", "left", "left", "left"],
                        cw5=[120, 35, 35, 35, 35],ct5=["left", "both", "both", "both", "both"],
                        la4=["2", "3", "4", "5"],sl=2)
    cmds.text(l="", h=10)
    cmds.text(l="", h=10)
    cmds.checkBox('stacksCheck', l="Stacks", v=0)
    cmds.text( l="", h=20)
    cmds.text(l="", h=10)
    cmds.text(l="", h=10)
    cmds.checkBox('wellsCheck', l="Wells", v=1)
    cmds.floatFieldGrp('wellsOccur', l="Well Occurrence", w=180, cw2=[120, 60],
                       ct2=["left", "left"], co2=[0, 0], cl2=["left", "left"],
                       pre=0, v1=1)
    cmds.text(l="", h=10)
    cmds.text(l="", h=10)
    cmds.checkBox('shrinkCheck', l="Shrink", v=0)
    cmds.text(l="", h=10)
    cmds.text(l="", h=14)
    cmds.text(l="", h=14)
    cmds.checkBox('roofCube',l="Roof Cube",v=1)
    cmds.floatFieldGrp('sizeRange', l="Size Range", w=280, nf=2, cw3=[120, 80, 80],
                       ct3=["left", "left", "left"], co3=[0, 0, 0], cl3=["left", "left", "left"],
                       pre=2, v1=0.25, v2=0.4)
    cmds.text(l="", h=10)
    cmds.text(l="", h=10)
    cmds.checkBox('topDetail', l="Top Detail", v=1)
    cmds.radioButtonGrp('complexity', nrb=3, l="Detail Complexity", h=20, cl4=["left", "left", "left", "left"],
                        cw4=[120, 35, 35, 35], ct4=["left", "both", "both", "both"],
                        la3=["1", "2", "3"], sl=1)
    cmds.text(l="", h=5)
    cmds.text(l="", h=5)
    cmds.text(l="", h=20)
    cmds.floatFieldGrp('detailHeight', l="Detail Height", w=180, cw2=[120, 60],
                       ct2=["left", "left"], co2=[0, 0], cl2=["left", "left"],
                       v1=0.5)
    cmds.text(l="", h=10)
    cmds.text(l="", h=10)
    cmds.checkBox('antenna', l="Antenna", v=1)
    cmds.setParent(form)
    #make the window visible
    cmds.showWindow(win)
# Create the ground plane the buildings will be generated on.
cmds.polyPlane(n='myPlane', sx=25, sy=25, w=29.056873, h=29.827317)
# use F11 to get face mode ... hold shift to get more than one face
icQTown() | [
"panhtran249@gmail.com"
] | panhtran249@gmail.com |
d1a417856e0372940c7300b1fec9088b1beff141 | 8411d44bb4c1316755311beaab8cc4c3ec78475e | /dungeonpath.py | b7c46d4d7423584870852dcb4f55db1d221a3a03 | [] | no_license | jrecuero/rpg3 | 45b40908ff39b692e4a68c958383db0946d9e306 | ee961023841f79c22d21c8a4c7a92225d5525c7a | refs/heads/master | 2021-01-19T07:30:46.996853 | 2014-11-25T08:20:06 | 2014-11-25T08:20:06 | 16,552,124 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,093 | py | #!/usr/bin/env python
"""dungeonpath.py class required for the dungeonpath.
:author: Jose Carlos Recuero
:version: 0.1
:since: 10/21/2014
"""
__docformat__ = 'restructuredtext en'
###############################################################################
## _ _
## (_)_ __ ___ _ __ ___ _ __| |_ ___
## | | '_ ` _ \| '_ \ / _ \| '__| __/ __|
## | | | | | | | |_) | (_) | | | |_\__ \
## |_|_| |_| |_| .__/ \___/|_| \__|___/
## |_|
###############################################################################
#
# import std python modules
#
#
# import dungeonpath python modules
#
import objecto
import dungeonstep
#import player
###############################################################################
##
## ___ ___ _ __ ___| |_ __ _ _ __ | |_ ___
## / __/ _ \| '_ \/ __| __/ _` | '_ \| __/ __|
## | (_| (_) | | | \__ \ || (_| | | | | |_\__ \
## \___\___/|_| |_|___/\__\__,_|_| |_|\__|___/
##
###############################################################################
#
# Movement direction constants used by DungeonPath and player movement data.
FORWARD = 'forward'
BACKWARD = 'backward'
STOP = 'stop'
RIGHT_TURN = 'right turn'
LEFT_TURN = 'left turn'
UPSIDE = 'upside'
DOWNSIDE = 'downside'
###############################################################################
## _ _ _
## ___ _ _| |__ _ __ ___ _ _| |_(_)_ __ ___ ___
## / __| | | | '_ \| '__/ _ \| | | | __| | '_ \ / _ \/ __|
## \__ \ |_| | |_) | | | (_) | |_| | |_| | | | | __/\__ \
## |___/\__,_|_.__/|_| \___/ \__,_|\__|_|_| |_|\___||___/
##
###############################################################################
#
###############################################################################
## _ _ __ _ _ _ _
## ___| | __ _ ___ ___ __| | ___ / _(_)_ __ (_) |_(_) ___ _ __ ___
## / __| |/ _` / __/ __| / _` |/ _ \ |_| | '_ \| | __| |/ _ \| '_ \/ __|
## | (__| | (_| \__ \__ \ | (_| | __/ _| | | | | | |_| | (_) | | | \__ \
## \___|_|\__,_|___/___/ \__,_|\___|_| |_|_| |_|_|\__|_|\___/|_| |_|___/
##
###############################################################################
#
#
#------------------------------------------------------------------------------
class DungeonPath(objecto.Objecto):
    """
    Dungeon path is composed of dungeon steps.
    Position in the dungeon path uses the x coordinate for the dungeon step
    and the y coordinate for the dungeon cell in the given dungeon step.
    """

    #--------------------------------------------------------------------------
    def __init__(self, theName=None):
        """ Initialize DungeonPath instance
        :type theName: str
        :param theName: DungeonPath name
        """
        super(DungeonPath, self).__init__(theName)
        # list of dungeonstep.DungeonStep instances making up the path
        self.path = self.baseLinearPath()
        # players currently registered on this path
        self.players = []

    #--------------------------------------------------------------------------
    def baseLinearPath(self, theLen=100, theWide=3):
        """ Create a basic linear path.
        :type theLen: int
        :param theLen: length of the path (number of steps)
        :type theWide: int
        :param theWide: wide for every path step
        :rtype: list
        :return: the newly created path as a list of DungeonStep
        """
        path = []
        for x in xrange(theLen):
            path.append(dungeonstep.DungeonStep(theWide))
        return path

    #--------------------------------------------------------------------------
    def addPlayer(self, thePlayer, thePathPos=None):
        """ Add a player to the path.
        :type thePlayer: player.Player
        :param thePlayer: player instance to be added to the path
        :type thePathPos: point
        :param thePathPos: position in the path (may be None until placed)
        :rtype: bool
        :return: True if player was added to the path, else False
        """
        if thePlayer not in self.players:
            # movement defaults: move one step forward per turn
            thePlayer.dungeonpath = {'path': self,
                                     'pathpos': thePathPos,
                                     'pathmove': 1,
                                     'pathdir': FORWARD}
            self.players.append(thePlayer)
            return True
        else:
            self.logger.error('Player %s was already in the path' % (thePlayer.name, ))
            return False

    #--------------------------------------------------------------------------
    def removePlayer(self, thePlayer):
        """ Remove a player from the path.
        :type thePlayer: player.Player
        :param thePlayer: player instance to be removed from the path
        :rtype: bool
        :return: True if player was removed from the path, else False
        """
        try:
            # clear the player's path data before removing it from the list
            thePlayer.dungeonpath = None
            self.players.remove(thePlayer)
            return True
        except ValueError:
            self.logger.error('Player %s was not in the path' % (thePlayer.name, ))
            return False

    #--------------------------------------------------------------------------
    def placePlayer(self, thePlayer, thePathPos):
        """ Set the player in a given position.
        :type thePlayer: player.Player
        :param thePlayer: player instance already added to the path
        :type thePathPos: point
        :param thePathPos: position in the path
        :rtype: bool
        :return: True if player was placed in the path, else False
        """
        if thePlayer in self.players:
            thePlayer.dungeonpath['pathpos'] = thePathPos
            return True
        else:
            self.logger.error('Player %s was not in the path' % (thePlayer.name, ))
            return False

    #--------------------------------------------------------------------------
    def movePlayer(self, thePlayer):
        """ Move a player in the path.
        :type thePlayer: player.Player
        :param thePlayer: player to move

        NOTE: assumes the player has been placed (pathpos is a tuple);
        call placePlayer() before moving.
        """
        posX, posY = thePlayer.dungeonpath['pathpos']
        if thePlayer.dungeonpath['pathdir'] == FORWARD:
            posX += thePlayer.dungeonpath['pathmove']
        elif thePlayer.dungeonpath['pathdir'] == BACKWARD:
            posX -= thePlayer.dungeonpath['pathmove']
        elif thePlayer.dungeonpath['pathdir'] == STOP:
            pass
        # BUGFIX: the original used '==' here (a no-op comparison), so the
        # player position was never actually updated.
        thePlayer.dungeonpath['pathpos'] = (posX, posY)
        # Now reset to default player movement data.
        thePlayer.dungeonpath['pathmove'] = 1
        thePlayer.dungeonpath['pathdir'] = FORWARD

    #--------------------------------------------------------------------------
    def movePath(self):
        """ Move all players in the dungeon path.
        """
        map(self.movePlayer, self.players)
###############################################################################
## _
## _ __ ___ __ _(_)_ __
## | '_ ` _ \ / _` | | '_ \
## | | | | | | (_| | | | | |
## |_| |_| |_|\__,_|_|_| |_|
##
###############################################################################
#
# Run the module's doctests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| [
"jose.recuero@gmail.com"
] | jose.recuero@gmail.com |
8bb4e54a6ffca03f781beddb041de88e048ab536 | d0765d7c9977f892f0fd4c623107303ff3582485 | /kaggle/facial-keypoints-detection/testcustom.py | 31654fb9eae296e5d9fe7e0ba1442c0b7be43952 | [] | no_license | Lizonghang/Neural-Network | dc82cfa96d1eafcc192374645b6113774cd1cbf2 | be6ddff70022e1933c1c9ad4b04ef0ac3fcf2f70 | refs/heads/master | 2021-01-01T18:39:06.416372 | 2017-12-26T15:33:09 | 2017-12-26T15:33:09 | 98,390,467 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
def load_image(filepath):
    """Load an image as a normalized grayscale array of shape (1, 96, 96, 1)."""
    # convert to single-channel grayscale and resize to the network input size
    img = Image.open(filepath).convert('L').resize((96, 96))
    # scale pixel values into [0, 1] and add batch/channel dimensions
    return np.array(img).reshape((1, 96, 96, 1)) / 255.0
def display(X, y_pred):
    """Show the 96x96 grayscale image with predicted keypoints overlaid."""
    plt.figure()
    plt.imshow(X.reshape((96, 96)), cmap='gray')
    plt.axis('off')
    # clamp predictions into the unit square, then scale to pixel coords;
    # even indices are x coordinates, odd indices are y coordinates
    clipped = y_pred.clip(0, 1)
    xs = clipped[0::2] * 96.0
    ys = clipped[1::2] * 96.0
    plt.scatter(xs, ys, c='r', marker='x')
    plt.show()
if __name__ == '__main__':
    # load the trained Keras model from the checkpoint directory
    model = load_model('ckpt/model.h5')
    X = load_image('test1.png')
    # predict the 30 keypoint coordinates (15 points, x/y interleaved)
    y_pred = model.predict(X).reshape((30,))
    display(X, y_pred)
| [
"870644199@qq.com"
] | 870644199@qq.com |
a4188b86cd08a539db64761d64b3dffac695aa6b | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/cloud/talent/v4/talent-v4-py/google/cloud/talent_v4/services/tenant_service/transports/grpc.py | a2f0584fc66609fd76ff2cf8c42180bbaa904c65 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,551 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.talent_v4.types import tenant
from google.cloud.talent_v4.types import tenant as gct_tenant
from google.cloud.talent_v4.types import tenant_service
from google.protobuf import empty_pb2 as empty # type: ignore
from .base import TenantServiceTransport, DEFAULT_CLIENT_INFO
# NOTE(review): this class appears to be GAPIC-generated transport code;
# manual edits here are typically overwritten by the generator.
class TenantServiceGrpcTransport(TenantServiceTransport):
    """gRPC backend transport for TenantService.
    A service that handles tenant management, including CRUD and
    enumeration.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # Cache of lazily-created gRPC stub callables, keyed by method name.
    _stubs: Dict[str, Callable]
    def __init__(self, *,
            host: str = 'jobs.googleapis.com',
            credentials: credentials.Credentials = None,
            credentials_file: str = None,
            scopes: Sequence[str] = None,
            channel: grpc.Channel = None,
            api_mtls_endpoint: str = None,
            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
            ssl_channel_credentials: grpc.ChannelCredentials = None,
            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @classmethod
    def create_channel(cls,
                       host: str = 'jobs.googleapis.com',
                       credentials: credentials.Credentials = None,
                       credentials_file: str = None,
                       scopes: Optional[Sequence[str]] = None,
                       quota_project_id: Optional[str] = None,
                       **kwargs) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.
        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        scopes = scopes or cls.AUTH_SCOPES
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            **kwargs
        )
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel
    @property
    def create_tenant(self) -> Callable[
            [tenant_service.CreateTenantRequest],
            gct_tenant.Tenant]:
        r"""Return a callable for the create tenant method over gRPC.
        Creates a new tenant entity.
        Returns:
            Callable[[~.CreateTenantRequest],
                    ~.Tenant]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'create_tenant' not in self._stubs:
            self._stubs['create_tenant'] = self.grpc_channel.unary_unary(
                '/google.cloud.talent.v4.TenantService/CreateTenant',
                request_serializer=tenant_service.CreateTenantRequest.serialize,
                response_deserializer=gct_tenant.Tenant.deserialize,
            )
        return self._stubs['create_tenant']
    @property
    def get_tenant(self) -> Callable[
            [tenant_service.GetTenantRequest],
            tenant.Tenant]:
        r"""Return a callable for the get tenant method over gRPC.
        Retrieves specified tenant.
        Returns:
            Callable[[~.GetTenantRequest],
                    ~.Tenant]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'get_tenant' not in self._stubs:
            self._stubs['get_tenant'] = self.grpc_channel.unary_unary(
                '/google.cloud.talent.v4.TenantService/GetTenant',
                request_serializer=tenant_service.GetTenantRequest.serialize,
                response_deserializer=tenant.Tenant.deserialize,
            )
        return self._stubs['get_tenant']
    @property
    def update_tenant(self) -> Callable[
            [tenant_service.UpdateTenantRequest],
            gct_tenant.Tenant]:
        r"""Return a callable for the update tenant method over gRPC.
        Updates specified tenant.
        Returns:
            Callable[[~.UpdateTenantRequest],
                    ~.Tenant]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'update_tenant' not in self._stubs:
            self._stubs['update_tenant'] = self.grpc_channel.unary_unary(
                '/google.cloud.talent.v4.TenantService/UpdateTenant',
                request_serializer=tenant_service.UpdateTenantRequest.serialize,
                response_deserializer=gct_tenant.Tenant.deserialize,
            )
        return self._stubs['update_tenant']
    @property
    def delete_tenant(self) -> Callable[
            [tenant_service.DeleteTenantRequest],
            empty.Empty]:
        r"""Return a callable for the delete tenant method over gRPC.
        Deletes specified tenant.
        Returns:
            Callable[[~.DeleteTenantRequest],
                    ~.Empty]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'delete_tenant' not in self._stubs:
            self._stubs['delete_tenant'] = self.grpc_channel.unary_unary(
                '/google.cloud.talent.v4.TenantService/DeleteTenant',
                request_serializer=tenant_service.DeleteTenantRequest.serialize,
                response_deserializer=empty.Empty.FromString,
            )
        return self._stubs['delete_tenant']
    @property
    def list_tenants(self) -> Callable[
            [tenant_service.ListTenantsRequest],
            tenant_service.ListTenantsResponse]:
        r"""Return a callable for the list tenants method over gRPC.
        Lists all tenants associated with the project.
        Returns:
            Callable[[~.ListTenantsRequest],
                    ~.ListTenantsResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'list_tenants' not in self._stubs:
            self._stubs['list_tenants'] = self.grpc_channel.unary_unary(
                '/google.cloud.talent.v4.TenantService/ListTenants',
                request_serializer=tenant_service.ListTenantsRequest.serialize,
                response_deserializer=tenant_service.ListTenantsResponse.deserialize,
            )
        return self._stubs['list_tenants']
__all__ = (
'TenantServiceGrpcTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
b6aa7142f269f0ba1dd4f2de043759be68235d84 | fbe7e4cad853f2bbabadc4597cdb9bb96d27bbbf | /Simulation/17143_boj_낚시왕(Simulation)/17143_boj_fishing.py | 2fea7ae7ada0e3ff5fcc083104971d99c861aa80 | [] | no_license | edugieun/Algorithm-Solving | 67e649b894aede10e4f61ebf30d0ddbac67dd4db | a925657b893cc9877c8dbf1b986323e474872204 | refs/heads/master | 2020-09-08T20:02:14.085427 | 2020-04-15T13:30:31 | 2020-04-15T13:30:31 | 221,231,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,056 | py | import sys, time
sys.stdin = open('input.txt', 'r')  # local testing: feed input() from a file
start = time.time()  # wall-clock start; elapsed time is printed at the end
def shark_change():
    """Advance every shark one second, then resolve same-cell fights.

    The module-global ``sharks`` dict maps an id to
    [row, col, speed, direction, size], where direction is 1=up, 2=down,
    3=right, 4=left on the 1-indexed R x C grid.  Movement reflects off
    the walls (flipping the direction); speed is first reduced modulo
    the round-trip length so even very fast sharks settle after a few
    reflection steps.  When two sharks end up on the same cell only the
    biggest one survives.
    """
    # Move every shark according to its direction and (reduced) speed.
    for shark_num in sharks.keys():
        # Direction 1: up (row decreases).
        if sharks[shark_num][3] == 1:
            sharks[shark_num][2] = sharks[shark_num][2] % (2 * R - 2)
            sharks[shark_num][0] -= sharks[shark_num][2]
            while sharks[shark_num][0] <= 0 or sharks[shark_num][0] > R:
                if sharks[shark_num][3] == 1:
                    sharks[shark_num][0] = sharks[shark_num][0] * (-1) + 2
                    sharks[shark_num][3] = 2
                elif sharks[shark_num][3] == 2:
                    sharks[shark_num][0] = R - (sharks[shark_num][0] - R)
                    sharks[shark_num][3] = 1
        # Direction 2: down (row increases).
        elif sharks[shark_num][3] == 2:
            sharks[shark_num][2] = sharks[shark_num][2] % (2 * R - 2)
            sharks[shark_num][0] += sharks[shark_num][2]
            while sharks[shark_num][0] <= 0 or sharks[shark_num][0] > R:
                if sharks[shark_num][3] == 1:
                    sharks[shark_num][0] = sharks[shark_num][0] * (-1) + 2
                    sharks[shark_num][3] = 2
                elif sharks[shark_num][3] == 2:
                    sharks[shark_num][0] = R - (sharks[shark_num][0] - R)
                    sharks[shark_num][3] = 1
        # Direction 3: right (col increases).
        elif sharks[shark_num][3] == 3:
            sharks[shark_num][2] = sharks[shark_num][2] % (2 * C - 2)
            sharks[shark_num][1] += sharks[shark_num][2]
            while sharks[shark_num][1] > C or sharks[shark_num][1] <= 0:
                if sharks[shark_num][3] == 3:
                    sharks[shark_num][1] = C - (sharks[shark_num][1] - C)
                    sharks[shark_num][3] = 4
                elif sharks[shark_num][3] == 4:
                    sharks[shark_num][1] = sharks[shark_num][1] * (-1) + 2
                    sharks[shark_num][3] = 3
        # Direction 4: left (col decreases).
        elif sharks[shark_num][3] == 4:
            sharks[shark_num][2] = sharks[shark_num][2] % (2 * C - 2)
            sharks[shark_num][1] -= sharks[shark_num][2]
            while sharks[shark_num][1] > C or sharks[shark_num][1] <= 0:
                if sharks[shark_num][3] == 3:
                    sharks[shark_num][1] = C - (sharks[shark_num][1] - C)
                    sharks[shark_num][3] = 4
                elif sharks[shark_num][3] == 4:
                    sharks[shark_num][1] = sharks[shark_num][1] * (-1) + 2
                    sharks[shark_num][3] = 3
    N_matrix = [[0] * (C + 1) for i in range(R + 1)]
    # Fight to the death on shared cells.
    # Timeout fix: instead of comparing every pair of sharks (up to
    # 10000 x 10000 checks) scan each shark exactly once and use the
    # grid N_matrix to detect cells that are already occupied.
    dead_shark = []
    for shark_num in sharks.keys():
        if N_matrix[sharks[shark_num][0]][sharks[shark_num][1]] == 0:
            N_matrix[sharks[shark_num][0]][sharks[shark_num][1]] = shark_num
        elif N_matrix[sharks[shark_num][0]][sharks[shark_num][1]] != shark_num:
            if sharks[N_matrix[sharks[shark_num][0]][sharks[shark_num][1]]][4] < sharks[shark_num][4]:
                dead_shark.append(N_matrix[sharks[shark_num][0]][sharks[shark_num][1]])
                N_matrix[sharks[shark_num][0]][sharks[shark_num][1]] = shark_num
            else:
                dead_shark.append(shark_num)
    for shark_num in dead_shark:
        del sharks[shark_num]
# R x C grid with M sharks; each shark line is: row col speed direction size.
R, C, M = map(int, input().split())
sharks = {}
for i in range(M):
    sharks['s' + str(i)] = list(map(int, input().split()))
shark_sum = 0
# The fisherman visits one column per second, left to right.
for person_pos in range(1, C + 1):
    # Fishing: catch the shark closest to the top in the current column.
    get_shark_row = 99999  # sentinel larger than any valid row index
    for shark_num in sharks.keys():
        if sharks[shark_num][1] == person_pos and sharks[shark_num][0] < get_shark_row:
            get_shark = shark_num
            get_shark_row = sharks[get_shark][0]
    if get_shark_row != 99999:
        shark_sum += sharks[get_shark][4]  # accumulate the caught shark's size
        del sharks[get_shark]
    # Move the sharks and resolve collisions before the next second.
    shark_change()
print(shark_sum)
print(time.time() - start)  # debug: total runtime
# 시간초과
# def shark_change():
#
# # 위치 변경
# for shark_num in sharks.keys():
# # 위
# if sharks[shark_num][3] == 1:
# sharks[shark_num][2] = sharks[shark_num][2] % (2 * R - 2)
# sharks[shark_num][0] -= sharks[shark_num][2]
# while sharks[shark_num][0] <= 0 or sharks[shark_num][0] > R:
# if sharks[shark_num][3] == 1:
# sharks[shark_num][0] = sharks[shark_num][0] * (-1) + 2
# sharks[shark_num][3] = 2
# elif sharks[shark_num][3] == 2:
# sharks[shark_num][0] = R - (sharks[shark_num][0] - R)
# sharks[shark_num][3] = 1
# # 아래
# elif sharks[shark_num][3] == 2:
# sharks[shark_num][2] = sharks[shark_num][2] % (2 * R - 2)
# sharks[shark_num][0] += sharks[shark_num][2]
# while sharks[shark_num][0] <= 0 or sharks[shark_num][0] > R:
# if sharks[shark_num][3] == 1:
# sharks[shark_num][0] = sharks[shark_num][0] * (-1) + 2
# sharks[shark_num][3] = 2
# elif sharks[shark_num][3] == 2:
# sharks[shark_num][0] = R - (sharks[shark_num][0] - R)
# sharks[shark_num][3] = 1
# # 오른쪽
# elif sharks[shark_num][3] == 3:
# sharks[shark_num][2] = sharks[shark_num][2] % (2 * C - 2)
# sharks[shark_num][1] += sharks[shark_num][2]
# while sharks[shark_num][1] > C or sharks[shark_num][1] <= 0:
# if sharks[shark_num][3] == 3:
# sharks[shark_num][1] = C - (sharks[shark_num][1] - C)
# sharks[shark_num][3] = 4
# elif sharks[shark_num][3] == 4:
# sharks[shark_num][1] = sharks[shark_num][1] * (-1) + 2
# sharks[shark_num][3] = 3
# # 왼쪽
# elif sharks[shark_num][3] == 4:
# sharks[shark_num][2] = sharks[shark_num][2] % (2 * C - 2)
# sharks[shark_num][1] -= sharks[shark_num][2]
# while sharks[shark_num][1] > C or sharks[shark_num][1] <= 0:
# if sharks[shark_num][3] == 3:
# sharks[shark_num][1] = C - (sharks[shark_num][1] - C)
# sharks[shark_num][3] = 4
# elif sharks[shark_num][3] == 4:
# sharks[shark_num][1] = sharks[shark_num][1] * (-1) + 2
# sharks[shark_num][3] = 3
#
# #동족 상잔
# dead_shark = []
# for shark_num in sharks.keys():
# if shark_num not in dead_shark:
# for shark_num_next in sharks.keys():
# if shark_num_next not in dead_shark and shark_num != shark_num_next and sharks[shark_num][0] == sharks[shark_num_next][0] and sharks[shark_num][1] == sharks[shark_num_next][1]:
# if sharks[shark_num][4] > sharks[shark_num_next][4]:
# dead_shark.append(shark_num_next)
# elif sharks[shark_num][4] < sharks[shark_num_next][4]:
# dead_shark.append(shark_num)
# for shark_num in dead_shark:
# del sharks[shark_num]
#
# R, C, M = map(int, input().split())
#
# sharks = {}
# for i in range(M):
# sharks['s' + str(i)] = list(map(int, input().split()))
#
# shark_sum = 0
# for person_pos in range(1, C + 1):
# # 낚시
# get_shark_row = 99999
#
# for shark_num in sharks.keys():
# if sharks[shark_num][1] == person_pos and sharks[shark_num][0] < get_shark_row:
# get_shark = shark_num
# get_shark_row = sharks[get_shark][0]
#
# if get_shark_row != 99999:
# shark_sum += sharks[get_shark][4]
# del sharks[get_shark]
#
# # 상어 위치 변경 및 동족상잔
# shark_change()
#
#
# print(shark_sum) | [
"gieun625@gmail.com"
] | gieun625@gmail.com |
21f85fbde500311ba696c8db3619e60960f4c3e3 | 62200230fcff52c826685ed348e9c18b271b7f4e | /op2.22.py | de2aaac428162afa8b3b56b10b421270ccceeb6b | [] | no_license | manasa1463/program | ec149b141e0ee1bd19386de3985ee7cf6e268fe8 | df116cd552728d8f804d5292d924ad53722de629 | refs/heads/master | 2020-04-30T01:08:00.476647 | 2019-07-24T17:40:50 | 2019-07-24T17:40:50 | 176,520,835 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | x,y=map(int,input().split())
# Euclid's algorithm: repeatedly replace (x, y) with (y, x mod y);
# the greatest common divisor is left in x once y reaches zero.
while y != 0:
    x, y = y, x % y
print(x)
| [
"noreply@github.com"
] | manasa1463.noreply@github.com |
966336c9846841fe8a4a55d2f9b09814d3c843e9 | 6a165e7dafaec4d738a0a7fa54429a074049dfb6 | /vmagent/libs/instance.py | 84f7b520f767b4f87b967c99f80da72bf23fc2e6 | [] | no_license | zhouqiang-cl/wstack | 14bae1d4e7ecd1972483ca48c9789c7b004c3310 | cd636ccecfe020c1fe57041c84bb7afa93e05608 | refs/heads/master | 2020-01-23T21:57:37.975315 | 2016-11-16T15:25:51 | 2016-11-16T15:25:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,462 | py | #-*- coding: utf-8 -*-
""" 虚拟机实例函数.
"""
import os
import redis
from libs import (dnsapi, log, utils, volume, interface,
wmi, template, storage)
from const import DOMAIN, ISO_URLS, NAMESERVER
logger = log.LogHandler().logger
def create_origin(name, uuid, version, vcpu, mem, os_size, data_size,
                  ip, location, netmask, gateway, ks, bridge):
    """ Create an instance the traditional way: virt-install from an ISO,
    driven by a kickstart file, over the serial console.  Also creates and
    attaches a data disk and a second NIC, and restarts the guest once the
    installation has finished.
    """
    # Make sure the IP address is valid before doing anything expensive.
    if not utils.is_valid_ip(ip):
        message = "ip:{ip} is illegal".format(ip=ip)
        logger.warning(message)
        raise Exception(message)
    # Download the installation ISO if it is not already at `location`.
    if not os.path.isfile(location):
        cmd = ''' wget {url} -O {location}'''.format(
            url=ISO_URLS[version], location=location)
        utils.shell(cmd)
    # Create the OS (system) volume.
    os_volume_path = volume.create(name, os_size)
    # Kick off the unattended installation.
    cmd = """virt-install --name {name} --uuid {uuid} --vcpus={vcpu} --ram {mem} """\
          """--disk path={os_volume_path} -localtime --accelerate """\
          """--location={location} -x "ip={ip} netmask={netmask} gateway={gateway} """\
          """dns={dns} dnsdomain={dnsdomain} ks={ks} console=tty0 console=ttyS0,115200n8" """\
          """--nographics --network bridge={bridge} --noautoconsole &>/dev/null""".format(
        name=name, uuid=uuid, vcpu=vcpu, mem=mem, os_volume_path=os_volume_path,
        location=location, ip=ip, netmask=netmask, gateway=gateway, dns=NAMESERVER,
        dnsdomain=DOMAIN, ks=ks, bridge=bridge)
    utils.shell(cmd)
    # Start the domain automatically on host boot.
    cmd = "virsh autostart {name}".format(name=name)
    utils.shell(cmd)
    # Create the data volume; its identifier is ${name}_data.
    data_volume = name + "_data"
    data_volume_path = volume.create(data_volume, data_size)
    # A second NIC is always added by default.
    interface.add(name, "br1")
    # Strip some useless install-time config; the domain will not boot with
    # it (the second storage layout breaks, the first one is unaffected).
    cmd = """sed -i '/<kernel>/d' /etc/libvirt/qemu/{name}.xml;
             sed -i '/<initrd>/d' /etc/libvirt/qemu/{name}.xml;
             sed -i '/<cmdline>/d' /etc/libvirt/qemu/{name}.xml;
             virsh define /etc/libvirt/qemu/{name}.xml
          """.format(name=name)
    utils.shell(cmd)
    # Automatically restart the guest once the installation has finished
    # (the domain disappears from `virsh list` when the installer powers off).
    check_cmd = "virsh list | grep -q {name} ".format(name=name)
    start_cmd = "sleep 1 && sh libs/guestfish_origin.sh {name} {uuid} && virsh start {name} && sleep 1 ".format(
        name=name, uuid=uuid)
    if utils.check_wait(check_cmd, start_cmd):
        logger.info("post start {name} success".format(name=name))
    else:
        message = "post start {name} timeout".format(name=name)
        logger.warning(message)
        raise Exception(message)
    # Attach the data volume as the instance's vdb.  This is done after the
    # installation completes because attaching it at install time lets the
    # installer turn vdb into swap, which then makes pvcreate fail.
    volume.attach(name, data_volume_path, "vdb")
def create_wmi(name, uuid, vcpu, mem, os_size, data_size, ip,
               hostname, wmi_id, netmask, gateway, bridge):
    """ Create an instance from a machine image (wmi).
    Rough outline:
    1. Fetch the wmi metadata via wmi_id.
    2. Create the OS and data volumes.
    3. Download the matching images over the volumes just created.
    4. If the requested sizes are larger than the image sizes, grow them.
    5. Render the domain config from the template; this must set:
        1). disk information;   # the most fiddly part
        2). NIC information;
        3). name;
        4). uuid;
        5). vcpu;
        6). mem;
    6. Define the config and patch the system image (network, hostname...).
    7. Boot the system.
    8. Add a DNS record for the new machine.
    """
    # Make sure the IP address is valid before doing anything expensive.
    if not utils.is_valid_ip(ip):
        message = "ip:{ip} is illegal".format(ip=ip)
        logger.warning(message)
        raise Exception(message)
    # Fetch the wmi metadata.
    wmi_data = wmi.get(wmi_id)
    os_name = name
    os_volume_path = volume.create(os_name, os_size)
    data_name = name + "_data"
    data_volume_path = volume.create(data_name, data_size)
    os_url = wmi_data["os"]["url"]
    # qcow2 system disks can simply be downloaded with wget and resized.
    # NOTE(review): the data-volume resize only happens inside this qcow2
    # branch; raw layouts never grow the data volume -- confirm intended.
    if volume.path_type(os_volume_path) == "qcow2":
        utils.wget(os_url, os_volume_path)
        if int(os_size.strip("G")) > int(wmi_data["os"]["size"].strip("G")):
            volume.resize(os_name, os_size)
        if int(data_size.strip("G")) > int(wmi_data["data"]["size"].strip("G")):
            volume.resize(data_name, data_size)
    # raw system disks cannot use wget.  One option is
    # `qemu-img convert -O raw`, but that writes the whole disk's worth of
    # data and hammers the IO subsystem.  A partition-table based restore
    # is used instead to keep IO low.
    if volume.path_type(os_volume_path) == "raw":
        storage.restore_rawos_from_qcow2_image(os_volume_path, wmi_data)
    # Initialize the data disk (LVM + ext4).
    init_data_volume_cmd = """virt-format -a {data_volume_path} --lvm=/dev/datavg/home --filesystem=ext4""".format(
        data_volume_path=data_volume_path)
    try:
        utils.shell(init_data_volume_cmd)
    except Exception, e:
        # virt-format can fail on old qemu-kvm; upgrade and retry once.
        utils.shell("yum -y update qemu-kvm")
        utils.shell(init_data_volume_cmd)
    # Stream the data image into the data disk with tar-in.
    tar_in_cmd = """curl {data_url} | guestfish add {data_volume_path} : run : mount /dev/datavg/home / : tar-in - / compress:gzip""".format(
        data_url=wmi_data["data"]["url"], data_volume_path=data_volume_path)
    utils.shell(tar_in_cmd)
    # `volumes` feeds the domain-config template below.
    if volume.path_type(os_volume_path) == "qcow2":
        disk_type = "file"
        driver_type = "qcow2"
        source_type = "file"
    else:
        disk_type = "block"
        driver_type = "raw"
        source_type = "dev"
    volumes = [
        {
            "file": os_volume_path,
            "dev": "vda",
            "disk_type": disk_type,
            "driver_type": driver_type,
            "source_type": source_type
        },
        {
            "file": data_volume_path,
            "dev": wmi_data["data"]["device"],
            "disk_type": disk_type,
            "driver_type": driver_type,
            "source_type": source_type
        }
    ]
    # Generate MAC addresses for the two NICs.
    interface_br1 = utils.random_mac()
    interface_br2 = utils.random_mac()
    # Render the domain config from the template.
    _dict = {
        "name": name,
        "uuid": uuid,
        "vcpu": vcpu,
        "memory": int(mem) * 1024,
        "currentmemory": int(mem) * 1024,
        "volumes": volumes,
        "interface_br1": interface_br1,
        "interface_br2": interface_br2
    }
    template.gen(_dict)
    # Patch the system image (network config, hostname, MACs...).
    cmd = """ sh libs/guestfish_wmi.sh {name} {uuid} {ip} {netmask} {gateway} {hostname} {hwaddr_em2} {hwaddr_em1} """.format(
        name=name, uuid=uuid, ip=ip, netmask=netmask, gateway=gateway,
        hostname=hostname, hwaddr_em2=interface_br2,
        hwaddr_em1=interface_br1)
    utils.shell(cmd)
    # Register the DNS record for the new machine.
    ret = dnsapi.add_record(hostname, ip)
    if ret["status"] != "success":
        logger.warning(ret["result"])
        raise Exception(ret["result"])
def create(data):
    """Create an instance, dispatching on ``data["type"]``.

    "wmi"    -> image-based creation via create_wmi()
    "origin" -> traditional ISO installation via create_origin()
    Any other type falls through and returns None.
    """
    common_keys = ("type", "area", "uuid", "vmname", "vcpu", "mem",
                   "os_size", "data_size", "ip", "netmask", "gateway",
                   "bridge")
    (kind, area, uuid, name, vcpu, mem, os_size,
     data_size, ip, netmask, gateway, bridge) = [data[k] for k in common_keys]
    # `area` is extracted for parity with the payload but currently unused.
    if kind == "wmi":
        wmi_id = data["wmi_id"]
        hostname = data["hostname"]
        return create_wmi(name, uuid, vcpu, mem, os_size, data_size, ip,
                          hostname, wmi_id, netmask, gateway, bridge)
    elif kind == "origin":
        version = data["version"]
        location = data["location"]
        ks = data["ks"]
        return create_origin(name, uuid, version, vcpu, mem, os_size,
                             data_size, ip, location, netmask, gateway,
                             ks, bridge)
def delete(name):
    """Tear an instance down completely: volumes, domain, definition.

    Works for both storage layouts.  The block-device list is collected
    *before* ``virsh destroy`` -- querying it afterwards was observed to
    lose volumes (everything from the second disk onwards).
    """
    list_cmd = (""" virsh domblklist %s |awk 'NR>2' |egrep -v '\.iso|hd' """
                """|awk '{print $2}' """ % name)
    devices = utils.shell(list_cmd).strip().splitlines()
    # Stop the instance; it may already be powered off, so ignore failures.
    utils.shell(" virsh destroy {0} ".format(name), exception=False)
    # Remove every volume that belonged to the instance.
    for dev in devices:
        volume.delete(dev)
    # Finally drop the libvirt definition.
    utils.shell(" virsh undefine {0} ".format(name))
def shutdown(name):
    """Force an instance to power off (hard stop, not a graceful shutdown)."""
    utils.shell(" virsh destroy {0} ".format(name))
def reboot(name):
    """Hard-reboot an instance: force it off, then start it again."""
    # Double space before 'virsh start' preserved from the original
    # string concatenation; the shell does not care.
    utils.shell(" virsh destroy {0} &&  virsh start {0}".format(name))
| [
"liningning@wandoujia.com"
] | liningning@wandoujia.com |
d23839856394dc02f015232730ab6a9f83793f3c | eb0711915d6bba2f765f052736e33ac9a9a397a6 | /HE1104/model/glee_samp/samp34/glee_chain.py | e949310be1517c45744911df615febd2d25190a9 | [] | no_license | dartoon/GL_HostGalaxy | cd2166f273ae7e0397a7d2d39f760ab59e86f014 | 7469f1c1e640d176a75cc6e9497920e494ad656a | refs/heads/master | 2016-08-11T13:27:17.545360 | 2016-04-07T19:04:57 | 2016-04-07T19:04:57 | 46,524,027 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | import numpy,subprocess
# Read the last 30 sampled chain indices (first column) from the glee MCMC
# output; the filename must match the current run.
with open('HE34.001.mcmc', 'r') as mcmc_file:
    para = numpy.loadtxt(mcmc_file)[-30:, 0]

i = int(para[0])  # first chain number; handy when invoking glee manually,
                  # e.g. subprocess.call(["glee -S %d HE34.001.001" % i], shell=True)

# Persist the chain numbers as integers, one per line.  The original code
# called `g.close` without parentheses (a no-op, leaving the file unflushed
# and unclosed); the context manager guarantees flush + close.
with open('chain_NO', 'w') as out:
    numpy.savetxt(out, para.astype(int), fmt='%i')
| [
"dingxuheng@mail.bnu.edu.cn"
] | dingxuheng@mail.bnu.edu.cn |
1baa5445cd69ffb9bf4d2b4b632526716f1c3056 | 7a24bd87eb80edefe0db75c84ace364fc093e04a | /examples/hybrid_inverter_sigmoid/hybrid_inverter_sigmoid.py | 21d6b806f4acac4a2f6b41921f70e1272bc22318 | [] | no_license | qibolun/DryVR_0.2 | 5ab1171b0d5a3d4bdaae30713cd450d5797b002e | 4ee2bbc736d382043be585906704bcc4dc115d3d | refs/heads/master | 2021-10-09T06:00:41.738030 | 2021-09-27T19:59:26 | 2021-09-27T19:59:26 | 102,651,161 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,436 | py | from examples import c2e2wrapper
def TC_Simulate(Mode, initialCondition, time_bound):
    """Simulate one mode of the hybrid model through the C2E2 wrapper.

    Parameters:
        Mode: mode name, "Rampup_A".."Rampup_G" or "Rampdown_A".."Rampdown_G".
        initialCondition: initial values of the two state variables; the
            internal clock variable t is prepended automatically.
        time_bound: simulation horizon handed to the simulator.

    Returns:
        A list of trace rows for DryVR, keeping the first three columns of
        each simulator output row.

    Raises:
        ValueError: if Mode is not one of the 14 known mode names (the
            original code silently fell through and later crashed with a
            NameError on `modenum`).
    """
    # Map the mode names to the simulator's integer mode ids:
    # Rampup_A..G -> 1..7, Rampdown_A..G -> 8..14.
    mode_nums = {}
    for offset, letter in enumerate("ABCDEFG"):
        mode_nums["Rampup_" + letter] = offset + 1
        mode_nums["Rampdown_" + letter] = offset + 8
    try:
        modenum = mode_nums[Mode]
    except KeyError:
        raise ValueError("unknown mode: {}".format(Mode))

    # NOTE(review): this path points at the uniform_NOR_sigmoid model even
    # though this file is the hybrid inverter -- confirm it is intentional.
    simfile = './examples/uniform_NOR_sigmoid/simu'
    timeStep = 0.00005

    # This model needs special handling: the simulator tracks t as an extra
    # (leading) state variable, so prepend t = 0 to the caller-supplied
    # initial condition and strip it again on the way out.
    initialCondition = [0.0, initialCondition[0], initialCondition[1]]
    result = c2e2wrapper.invokeSimulator(
        modenum,
        simfile,
        initialCondition,
        timeStep,
        time_bound
    )

    # Keep the first three columns of each row for DryVR.  The original
    # line was missing a closing bracket (a syntax error); it is restored
    # here as written.  NOTE(review): to actually discard the internal t
    # column the slice may need to be [row[0], row[2], row[3]] -- confirm
    # against the simulator's column layout.
    return [[row[0], row[1], row[2]] for row in result]
| [
"pricejmh0911@gmail.com"
] | pricejmh0911@gmail.com |
e7dc5ed0f00f3e833ce7517bce5d9cc929080645 | 0600f0979fe17624d33aa74c739775f0f27a3bb5 | /docs/support/test_my_module.py | 5f767d88b77b3f65e6e07decef75159d16f98d1c | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | pmacosta/putil | 2c8177fb6b9be667b8d52b48bfd3272de8b0160d | 416cea52df8221981727e25d133e9b4e3f464798 | refs/heads/master | 2021-01-21T13:33:41.232773 | 2016-05-17T12:57:30 | 2016-05-17T12:57:30 | 44,289,408 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | # test_my_module.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,C0410,C0411,R0903,W0104,W0105
import pytest, docs.support.my_module, putil.test
def test_func():
    """ Test func() function """
    target = docs.support.my_module.func
    # A non-string name must be rejected with a TypeError.
    putil.test.assert_exception(
        target, TypeError, 'Argument `name` is not valid', {'name': 5}
    )
    # A valid name is echoed back in the greeting.
    assert target('John') == 'My name is John'
def test_my_class():
    """ Test MyClass() class """
    instance = docs.support.my_module.MyClass()
    get_msg = putil.test.get_exmsg
    # Reading the property before it is assigned must raise.
    with pytest.raises(RuntimeError) as excinfo:
        instance.value
    assert get_msg(excinfo) == 'Attribute `value` not set'
    # Assigning an invalid (non-integer) value must raise as well.
    with pytest.raises(RuntimeError) as excinfo:
        instance.value = 'a'
    assert get_msg(excinfo) == 'Argument `value` is not valid'
| [
"pmasdev@gmail.com"
] | pmasdev@gmail.com |
6a5b3a7537dbc9e4a4153681f88f95e68da6f860 | 099b4f825cf6ccf7a9795154f2d7a7daa64d4691 | /python_udemy_course/32 flexible args.py | d74741bf809d34e3c10f9a8b8a3ee3376b66a3e8 | [] | no_license | espiercy/py_junkyard | 22637f4b1056cd7571d99dfc14e27a0590695733 | 48204ddd00a366e67e98e2d6a01921b659677d57 | refs/heads/master | 2020-03-23T11:27:46.779467 | 2018-09-22T12:55:08 | 2018-09-22T12:55:08 | 141,504,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | ##THAT'S pretty cool...
# *values packs any number of positional arguments into a tuple.  The
# parameter was renamed from ``vars`` to stop shadowing the ``vars`` builtin.
def catch(*values):
    """Print each positional argument on its own line."""
    for value in values:
        print(value)

catch(23, 'udemy', [1, 2, 'list var'])
"evanpiercy@gmail.com"
] | evanpiercy@gmail.com |
12e394688e18a0a8cd50c4672cb605e0ec4083fc | a635b8d51016220a6d84808def431c27dde41b90 | /libcms/apps/journal/urls.py | b06afb6d9b18c032c58083f127b4facac70b13b9 | [] | no_license | isergey/chel | aab3ac98ae2a10258f7a5afce88c74f9e13a2d7f | d1a38bfe7ebba80d9c39ae3b0d54ebfd2965046c | refs/heads/master | 2023-07-07T02:13:41.363452 | 2023-06-26T10:25:14 | 2023-06-26T10:25:14 | 3,816,204 | 1 | 0 | null | 2023-03-31T14:52:31 | 2012-03-24T09:33:53 | JavaScript | UTF-8 | Python | false | false | 322 | py | # -*- coding: utf-8 -*-
from django.urls import re_path, include
from . import views
from .administration import urls as aurls
# URL routes for the journal app:
#   ''          -> views.index            (named "index")
#   'redirect'  -> views.redirect_to_url  (named "redirect_to_url")
#   'admin/...' -> administration sub-URLconf, namespaced as "admin"
urlpatterns = (
    re_path(r'^$', views.index , name='index'),
    re_path(r'^redirect$', views.redirect_to_url , name='redirect_to_url'),
    re_path(r'^admin/', include((aurls, 'admin'))),
)
| [
"dostovalov@gmail.com"
] | dostovalov@gmail.com |
bab55d69218c204a901ec720b1cc08572d8a0b35 | eb35535691c4153ba2a52774f0e40468dfc6383d | /hash_table/find_diff_str.py | 2c56e7768ccda2172fbde10be2df7b65ac7c8071 | [] | no_license | BJV-git/leetcode | 1772cca2e75695b3407bed21af888a006de2e4f3 | dac001f7065c3c5b210024d1d975b01fb6d78805 | refs/heads/master | 2020-04-30T19:04:12.837450 | 2019-03-21T21:56:24 | 2019-03-21T21:56:24 | 177,027,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # logic: said it got added, by random shuffle
# t is a random shuffle of s with exactly one extra character added; a set
# cannot be used because characters may repeat, so XOR-fold the code points.
def diff(s, t):
    """Return the single extra character present in ``t`` but not ``s``.

    ``t`` is assumed to be a shuffle of ``s`` with exactly one additional
    character inserted.  XOR-folding the code points of both strings
    cancels every character that appears an even number of times overall,
    leaving only the code point of the added character.  Unlike a
    set-based approach this works with repeated characters, and it runs
    in O(len(s) + len(t)) time with O(1) extra space.

    The original function contained unreachable (and itself buggy)
    dict-counting code after the return statement; it has been removed.
    """
    acc = 0
    for ch in s + t:
        acc ^= ord(ch)
    return chr(acc)
"noreply@github.com"
] | BJV-git.noreply@github.com |
efafed03e4c3629dbeaf409f4d9be4f0dedbd82e | 21e10ee87c314cdceaec04217e42332237c7fb58 | /SVM_Classifier.py | 86d4ae1b6e9f883d3ce122762d7655d3c9d34f1d | [] | no_license | QuantuMobileSoftware/SindhiRecognizer | 199f09d5b304dc2354972a38b7471d83dae0dfdc | 6c2af0ea9c796e7a4bc3b2b028b269a4a8be829a | refs/heads/master | 2021-01-22T23:06:14.023760 | 2017-03-20T14:43:27 | 2017-03-20T14:43:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | import sys
import cv2
import argparse
import warnings
from time import time
from docx import Document
from sklearn.externals import joblib

# Python 2 only: re-expose setdefaultencoding and force UTF-8 so the
# Sindhi text read from the .docx files prints without UnicodeDecodeError.
# (reload/sys.setdefaultencoding do not exist on Python 3.)
reload(sys)
sys.setdefaultencoding('utf-8')

# ignore version warnings: replace warnings.warn with a no-op so
# sklearn/joblib version-mismatch warnings stay silent when unpickling.
def warn(*args, **kwargs):
    pass
warnings.warn = warn

# add argument parser for images
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--image', help='path to .jpg file')
args = vars(ap.parse_args())

# start timer
start_time = time()
print ('Start recognition')

# read image and model
# NOTE(review): cv2.imread returns None for a missing/unreadable path; no
# check is done here, so a bad -i argument only fails later in cv2.resize.
image = cv2.imread(args['image'])
model = joblib.load("model.pkl")

# process image: resize to 80x50, binarize, restore 3 channels, flatten —
# presumably matching the preprocessing used at training time (verify).
image = cv2.resize(image, (80, 50), interpolation=cv2.INTER_AREA)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
threshold, image = cv2.threshold(image, 200, 255, cv2.THRESH_BINARY)
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
image = image.flatten()

# predict class
result = model.predict(image)

# print the results: each predicted class maps to a pre-written
# transcript document txt/<class>.docx, which is echoed and re-saved.
document = Document('txt/{}.docx'.format(result[0]))
for para in document.paragraphs:
    print (para.text)
document.save('output.docx')

# end timer
end_time = time() - start_time
print ('Recognition ended in {} seconds'.format(round(end_time, 2)))
"you@example.com"
] | you@example.com |
941719ccebe58c5ac65e905376ab4b8eb872dce4 | 786de89be635eb21295070a6a3452f3a7fe6712c | /PSHist/tags/V00-03-00/SConscript | ea4b597c8c16aee6b43d5b58687145b681ac6fc9 | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | #--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# SConscript file for package PSHist
#------------------------------------------------------------------------
# Do not delete following line, it must be present in
# SConscript file for any SIT project
Import('*')

#
# For the standard SIT packages which build libraries, applications,
# and Python modules it is usually sufficient to call
# standardSConscript() function which defines rules for all
# above targets. Many standard packages do not need any special options,
# but those which need can modify standardSConscript() behavior using
# a number of arguments, here is a complete list:
#
# LIBS - list of additional libraries needed by this package
# BINS - dictionary of executables and their corresponding source files
# TESTS - dictionary of test applications and their corresponding source files
# SCRIPTS - list of scripts in app/ directory
# UTESTS - names of the unit tests to run, if not given then all tests are unit tests
# PYEXTMOD - name of the Python extension module, package name used by default
#
#
# PSHist needs no special options, so the defaults are used.
standardSConscript()
| [
"salnikov@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] | salnikov@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7 | |
3b0fc70408fbc7d9c1869f21fc189ff9745646ab | 3c97ecb4ca2104ef454c9768cbe6a0c759d4b174 | /scratch/debug_overwrite/scratch_20.py | 86217bfe8856e29b7eb5241812f3a558d5c8602b | [
"BSD-3-Clause"
] | permissive | takuma-yoneda/ml_logger | 44a1add97e00e32e8b66bbac5d4df2711fabede8 | dd619ead4c4ae6927e6093982b40a27ff51b47ec | refs/heads/master | 2023-07-13T07:02:22.430393 | 2021-08-01T20:46:34 | 2021-08-01T20:46:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,138 | py | import pickle
from cmx import doc
from ml_logger import logger
doc @ """
# Debug Logger Overwrite Bug
Reading from metrics file:
"""
logger.configure("http://54.71.92.65:8080", prefix='geyang/project/debug_logs')
logger.remove('debug_logs')
doc.print(logger.root)
logger.log_text("""
charts:
- i
""", dedent=True, filename=".charts.yml")
for i in range(3):
logger.log_key_value(i=i)
logger.flush()
import time
time.sleep(1)
doc @ "```ansi"
doc @ logger.load_text("outputs.log")
doc @ "```"
with doc:
data = logger.read_metrics()
doc.print(data)
doc.flush()
exit()
doc @ """
# debug logger overwrite bug
Reading from metrics file:
"""
with open('outputs.log') as f:
for l in f.readlines():
print(l.rstrip())
print(pickle.load(l))
with open('metrics.pkl', 'rb') as f:
a = pickle.load(f)
print(a)
if __name__ == '__main__':
logger.configure(root="http://improbable-ai.dash.ml:8080", register_experiment=False)
df = logger.read_metrics(
path="/geyang/dreamer_v2/2021/01-22/01_atari/train/02.13.42/atari_solaris/s-200/6/metrics.pkl")
df # dataframe
print(df)
| [
"yangge1987@gmail.com"
] | yangge1987@gmail.com |
6ef8af05d4c89526fa1b2729dcc670018295f892 | 544cfadc742536618168fc80a5bd81a35a5f2c99 | /test/app_compat/csuite/integration_tests/csuite_crash_detection_test.py | 9dd8a00f587c2639b5b5152ae31b4d25681ad48e | [] | no_license | ZYHGOD-1/Aosp11 | 0400619993b559bf4380db2da0addfa9cccd698d | 78a61ca023cbf1a0cecfef8b97df2b274ac3a988 | refs/heads/main | 2023-04-21T20:13:54.629813 | 2021-05-22T05:28:21 | 2021-05-22T05:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,560 | py | # Lint as: python3
#
# Copyright 2020, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests C-Suite's crash detection behavior."""
import csuite_test_utils
class CrashDetectionTest(csuite_test_utils.TestCase):
    """End-to-end tests of C-Suite's app crash detection on a device."""

    def setUp(self):
        super(CrashDetectionTest, self).setUp()
        # Fresh adb wrapper, package repository, and harness per test.
        self.adb = csuite_test_utils.Adb()
        self.repo = csuite_test_utils.PackageRepository()
        self.harness = csuite_test_utils.CSuiteHarness()

    def tearDown(self):
        super(CrashDetectionTest, self).tearDown()
        self.harness.cleanup()
        self.repo.cleanup()

    def test_no_crash_test_passes(self):
        """A well-behaved app launches and the suite reports PASSED: 1."""
        test_app_package = 'android.csuite.nocrashtestapp'
        self.adb.run(['logcat', '-c'])

        completed_process = self.run_test(
            test_app_package=test_app_package,
            test_app_module='csuite_no_crash_test_app')

        self.expect_regex(completed_process.stdout, r"""PASSED\s*:\s*1""")
        self.expect_app_launched(test_app_package)
        self.expect_package_not_installed(test_app_package)

    def test_crash_on_launch_test_fails(self):
        """An app that crashes on launch makes the suite report FAILED: 1."""
        test_app_package = 'android.csuite.crashonlaunchtestapp'
        self.adb.run(['logcat', '-c'])

        completed_process = self.run_test(
            test_app_package=test_app_package,
            test_app_module='csuite_crash_on_launch_test_app')

        self.expect_regex(completed_process.stdout, r"""FAILED\s*:\s*1""")
        self.expect_app_launched(test_app_package)
        self.expect_package_not_installed(test_app_package)

    def run_test(self, test_app_package, test_app_module):
        """Set up and run the launcher for a given test app."""
        # We don't check the return code since adb returns non-zero exit code if
        # the package does not exist.
        self.adb.uninstall(test_app_package, check=False)
        self.assert_package_not_installed(test_app_package)

        module_name = self.harness.add_module(test_app_package)
        self.repo.add_package_apks(
            test_app_package, csuite_test_utils.get_test_app_apks(test_app_module))

        # The harness fetches the APK from the local repo via this resolver.
        file_resolver_class = 'com.android.csuite.config.AppRemoteFileResolver'

        return self.harness.run_and_wait([
            '--serial',
            csuite_test_utils.get_device_serial(),
            'run',
            'commandAndExit',
            'launch',
            '-m',
            module_name,
            '--enable-module-dynamic-download',
            '--dynamic-download-args',
            '%s:uri-template=file://%s/{package}' %
            (file_resolver_class, self.repo.get_path())
        ])

    def expect_regex(self, s, regex):
        # Non-fatal regex check, recorded as a subTest.
        with self.subTest():
            self.assertRegex(s, regex)

    def assert_package_not_installed(self, package_name):
        # Fatal check: the package must be absent from the device.
        self.assertNotIn(package_name, self.adb.list_packages())

    def expect_package_not_installed(self, package_name):
        # Non-fatal variant of assert_package_not_installed.
        with self.subTest():
            self.assert_package_not_installed(package_name)

    def expect_app_launched(self, tag):
        # Expect a logcat line with the app's tag confirming launch.
        logcat_process = self.adb.run(['logcat', '-d', '-v', 'brief', '-s', tag])
        with self.subTest():
            self.assertIn('App launched', logcat_process.stdout)
if __name__ == '__main__':
csuite_test_utils.main()
| [
"rick_tan@qq.com"
] | rick_tan@qq.com |
b61bfcf65971bd7ff9bf1bd4987d1db5eade588c | 84c1e780a349c4bae2d6cf4c1da72889d5222797 | /Python/Numpy/Linear Algebra/linear_algebra.py | 1604f1d3feede1f8abdaa810eb1cabeedf232a41 | [
"MIT"
] | permissive | brianchiang-tw/HackerRank | 18e31583b10cf2189adac97e7cb2997d46790bcd | 02a30a0033b881206fa15b8d6b4ef99b2dc420c8 | refs/heads/master | 2020-09-23T23:18:08.253868 | 2020-02-13T14:16:22 | 2020-02-13T14:16:22 | 225,612,833 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | import numpy as np
if __name__ == '__main__':
dim = int( input() )
arr = []
for i in range(dim):
arr += list( map(float, input().split() ) )
np_arr = np.array( arr )
np_arr = np.reshape( np_arr, (dim, dim) )
determine = np.linalg.det(np_arr)
np.set_printoptions(legacy='1.13')
print( determine )
| [
"brianchiang1988@icloud.com"
] | brianchiang1988@icloud.com |
9e51c017d58a209ca1d4d21a997e29a3ebe2d9d4 | bc2327d2bce695bb4881be63b1912f550857fd14 | /data_structures/examples/example2_8.py | 05f54255aae14831835018af84afadf975b148b2 | [] | no_license | mentalclear/fluent-in-python | 1a1d9ad30e949e72d8633156091b84b6d52b85bc | 243cff274861abc853b4ba5d03090191df5cd7db | refs/heads/master | 2023-08-05T19:26:48.787996 | 2021-10-06T13:04:14 | 2021-10-06T13:04:14 | 402,944,060 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | metro_areas = [
('Tokyo', 'JP', 36.933, (35.689722, 139.691667)),
('Delhi NCR', 'IN', 21.935, (28.613889, 77.208889)),
('Mexico City', 'MX', 20.142, (19.433333, -99.133333)),
('New York-Newark', 'US', 20.104, (40.808611, -74.020386)),
('São Paulo', 'BR', 19.649, (-23.547778, -46.635833)),
]
def main():
print(f'{"":15} | {"latitude":>9} | {"longitude":>9}')
for name, _, _, (lat, lon) in metro_areas:
if lon <= 0:
print(f'{name:15} | {lat:9.4f} | {lon:9.4f}')
if __name__ == '__main__':
main() | [
"mentalclear@gmail.com"
] | mentalclear@gmail.com |
322bac495f779103d108af47f10b388fc9a45d48 | ab8508696b4938331c752aaed0cc5e4062bc8625 | /week2/FirstDay/keyvaluestore2/store/views.py | 5b8fd90e5a2592923b4652fbbb755e0d193a859b | [] | no_license | Nimor111/Django-Course | 5765097cb37ce8b9c1efe93c9a3dad23cd7bd07d | e3b36a345227848928f07af0efb7c81ff082bc22 | refs/heads/master | 2021-01-21T10:34:45.112421 | 2017-05-11T15:00:03 | 2017-05-11T15:00:03 | 83,458,838 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,978 | py | from django.shortcuts import render
from django.http import JsonResponse, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from .logic import create_user, write_key, get_key, delete_key
import json
# Create your views here.
@csrf_exempt
def create_user_view(request):
    """Create a fresh store user and return its identifier as JSON."""
    return JsonResponse({'identifier': create_user()})
@csrf_exempt
def write_key_view(request, identifier):
    """Store a key/value pair (JSON POST body) under the given user.

    Returns 201 on success, 404 for an unknown user, 403 for any
    method other than POST.
    """
    if request.method != 'POST':
        return HttpResponse(status=403)
    payload = json.loads(request.body.decode('utf-8'))
    try:
        write_key(identifier, payload['key'], payload['value'])
    except ValueError:
        # Unknown user identifier.
        return HttpResponse(status=404)
    return HttpResponse(status=201)
@csrf_exempt
def get_or_delete_view(request, identifier, key):
    """Dispatch GET/DELETE requests for a key to the matching handler."""
    handlers = {'GET': get_key_view, 'DELETE': delete_key_view}
    handler = handlers.get(request.method)
    if handler is not None:
        return handler(request, identifier, key)
@csrf_exempt
def get_key_view(_, identifier, key):
    """Return the stored value for (identifier, key) as indented JSON.

    Responds 404 with an error payload when either the user or the key
    is unknown.
    """
    try:
        data = get_key(identifier, key)
    except (KeyError, ValueError):
        # Unknown key (KeyError) and unknown user (ValueError) were
        # handled with two identical except blocks; merged into one.
        return JsonResponse({"error": "Key not found."},
                            json_dumps_params={'indent': 4},
                            status=404)
    return JsonResponse(data, json_dumps_params={'indent': 4}, status=200)
@csrf_exempt
def delete_key_view(_, identifier, key):
    """Delete the stored (identifier, key) entry.

    Responds 202 on success; 404 when the key (KeyError) or the user
    (ValueError) is unknown.
    """
    try:
        # The return value of delete_key was bound to an unused local;
        # the binding was removed.
        delete_key(identifier, key)
    except KeyError:
        return JsonResponse({'error': 'key'}, status=404)
    except ValueError:
        return JsonResponse({"error": "Key not found."},
                            json_dumps_params={'indent': 4},
                            status=404)
    return JsonResponse({'status': 'deleted'}, status=202)
"georgi.bojinov@hotmail.com"
] | georgi.bojinov@hotmail.com |
d44967b219c2a51424a6673dad657e88b2aadb93 | bcad774066bda2ed89b37e200e00312e49a38f2b | /powertrain/models/project.py | 09397241e9bf9eae8741286ea6dc2373ccaf64cc | [] | no_license | VUIIS/powertrain | d52774fd4c4f80b4940003e76a12c63feaee774b | 6260c70dbcc36b1fec5d25cabba34ee72bc309cc | refs/heads/master | 2020-05-24T15:13:16.280501 | 2014-05-21T20:17:03 | 2014-05-21T20:17:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" project.py
A collection of MR sessions & associated their processing
"""
__author__ = 'Scott Burns <scott.s.burns@vanderbilt.edu>'
__copyright__ = 'Copyright 2014 Vanderbilt University. All Rights Reserved'
from .. import db
class Project(db.Model):
    """A project grouping MR sessions, users, and processing tasks."""
    __tablename__ = 'projects'
    id = db.Column(db.Integer, primary_key=True)
    # attrs
    name = db.Column(db.String)
    # relations
    # One-to-many: each session/task belongs to exactly one project.
    mrsessions = db.relationship('MRSession', backref='project')
    # Many-to-many with users through the project_to_user association table.
    users = db.relationship("User",
                            secondary='project_to_user',
                            backref="projects")
    tasks = db.relationship("Task", backref='project')

    def __repr__(self):
        return "<Project(name={0.name}, nsessions={1:d})>".format(self,
            len(self.mrsessions))
| [
"scott.s.burns@gmail.com"
] | scott.s.burns@gmail.com |
a8bf4a6ab220f84abfd45454ca066195a871ad21 | 5506e6244a67bc46903858cb8ed4f6bf83d577c1 | /examples/fcis/coco/eval_coco.py | 39708a8000cb18f799b12a3bb8b1bbcb679604ba | [
"MIT"
] | permissive | knorth55/chainer-psroi-align | c122d00cf3b911546978053adccb19fff1486d06 | 66b55e9ea24f8fd36215a604a65235ba53026cc1 | refs/heads/master | 2020-03-19T05:03:29.820238 | 2018-08-05T14:31:17 | 2018-08-05T14:32:13 | 135,895,474 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,710 | py | import argparse
import chainer
from chainer import iterators
from chainercv.datasets import coco_instance_segmentation_label_names
from chainercv.datasets import COCOInstanceSegmentationDataset
from chainercv.evaluations import eval_instance_segmentation_coco
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook
from psroi_align.links.model import FCISPSROIAlignResNet101
def main():
    """Evaluate an FCIS PSROI-Align model on COCO minival and print COCO metrics."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--pretrained-model')
    parser.add_argument('--gpu', type=int, default=-1)
    args = parser.parse_args()

    # Region-proposal settings used at both train and test time.
    proposal_creator_params = {
        'nms_thresh': 0.7,
        'n_train_pre_nms': 12000,
        'n_train_post_nms': 2000,
        'n_test_pre_nms': 6000,
        'n_test_post_nms': 1000,
        'force_cpu_nms': False,
        'min_size': 0
    }

    model = FCISPSROIAlignResNet101(
        n_fg_class=len(coco_instance_segmentation_label_names),
        min_size=800, max_size=1333,
        anchor_scales=(2, 4, 8, 16, 32),
        pretrained_model=args.pretrained_model,
        proposal_creator_params=proposal_creator_params)
    model.use_preset('coco_evaluate')

    # Move the model to GPU only when a device id was given (-1 = CPU).
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    # Crowded annotations and instance areas are needed by the COCO metric.
    dataset = COCOInstanceSegmentationDataset(
        split='minival', use_crowded=True,
        return_crowded=True, return_area=True)
    iterator = iterators.SerialIterator(
        dataset, 1, repeat=False, shuffle=False)

    in_values, out_values, rest_values = apply_to_iterator(
        model.predict, iterator, hook=ProgressHook(len(dataset)))
    # delete unused iterators explicitly
    del in_values

    pred_masks, pred_labels, pred_scores = out_values
    gt_masks, gt_labels, gt_areas, gt_crowdeds = rest_values

    result = eval_instance_segmentation_coco(
        pred_masks, pred_labels, pred_scores,
        gt_masks, gt_labels, gt_areas, gt_crowdeds)

    # Standard COCO AP/AR summary keys, printed in the usual order.
    keys = [
        'map/iou=0.50:0.95/area=all/max_dets=100',
        'map/iou=0.50/area=all/max_dets=100',
        'map/iou=0.75/area=all/max_dets=100',
        'map/iou=0.50:0.95/area=small/max_dets=100',
        'map/iou=0.50:0.95/area=medium/max_dets=100',
        'map/iou=0.50:0.95/area=large/max_dets=100',
        'mar/iou=0.50:0.95/area=all/max_dets=1',
        'mar/iou=0.50:0.95/area=all/max_dets=10',
        'mar/iou=0.50:0.95/area=all/max_dets=100',
        'mar/iou=0.50:0.95/area=small/max_dets=100',
        'mar/iou=0.50:0.95/area=medium/max_dets=100',
        'mar/iou=0.50:0.95/area=large/max_dets=100',
    ]
    print('')
    for key in keys:
        print('{:s}: {:f}'.format(key, result[key]))
if __name__ == '__main__':
main()
| [
"shingogo@hotmail.co.jp"
] | shingogo@hotmail.co.jp |
8821ece150449d0aafa9471faf4ac0a8cfb93e82 | 2d08f3dd8cb72fc0f64ac98d6f5409b2d8117f75 | /service/dpms/tasks.py | 78029a4f875dde4552ff5a27d3bed4073287c647 | [] | no_license | Minexora/homeauto | b13291054543669376f1ed22a395ec1001883fa3 | dc44eec86236a916fcf589047ff5ed9272088950 | refs/heads/master | 2023-08-23T23:09:13.992762 | 2021-09-28T05:56:25 | 2021-09-28T05:56:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | from invoke import task
# Invoke tasks for building, pushing, and deploying the dpms service.
JOB = 'dpms'
PORT = 9095
# x86 and Raspberry Pi images are pushed to the local registry on bang6.
TAG_x86 = f'bang6:5000/{JOB.lower()}_x86:latest'
TAG_pi = f'bang6:5000/{JOB.lower()}_pi:latest'
ANSIBLE_TAG = 'homeauto_dpms'


@task
def build_image_x86(ctx):
    """Build the x86 docker image from the default Dockerfile."""
    ctx.run(f'docker build --network=host -t {TAG_x86} .')

@task
def build_image_pi(ctx):
    """Build the Raspberry Pi docker image from Dockerfile.pi."""
    ctx.run(f'docker build --file Dockerfile.pi --network=host -t {TAG_pi} .')

@task(pre=[build_image_x86])
def push_image_x86(ctx):
    """Push the freshly built x86 image to the registry."""
    ctx.run(f'docker push {TAG_x86}')

@task(pre=[build_image_pi])
def push_image_pi(ctx):
    """Push the freshly built Pi image to the registry."""
    ctx.run(f'docker push {TAG_pi}')

@task(pre=[build_image_x86])
def shell(ctx):
    """Open an interactive shell in the x86 image with X11 forwarded."""
    ctx.run(f'docker run --rm -it --cap-add SYS_PTRACE -v /tmp/.X11-unix/:/tmp/.X11-unix/ -v /home/drewp/.Xauthority:/root/.Xauthority --net=host {TAG_x86} /bin/bash', pty=True)

@task(pre=[build_image_x86])
def local_run(ctx):
    """Run the dpms service locally (verbose) inside the x86 image."""
    ctx.run(f'docker run --rm -it -v /tmp/.X11-unix/:/tmp/.X11-unix/ -v /home/drewp/.Xauthority:/root/.Xauthority -p {PORT}:{PORT} -v /etc/resolv.conf:/etc/resolv.conf --net=host {TAG_x86} python3 dpms_service.py -v', pty=True)

@task(pre=[push_image_x86, push_image_pi])
def redeploy(ctx):
    """Push both images, then redeploy via the ansible playbook."""
    ctx.run(f'sudo /my/proj/ansible/playbook -t {ANSIBLE_TAG}')
    #ctx.run(f'supervisorctl -s http://bang:9001/ restart {JOB}_{PORT}')
"drewp@bigasterisk.com"
] | drewp@bigasterisk.com |
e46266f28d3ae1c434ea4a8edfb25b967021f173 | f20f930154569f1a353fd8102e7b7f666a042b4d | /utilidades/scripts graficas covertura/test_nbvnet.py | 38140f3a82ce27a5543a5c486ceb569fb1e29c5d | [] | no_license | irvingvasquez/Programas_NBV | 69e15e0bb0750180c783857182e2e574d3fe5528 | 0bfbabaf0c5fd5e0ad81156a960fabe29e93be44 | refs/heads/master | 2020-04-13T21:40:22.697290 | 2018-11-30T19:16:36 | 2018-11-30T19:16:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,615 | py | #!/usr/bin/env python
#crea un octree de 32x32x32 del archivo de entrada indicado
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import tensorflow as tf
import argparse
def flatten_cube_func(direction, octree_cube):
    """Load an octomap text file and return it as a 1 x (32*32*32) row vector.

    direction: path to a whitespace-separated text file with one voxel per
        row: x y z value.
    octree_cube: half-extent of the octree's bounding cube (the "object
        capsule" parameter); coordinates span [-octree_cube, octree_cube].
    """
    cloud_file = np.loadtxt(direction)

    ##octree configuration parameters
    min_octrees_file = -octree_cube #object capsule param from partialModelCofig file
    max_octrees_file = -min_octrees_file
    min_cubo = 0
    max_cubo = 32
    # Scale factor mapping metric coordinates onto the 0..32 voxel grid.
    m = (max_cubo - min_cubo) / (max_octrees_file - min_octrees_file)
    output_cube_size = 32*32*32

    x = cloud_file[:,0]
    y = cloud_file[:,1]
    z = cloud_file[:,2]
    v = cloud_file[:,3]

    output_cube = np.zeros((32,32,32))
    ##### the cube of the octree of size 32x32x32 is obtained
    # NOTE(review): this assumes the input file has exactly 32*32*32 rows
    # (one per voxel); fewer rows raise IndexError, extra rows are ignored.
    # int() truncates toward zero, so negative coordinates may bin
    # asymmetrically — confirm against the octree exporter.
    for i in range(output_cube_size):
        x_cube = int((x[i]*m*2+32)/2)
        y_cube = int((y[i]*m*2+32)/2)
        z_cube = int((z[i]*m*2+32)/2)
        output_cube[x_cube][y_cube][z_cube] = v[i]

    ##### flatten representation of the cube is obtained
    flatten_cube = output_cube.reshape(1,32*32*32)
    return flatten_cube
def voxnet(x, n_classes = 14 ):
    """VoxNet-style 3D CNN classifier (TensorFlow 1.x graph code).

    x: flattened occupancy grid placeholder of shape [None, 32*32*32].
    n_classes: number of output classes.
    Returns the logits tensor of shape [None, n_classes].

    NOTE(review): depends on the module-level `keep_rate` placeholder for
    the fully-connected dropout; it is not passed as a parameter.
    """
    mu = 0
    sigma = 0.1
    # clear previous graphs (disabled)
    #reset_graph()
    # Reshape the flat input back into a 32x32x32 single-channel volume.
    img = tf.reshape(x, shape=[-1, 32,32,32,1])
    #tf.summary.image("Image", img)
    # Declaring the variables.
    # NOTE(review): only W_conv1, W_conv2, W_fc1 and 'out' (plus matching
    # biases) are used below; the remaining entries are leftovers from
    # earlier architectures and are never applied.
    weights = {'W_conv1':tf.Variable(tf.truncated_normal([5,5,5,1,32], mean = mu, stddev = sigma)),
    'W_conv2':tf.Variable(tf.truncated_normal([3,3,3,32,32], mean = mu, stddev = sigma)),
    'W_conv3':tf.Variable(tf.truncated_normal([3,3,3,12,8], mean = mu, stddev = sigma)),
    'W_fc1':tf.Variable(tf.truncated_normal([8*8*8*32,128], mean = mu, stddev = sigma)),
    'W_fc2':tf.Variable(tf.truncated_normal([1500, 500], mean = mu, stddev = sigma)),
    'W_fc3':tf.Variable(tf.truncated_normal([500, 100], mean = mu, stddev = sigma)),
    'W_fc4':tf.Variable(tf.truncated_normal([100,50], mean= mu, stddev= sigma)),
    'out':tf.Variable(tf.truncated_normal([128, n_classes], mean = mu, stddev = sigma))}
    biases = {'b_conv1':tf.Variable(tf.random_normal([32])),
    'b_conv2':tf.Variable(tf.random_normal([32])),
    'b_conv3':tf.Variable(tf.random_normal([8])),
    'b_fc1':tf.Variable(tf.random_normal([128])),
    'b_fc2':tf.Variable(tf.random_normal([500])),
    'b_fc3':tf.Variable(tf.random_normal([100])),
    'b_fc4':tf.Variable(tf.random_normal([50])),
    'out':tf.Variable(tf.random_normal([n_classes]))}
    # Declaring the architecture.
    # Conv1, stride 2: 32x32x32x1 -> 16x16x16x32.
    l1 = tf.nn.conv3d(img, weights['W_conv1'], strides=[1,2,2,2,1], padding='SAME')
    l1 = tf.add(l1, biases['b_conv1'])
    l1 = tf.nn.relu(l1)
    #print("l1: ",l1.shape)
    #l1 = tf.nn.dropout(l1, keep_rate)
    #l1 = tf.nn.max_pool3d(l1, ksize=[1,2,2,2,1], strides=[1,2,2,2,1], padding='VALID')
    # Conv2, stride 1: 16x16x16x32 -> 16x16x16x32.
    l2 = tf.nn.conv3d(l1, weights['W_conv2'], strides=[1,1,1,1,1], padding='SAME')
    l2 = tf.add(l2, biases['b_conv2'])
    l2 = tf.nn.relu(l2)
    #l2 = tf.nn.dropout(l2, keep_rate)
    #print(l2)
    # Max-pool: 16x16x16x32 -> 8x8x8x32.
    l2 = tf.nn.max_pool3d(l2, ksize=[1,2,2,2,1], strides=[1,2,2,2,1], padding='VALID')
    #print("l2: ",l2.shape)
    #l3 = tf.nn.conv3d(l2, weights['W_conv3'], strides=[1,1,1,1,1], padding='SAME')
    #l3= tf.add(l3, biases['b_conv3'])
    #l3 = tf.nn.relu(l3)
    #l2 = tf.nn.dropout(l2, keep_rate2)
    #print(l2)
    #l3 = tf.nn.max_pool3d(l3, ksize=[1,2,2,2,1], strides=[1,2,2,2,1], padding='VALID')
    #print(l3.shape)
    # Fully connected: 8*8*8*32 -> 128, with dropout (module-level keep_rate).
    fc1 = tf.reshape(l2, [-1, 8*8*8*32])
    fc1 = tf.nn.relu(tf.matmul(fc1, weights['W_fc1'])+biases['b_fc1'])
    fc1 = tf.nn.dropout(fc1, keep_rate)
    #fc2 = tf.nn.relu(tf.matmul(fc1, weights['W_fc2'])+biases['b_fc2'])
    #fc2 = tf.nn.dropout(fc2, keep_rate)
    #fc3 = tf.nn.relu(tf.matmul(fc2, weights['W_fc3'])+biases['b_fc3'])
    #fc3 = tf.nn.dropout(fc3, keep_rate)
    #fc4 = tf.nn.relu(tf.matmul(fc3, weights['W_fc4'])+biases['b_fc4'])
    #fc4 = tf.nn.dropout(fc4, keep_rate)
    # Output layer: 128 -> n_classes logits (no activation).
    output_ = tf.matmul(fc1, weights['out'])+biases['out']
    # Cost/optimizer/summary construction is handled by the caller.
    #cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y) )
    #optimizer = tf.train.AdamOptimizer().minimize(cost)
    #tf.summary.scalar("costo", cost)
    #summaries = tf.summary.merge_all()
    return output_
def red_3d(x, n_classes = 14 ):
    """Deeper 3-conv / 4-FC 3D CNN classifier (TensorFlow 1.x graph code).

    x: flattened occupancy grid placeholder of shape [None, 32*32*32].
    n_classes: number of output classes.
    Returns the logits tensor of shape [None, n_classes].

    NOTE(review): depends on the module-level `keep_rate` placeholder for
    dropout; it is not passed as a parameter.
    """
    mu = 0
    sigma = 0.1
    # clear previous graphs (disabled)
    #reset_graph()
    # Reshape the flat input back into a 32x32x32 single-channel volume.
    img = tf.reshape(x, shape=[-1, 32,32,32,1])
    #tf.summary.image("Image", img)
    # Declaring the variables.
    weights = {'W_conv1':tf.Variable(tf.truncated_normal([3,3,3,1,10], mean = mu, stddev = sigma)),
    'W_conv2':tf.Variable(tf.truncated_normal([3,3,3,10,12], mean = mu, stddev = sigma)),
    'W_conv3':tf.Variable(tf.truncated_normal([3,3,3,12,8], mean = mu, stddev = sigma)),
    'W_fc1':tf.Variable(tf.truncated_normal([4*4*4*8,1500], mean = mu, stddev = sigma)),
    'W_fc2':tf.Variable(tf.truncated_normal([1500, 500], mean = mu, stddev = sigma)),
    'W_fc3':tf.Variable(tf.truncated_normal([500, 100], mean = mu, stddev = sigma)),
    'W_fc4':tf.Variable(tf.truncated_normal([100,50], mean= mu, stddev= sigma)),
    'out':tf.Variable(tf.truncated_normal([50, n_classes], mean = mu, stddev = sigma))}
    biases = {'b_conv1':tf.Variable(tf.random_normal([10])),
    'b_conv2':tf.Variable(tf.random_normal([12])),
    'b_conv3':tf.Variable(tf.random_normal([8])),
    'b_fc1':tf.Variable(tf.random_normal([1500])),
    'b_fc2':tf.Variable(tf.random_normal([500])),
    'b_fc3':tf.Variable(tf.random_normal([100])),
    'b_fc4':tf.Variable(tf.random_normal([50])),
    'out':tf.Variable(tf.random_normal([n_classes]))}
    # Declaring the architecture.
    # Conv1 + pool: 32x32x32x1 -> 16x16x16x10.
    l1 = tf.nn.conv3d(img, weights['W_conv1'], strides=[1,1,1,1,1], padding='SAME')
    l1 = tf.add(l1, biases['b_conv1'])
    l1 = tf.nn.relu(l1)
    #print(l1.shape)
    #l1 = tf.nn.dropout(l1, keep_rate2)
    l1 = tf.nn.max_pool3d(l1, ksize=[1,2,2,2,1], strides=[1,2,2,2,1], padding='VALID')
    # Conv2 + pool: 16x16x16x10 -> 8x8x8x12.
    l2 = tf.nn.conv3d(l1, weights['W_conv2'], strides=[1,1,1,1,1], padding='SAME')
    l2 = tf.add(l2, biases['b_conv2'])
    l2 = tf.nn.relu(l2)
    #l2 = tf.nn.dropout(l2, keep_rate2)
    #print(l2)
    l2 = tf.nn.max_pool3d(l2, ksize=[1,2,2,2,1], strides=[1,2,2,2,1], padding='VALID')
    #print(l2.shape)
    # Conv3 + dropout + pool: 8x8x8x12 -> 4x4x4x8.
    l3 = tf.nn.conv3d(l2, weights['W_conv3'], strides=[1,1,1,1,1], padding='SAME')
    l3= tf.add(l3, biases['b_conv3'])
    l3 = tf.nn.relu(l3)
    l3 = tf.nn.dropout(l3, keep_rate)
    #print(l2)
    l3 = tf.nn.max_pool3d(l3, ksize=[1,2,2,2,1], strides=[1,2,2,2,1], padding='VALID')
    #print(l3.shape)
    # Fully connected head: 4*4*4*8 -> 1500 -> 500 -> 100 -> 50,
    # with dropout on fc2..fc4 (module-level keep_rate).
    fc1 = tf.reshape(l3, [-1, 4*4*4*8])
    fc1 = tf.nn.relu(tf.matmul(fc1, weights['W_fc1'])+biases['b_fc1'])
    #fc1 = tf.nn.dropout(fc1, keep_rate)
    fc2 = tf.nn.relu(tf.matmul(fc1, weights['W_fc2'])+biases['b_fc2'])
    fc2 = tf.nn.dropout(fc2, keep_rate)
    fc3 = tf.nn.relu(tf.matmul(fc2, weights['W_fc3'])+biases['b_fc3'])
    fc3 = tf.nn.dropout(fc3, keep_rate)
    fc4 = tf.nn.relu(tf.matmul(fc3, weights['W_fc4'])+biases['b_fc4'])
    fc4 = tf.nn.dropout(fc4, keep_rate)
    # Output layer: 50 -> n_classes logits (no activation).
    output_ = tf.matmul(fc4, weights['out'])+biases['out']
    return output_
def test_neural_network(network_):
    """Restore the selected model's checkpoint and run one forward pass.

    network_: 'voxnet' selects the voxnet checkpoint; anything else
        selects the red_3d checkpoint.
    Returns the raw logits from the forward pass.

    NOTE(review): relies on module-level globals `output` (logits op),
    placeholders `x`, `y`, `keep_rate`, and inputs `x_in`, `y_in`.
    Checkpoint paths are hard-coded to a local home directory.
    """
    saver = tf.train.Saver()
    with tf.Session() as sess:
        if (network_ == 'voxnet'):
            model_path = '/home/miguelmg/Dropbox/tesis/pesos/clasification/voxnet/SVE_class/450.ckpt' #voxnet
        else:
            model_path = '/home/miguelmg/Dropbox/tesis/pesos/clasification/red_3d/SVE_class/450.ckpt' #red_3d
        # Initialize first, then overwrite with the checkpoint weights.
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, model_path)
        # keep_rate=1 disables dropout at inference time.
        y_ = sess.run(output, feed_dict={x:x_in, y:y_in, keep_rate: 1})
        # NOTE(review): `out` is built but never evaluated or returned.
        out = tf.one_hot(y_, 14)
        print("Predicted label", np.argmax(y_))
    return y_
# Command-line interface: octomap file, network choice, and cube size.
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--octofile", required = True, help = "path to read the txt octomap file ")
ap.add_argument("-n", "--network", required = True, help = "network to be used: nbvnet or voxnet")
ap.add_argument("-r", "--cube", required = True, help = "octree cube")
args = vars(ap.parse_args())

# Declaring the graph inputs and outputs (placeholders).
x=tf.placeholder('float',[None,32*32*32])
y=tf.placeholder('float')
keep_rate = tf.placeholder(tf.float32)

# Build the selected architecture's logits op.
network_ = args["network"]
if ( network_ == 'voxnet'):
    output = voxnet(x)#################################
else:
    output = red_3d(x)

# NOTE(review): accuracy ops are built but unused here, since y_in below
# is all zeros (no ground-truth label is supplied at inference).
correct = tf.equal(tf.argmax(output, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))

# Load the octomap as a flattened 32^3 volume and run the forward pass.
real_test_vol = flatten_cube_func(args["octofile"], float(args["cube"]))
x_in = real_test_vol
y_in = np.zeros((1,14))
out = test_neural_network(network_)
| [
"you@example.com"
] | you@example.com |
b0c48d7ca0b2687f98e0c4e2ceb112548ed8608b | 2d0f940ebbf3d6f9eb5962715e2880bdc70f40b9 | /week-04/day-3/16.py | cc2313515ccdd4f5c1b0ea8758f8337071ee35a5 | [] | no_license | greenfox-velox/agyenes | 9482cdfc33d0f68f24209ce2af3629e2ccd1a120 | d341d14d7cd0bc627d02e02acabf79bc45c8ccfa | refs/heads/master | 2021-01-21T14:12:08.707870 | 2016-07-14T15:09:28 | 2016-07-14T15:09:28 | 58,042,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | # create a 300x300 canvas.
# make it look like a nigth sky:
# - The background should be black
# - The stars can be small squares
# - The stars should have random positions on the canvas
# - The stars should have random color (some shade of grey)
from tkinter import *
import random
root = Tk()
canvas = Canvas(root, width='300', height='300', bg='black')
canvas.pack()
i = 0
while i < 30:
r = random.randint(1, 290)
s = random.randint(1, 290)
print(canvas.create_rectangle(r, s, r + 5, s + 5, fill='gray'))
i += 1
canvas.pack()
root.mainloop()
| [
"aron.gyenes@gmail.com"
] | aron.gyenes@gmail.com |
63f1c759388a1f6c898a492e8f947d5c7f9b7a99 | 12d05a7f65e6ca8ffa701670ed1bec209af77a51 | /capsule_biblosa/models/biblosa/model_context_fusion.py | 3a4558947f0e4fb55f264baf4a5bd036125fec6a | [] | no_license | Bobby-Han/text-classification | ec8015e6bb438fceb8d7b61117519e2d6469c57d | 2fa6d3ed4f3b9288ff7fb385c9cced44daf522ca | refs/heads/master | 2023-02-24T13:27:36.761439 | 2021-02-01T06:27:53 | 2021-02-01T06:27:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py |
from models.biblosa.configs import cfg
from models.biblosa.utils_biblosa.record_log import _logger
import tensorflow as tf
from models.biblosa.model_template import ModelTemplate
from models.biblosa.nn_utils.nn import linear
from models.biblosa.nn_utils.baselines.interface import sentence_encoding_models
class ModelContextFusion(ModelTemplate):
    """BiBloSA context-fusion text classifier built on ModelTemplate."""

    def __init__(self, W_embedding, scope):
        super(ModelContextFusion, self).__init__(W_embedding, scope)
        # Finish graph construction: EMA bookkeeping and optimizer ops.
        self.update_tensor_add_ema_and_opt()

    def build_network(self):
        """Build embedding -> sentence encoding -> 2-layer head; return logits."""
        with tf.variable_scope('emb'):
            emb = tf.nn.embedding_lookup(self.W_embedding, self.token_seq) # bs,sl1,tel

        with tf.variable_scope('sent_encoding'):
            # Context-fusion sentence encoder selected by cfg.context_fusion_method.
            rep = sentence_encoding_models(
                emb, self.token_mask, cfg.context_fusion_method, 'relu',
                'ct_based_sent2vec', cfg.wd, self.is_train, cfg.dropout, block_len=cfg.block_len)
            # Debug shape prints, e.g. emb (?, 200, 256), rep (?, 512).
            print('emb ', emb.shape)
            print('rep ', rep.shape)
            # exit(0)

        with tf.variable_scope('output'):
            # Hidden projection with ReLU, then the final class projection.
            pre_logits = tf.nn.relu(linear([rep], cfg.hidden_units_num, True, scope='pre_logits_linear',
                                           wd=cfg.wd, input_keep_prob=cfg.dropout,
                                           is_train=self.is_train)) # bs, hn
            logits = linear([pre_logits], cfg.n_class, False, scope='get_output',
                            wd=cfg.wd, input_keep_prob=cfg.dropout, is_train=self.is_train) # bs, 5
        _logger.done()
        return logits
"2501967525@qq.com"
] | 2501967525@qq.com |
b1f8abb4f62f317666f8dd6764f0a878d3a3ace1 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/refactoring/move/usageFromFunction/after/src/a.py | 2f5efc6775c95a5bc58b74169a4277fee24cd33a | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 19 | py | def f():
pass
| [
"andrey.vlasovskikh@gmail.com"
] | andrey.vlasovskikh@gmail.com |
949537d562f002e55024625b80f88fff2fae99b3 | 2132557b760e9837567a5026793da65cf0ea7671 | /Desktop_App/Addax/env/Lib/site-packages/ImageDraw.py | 7aa7deccc37ef3c0f83a23d08d44347e3c31a196 | [
"MIT"
] | permissive | filesmuggler/antelope | e54fa94e3e3036723026bb2b353af8b97c00bfbf | f7f4a857fc8afb4eb9e83f2a8e3734c863ba9102 | refs/heads/master | 2020-04-16T10:48:28.858132 | 2019-01-23T18:48:16 | 2019-01-23T18:48:16 | 165,517,519 | 0 | 0 | MIT | 2019-01-23T18:48:17 | 2019-01-13T14:51:36 | Python | UTF-8 | Python | false | false | 28 | py | from PIL.ImageDraw import *
| [
"krzste09@outlook.com"
] | krzste09@outlook.com |
895ac87d8ad6d02777cd3138ceae448566c4978f | fb28a622b21f5127c83c7fe6193b6312294b2dbe | /apps/videos/serializers.py | 2168d3dee4765b054deb4c9de8ce3792e35b1ecf | [] | no_license | laoyouqing/video | 0cd608b1f9d3a94da4a537867fafce6f7dcd1297 | 9aa7ecf17f0145437408a8c979f819bb61617294 | refs/heads/master | 2022-12-19T11:02:01.343892 | 2019-08-21T04:00:13 | 2019-08-21T04:00:13 | 203,500,521 | 0 | 0 | null | 2022-12-08T06:03:17 | 2019-08-21T03:40:13 | Python | UTF-8 | Python | false | false | 1,732 | py | from rest_framework import serializers
from videos.models import IndexGoodsBanner, Video
class BannerSerializer(serializers.ModelSerializer):
    '''Carousel banner: exposes the linked video and its banner image.'''
    class Meta:
        model = IndexGoodsBanner
        fields = ('video','image')
class IndexSerializer(serializers.ModelSerializer):
    '''Home-page video listing (thumbnail, name, status, playback URL, trial flag).'''
    class Meta:
        model = Video
        fields = ('id','image','name','desc','status','url','try_see')
class DetailSerializer(serializers.ModelSerializer):
    '''Video detail view, including its related banner records.'''
    class Meta:
        model = Video
        fields = ('id','image','name','desc','price','url','detail','number','standard_para','try_see','status','indexgoodsbanner_set')
        # depth=1 nests the reverse indexgoodsbanner_set relation one level deep.
        depth=1
class IndexGoodsBannerSerializer1(serializers.ModelSerializer):
    '''Video banner image (admin side); timestamps rendered as "Y-m-d H:M:S".'''
    create_time = serializers.DateTimeField(read_only=True, format="%Y-%m-%d %H:%M:%S")
    update_time = serializers.DateTimeField(read_only=True, format="%Y-%m-%d %H:%M:%S")
    class Meta:
        model = IndexGoodsBanner
        fields = '__all__'
# Admin (backend) serializers
class VideoSerializer1(serializers.ModelSerializer):
    '''Video (admin side): read-only timestamps/counter plus nested banner images.'''
    create_time = serializers.DateTimeField(read_only=True, format="%Y-%m-%d %H:%M:%S")
    update_time = serializers.DateTimeField(read_only=True, format="%Y-%m-%d %H:%M:%S")
    number = serializers.IntegerField(read_only=True)
    # Nested, read-only list of this video's banner records.
    indexgoodsbanner_set = IndexGoodsBannerSerializer1(many=True,read_only=True)
    class Meta:
        model = Video
        fields = '__all__'
class UploadImageSerializer(serializers.Serializer):
    '''Image-upload payload: a single image file field.'''
    image=serializers.ImageField()
class UploadVideoSerializer(serializers.Serializer):
'''视频上传'''
video=serializers.FileField() | [
"lingo.lin@foxmail.com"
] | lingo.lin@foxmail.com |
db9d2e5d78b105e041063fa27e041b701c474cdb | 12977c4d2eae4bfd3b7112814a7e63c506e7bbbe | /ad_budget_detail_div/report/budget_detail_xls_backup.py | e1458b4cde2d2839b081227d4eb06c6b0ac9ed5c | [] | no_license | aryaadiputra/addons60_ptgbu_2013 | 211f3ab9fc74cc3e3f4df770b6ada65d24b83977 | b5cf28bdbb347df4c39ffe3ca32355bd2206077b | refs/heads/master | 2020-04-06T04:11:37.667486 | 2016-11-25T03:27:54 | 2016-11-25T03:27:54 | 58,649,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119,703 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# Copyright (c) 2009 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <jesteve@zikzakmedia.com>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import xlwt
from report_engine_xls import report_xls
from ad_budget_detail.report.budget_detail import budget_detail
import cStringIO
from tools.translate import _
from xlwt import Workbook, Formula
class budget_detail_xls(report_xls):
def _get_start_date(self, data):
# ok
if data.get('form', False) and data['form'].get('date_from', False):
return data['form']['date_from']
return ''
def _get_end_date(self, data):
# ok
if data.get('form', False) and data['form'].get('date_to', False):
return data['form']['date_to']
return ''
def get_start_period(self, data):
if data.get('form', False) and data['form'].get('period_from', False):
return pooler.get_pool(self.cr.dbname).get('account.period').browse(self.cr,self.uid,data['form']['period_from']).name
return ''
def get_end_period(self, data):
if data.get('form', False) and data['form'].get('period_to', False):
return pooler.get_pool(self.cr.dbname).get('account.period').browse(self.cr, self.uid, data['form']['period_to']).name
return ''
def _get_target_move(self, data):
if data.get('form', False) and data['form'].get('target_move', False):
if data['form']['target_move'] == 'all':
return _('All Entries')
return _('All Posted Entries')
return ''
def _get_filter(self, data):
if data.get('form', False) and data['form'].get('filter', False):
if data['form']['filter'] == 'filter_date':
return _('Date')
elif data['form']['filter'] == 'filter_period':
return _('Periods')
return _('No Filter')
    def _display_filter(self, parser, data):
        """Build a human-readable caption of the wizard's filter settings.

        NOTE(review): only the fiscal-year/period string assembled at the
        end is returned; ``filter_string``, ``moves_string`` and
        ``display_acct_string`` are computed but never used (dead code).
        Also, ``filter_mode`` is compared against the literal 'Date' /
        'Periods' although ``_get_filter`` returns *translated* strings, so
        these branches only match in English -- confirm intent.  The
        ``data['form']['display_account']`` lookup raises KeyError when the
        key is absent.
        """
        filter_mode = self._get_filter(data)
        filter_string = filter_mode
        if filter_mode == 'Date':
            filter_string = '%s -> %s' % (parser.formatLang(self._get_start_date(data), date=True),
                        parser.formatLang(self._get_end_date(data), date=True))
        elif filter_mode == 'Periods':
            filter_string = '%s -> %s' % (self.get_start_period(data),
                        self.get_end_period(data))
        moves_string = self._get_target_move(data)
        display_acct_string = ''
        if data['form']['display_account'] == 'bal_all':
            display_acct_string = 'All'
        elif data['form']['display_account'] == 'bal_movement':
            display_acct_string = 'With movements'
        else:
            display_acct_string = 'With balance is not equal to 0'
        fiscal_year_str = parser.get_fiscalyear_text(data['form'])
        period_date_str = parser.get_periods_and_date_text(data['form'])
        return 'Fiscal Year: %s, Period & Date By: %s' % (fiscal_year_str, period_date_str)
def _display_fiscalyear(self, parser, data):
"""k = parser.get_fiscalyear_text(data)
if k:
k = 'Fiscal Year: %s' % (k)"""
k = "asdfasdfasdfasdf"
return k
## Modules Begin
    def _size_col(sheet, col):
        # Return the stored width of column ``col`` on ``sheet``.
        # NOTE(review): defined inside the class without ``self`` -- if
        # invoked as an instance method, ``sheet`` binds to the instance.
        # Appears unused in this file; confirm before relying on it.
        return sheet.col_width(col)
    def _size_row(sheet, row):
        # Return the stored height of row ``row`` on ``sheet``.
        # NOTE(review): same missing-``self`` issue as ``_size_col`` above;
        # appears unused in this file.
        return sheet.row_height(row)
## Modules End
"""def id_generator(self, size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))"""
def generate_xls_report(self, parser, data, obj, wb):
c = parser.localcontext['company']
ws = wb.add_sheet(('Detail Account'))
ws.panes_frozen = True
ws.remove_splits = True
ws.portrait = 0 # Landscape
ws.fit_width_to_pages = 1
ws.col(0).width = len("ABC")*256
ws.col(1).width = len("ABCD")*256
ws.col(2).width = len("ABCDEF")*2048
ws.col(3).width = len("ABC")*256
ws.col(4).width = len("ABC")*1280
ws.col(5).width = len("ABC")*256
ws.col(7).width = len("ABC")*256
ws.col(9).width = len("ABC")*256
ws.col(11).width = len("ABC")*256
ws.col(13).width = len("ABC")*256
ws.col(15).width = len("ABC")*256
ws.col(17).width = len("ABC")*256
ws.col(19).width = len("ABC")*256
ws.col(21).width = len("ABC")*256
ws.col(23).width = len("ABC")*256
ws.col(25).width = len("ABC")*256
ws.col(27).width = len("ABC")*256
ws.col(29).width = len("ABC")*256
ws.col(30).width = len("ABC")*256
ws.col(31).width = len("ABC")*1280
#ws.col(33).width = len("ABC")*256
#ws.col(35).width = len("ABC")*256
# ws.col(11).width = len("ABC")*256
# ws.col(21).width = len("ABCD")*1024
# ws.col(22).width = len("ABCD")*1024
# ws.col(23).width = len("ABCD")*1024
#ws.col(4).width = len("A bunch of longer text not wrapped")*256
#ws.row(4).height = len("A bunch of longer text not wrapped")*256
company = "%s" % (c.name)
as_of = data['form']['as_of_date']
fy = data['form']['fiscalyear_id']
styles = dict(
bold = 'font: bold 1',
italic = 'font: italic 1',
# Wrap text in the cell
wrap_bold = 'font: bold 1; align: wrap 1;',
# White text on a blue background
reversed = 'pattern: pattern solid, fore_color blue; font: color black;',
# Light orange checkered background
light_orange_bg = 'pattern: pattern fine_dots, fore_color white, back_color orange;',
# Heavy borders
bordered = 'border: top thick, right thick, bottom thick, left thick;',
# 16 pt red text
big_red = 'font: height 320, color red;',
)
#print styles['light_orange_bg']
cols_specs = [
# Headers data
('Kosong', 1, 0, 'text',
lambda x, d, p: ""),
('Note', 1, 0, 'text',
lambda x, d, p: 'Note:'),
('Note1', 6, 0, 'text',
lambda x, d, p: "1. This rolling report should include P&L, cashflow & balance sheet"),
('Note2', 6, 0, 'text',
lambda x, d, p: "2. ERP should produce both detail & summary (high level, major accounts)"),
('Note3', 6, 0, 'text',
lambda x, d, p: "3. Need to add Revenue"),
('Space', 31, 0, 'text',
lambda x, d, p: ""),
('Space2', 32, 0, 'text',
lambda x, d, p: ""),
('Space_dep', 31, 0, 'text',
lambda x, d, p: x.name),
('Company', 31, 0, 'text',
lambda x, d, p: company.upper()),
('Judul', 31, 0, 'text',
lambda x, d, p: "Detail of Account"),
('Dept', 31, 0, 'text',
lambda x, d, p: x.name),
('AsOff', 31, 0, 'text',
lambda x, d, p: "As of %s " % (parser.formatLang(as_of, date=True))),
('HeaderCOA', 5, 0, 'text',
lambda x, d, p: "Account"),
('HeaderDesc', 2, 0, 'text',
lambda x, d, p: "Descriptions"),
('SP', 1, 0, 'text', lambda x, d, p: ''),
('HeaderM1', 1, 0, 'text', lambda x, d, p: data['form']['1']['date']),
('HeaderM2', 1, 0, 'text', lambda x, d, p: data['form']['2']['date']),
('HeaderM3', 1, 0, 'text', lambda x, d, p: data['form']['3']['date']),
('HeaderM4', 1, 0, 'text', lambda x, d, p: data['form']['4']['date']),
('HeaderM5', 1, 0, 'text', lambda x, d, p: data['form']['5']['date']),
('HeaderM6', 1, 0, 'text', lambda x, d, p: data['form']['6']['date']),
('HeaderM7', 1, 0, 'text', lambda x, d, p: data['form']['7']['date']),
('HeaderM8', 1, 0, 'text', lambda x, d, p: data['form']['8']['date']),
('HeaderM9', 1, 0, 'text', lambda x, d, p: data['form']['9']['date']),
('HeaderM10', 1, 0, 'text', lambda x, d, p: data['form']['10']['date']),
('HeaderM11', 1, 0, 'text', lambda x, d, p: data['form']['11']['date']),
('HeaderM12', 1, 0, 'text', lambda x, d, p: data['form']['12']['date']),
('HeaderTotal', 1, 0, 'text', lambda x, d, p: "TOTAL"),
('HeaderBudget', 1, 0, 'text', lambda x, d, p: "BUDGET"),
('HeaderVariance', 1, 0, 'text', lambda x, d, p: "VARIANCE"),
#Month Total COGS
('TotalCOGSDesc', 8, 0, 'text', lambda x, d, p: "TOTAL PRODUCTION COST"),
('MtotCOGS1', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['1']['id'], as_of, fy, cogs, False, False)),
('MtotCOGS2', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['2']['id'], as_of, fy, cogs, False, False)),
('MtotCOGS3', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['3']['id'], as_of, fy, cogs, False, False)),
('MtotCOGS4', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['4']['id'], as_of, fy, cogs, False, False)),
('MtotCOGS5', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['5']['id'], as_of, fy, cogs, False, False)),
('MtotCOGS6', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['6']['id'], as_of, fy, cogs, False, False)),
('MtotCOGS7', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['7']['id'], as_of, fy, cogs, False, False)),
('MtotCOGS8', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['8']['id'], as_of, fy, cogs, False, False)),
('MtotCOGS9', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['9']['id'], as_of, fy, cogs, False, False)),
('MtotCOGS10', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['10']['id'], as_of, fy, cogs, False, False)),
('MtotCOGS11', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['11']['id'], as_of, fy, cogs, False, False)),
('MtotCOGS12', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['12']['id'], as_of, fy, cogs, False, False)),
('TotalCOGS', 1, 270, 'number', lambda x, d, p: parser.get_total_row_Total(as_of, fy, cogs, False, False)),
('BudgetCOGS', 1, 270, 'number', lambda x, d, p: parser.get_total_row_Budget(as_of, fy, cogs, False, False)),
('VarianceCOGS', 1, 270, 'number', lambda x, d, p: (parser.get_total_row_Budget(as_of, fy, cogs, False, False))-(parser.get_total_row_Total(as_of, fy, cogs, False, False))),
#Month Total Expense
('TotalExpense', 8, 0, 'text', lambda x, d, p: "TOTAL OPERATING EXPENSES"),
('MtotEXP1', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['1']['id'], as_of, fy, exps, False, False)),
('MtotEXP2', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['2']['id'], as_of, fy, exps, False, False)),
('MtotEXP3', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['3']['id'], as_of, fy, exps, False, False)),
('MtotEXP4', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['4']['id'], as_of, fy, exps, False, False)),
('MtotEXP5', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['5']['id'], as_of, fy, exps, False, False)),
('MtotEXP6', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['6']['id'], as_of, fy, exps, False, False)),
('MtotEXP7', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['7']['id'], as_of, fy, exps, False, False)),
('MtotEXP8', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['8']['id'], as_of, fy, exps, False, False)),
('MtotEXP9', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['9']['id'], as_of, fy, exps, False, False)),
('MtotEXP10', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['10']['id'], as_of, fy, exps, False, False)),
('MtotEXP11', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['11']['id'], as_of, fy, exps, False, False)),
('MtotEXP12', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['12']['id'], as_of, fy, exps, False, False)),
('TotalEXP', 1, 270, 'number', lambda x, d, p: parser.get_total_row_Total(as_of, fy, exps, False, False)),
('BudgetEXP', 1, 270, 'number', lambda x, d, p: parser.get_total_row_Budget(as_of, fy, exps, False, False)),
('VarianceEXP', 1, 270, 'number', lambda x, d, p: (parser.get_total_row_Budget(as_of, fy, exps, False, False))-(parser.get_total_row_Total(as_of, fy, exps, False, False))),
#Month Total COGS Department
('TotalCOGSDepDesc', 8, 0, 'text', lambda x, d, p: "TOTAL PRODUCTION COST"),
('MtotCOGSDep1', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['1']['id'], as_of, fy, cogsD, False, dep['id'])),
('MtotCOGSDep2', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['2']['id'], as_of, fy, cogsD, False, dep['id'])),
('MtotCOGSDep3', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['3']['id'], as_of, fy, cogsD, False, dep['id'])),
('MtotCOGSDep4', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['4']['id'], as_of, fy, cogsD, False, dep['id'])),
('MtotCOGSDep5', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['5']['id'], as_of, fy, cogsD, False, dep['id'])),
('MtotCOGSDep6', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['6']['id'], as_of, fy, cogsD, False, dep['id'])),
('MtotCOGSDep7', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['7']['id'], as_of, fy, cogsD, False, dep['id'])),
('MtotCOGSDep8', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['8']['id'], as_of, fy, cogsD, False, dep['id'])),
('MtotCOGSDep9', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['9']['id'], as_of, fy, cogsD, False, dep['id'])),
('MtotCOGSDep10', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['10']['id'], as_of, fy, cogsD, False, dep['id'])),
('MtotCOGSDep11', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['11']['id'], as_of, fy, cogsD, False, dep['id'])),
('MtotCOGSDep12', 1, 0, 'number', lambda x, d, p: parser.get_total_row_cogs(data['form']['12']['id'], as_of, fy, cogsD, False, dep['id'])),
('TotalCOGSDep', 1, 270, 'number', lambda x, d, p: parser.get_total_row_Total(as_of, fy, cogsD, False, dep['id'])),
('BudgetCOGSDep', 1, 270, 'number', lambda x, d, p: parser.get_total_row_Budget(as_of, fy, cogsD, False, dep['id'])),
('VarianceCOGSDep', 1, 270, 'number', lambda x, d, p: (parser.get_total_row_Budget(as_of, fy, cogsD, False, dep['id']))-(parser.get_total_row_Total(as_of, fy, cogsD, False, dep['id']))),
#Month Total Expense Department
('TotalExpenseDep', 8, 0, 'text', lambda x, d, p: "TOTAL OPERATING EXPENSES"),
('MtotEXPDep1', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['1']['id'], as_of, fy, expsD, False, dep['id'])),
('MtotEXPDep2', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['2']['id'], as_of, fy, expsD, False, dep['id'])),
('MtotEXPDep3', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['3']['id'], as_of, fy, expsD, False, dep['id'])),
('MtotEXPDep4', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['4']['id'], as_of, fy, expsD, False, dep['id'])),
('MtotEXPDep5', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['5']['id'], as_of, fy, expsD, False, dep['id'])),
('MtotEXPDep6', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['6']['id'], as_of, fy, expsD, False, dep['id'])),
('MtotEXPDep7', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['7']['id'], as_of, fy, expsD, False, dep['id'])),
('MtotEXPDep8', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['8']['id'], as_of, fy, expsD, False, dep['id'])),
('MtotEXPDep9', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['9']['id'], as_of, fy, expsD, False, dep['id'])),
('MtotEXPDep10', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['10']['id'], as_of, fy, expsD, False, dep['id'])),
('MtotEXPDep11', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['11']['id'], as_of, fy, expsD, False, dep['id'])),
('MtotEXPDep12', 1, 0, 'number', lambda x, d, p: parser.get_total_row_expense(data['form']['12']['id'], as_of, fy, expsD, False, dep['id'])),
('TotalEXPDep', 1, 270, 'number', lambda x, d, p: parser.get_total_row_Total(as_of, fy, expsD, False, dep['id'])),
('BudgetEXPDep', 1, 270, 'number', lambda x, d, p: parser.get_total_row_Budget(as_of, fy, expsD, False, dep['id'])),
('VarianceEXPDep', 1, 270, 'number', lambda x, d, p: (parser.get_total_row_Budget(as_of, fy, expsD, False, dep['id']))-(parser.get_total_row_Total(as_of, fy, expsD, False, dep['id']))),
('StateCOA', 2, 0, 'text', lambda x, d, p: ""),
('StateDesc', 6, 0, 'text', lambda x, d, p: ""),
('StateM1', 1, 0, 'text', lambda x, d, p: data['form']['1']['state']),
('StateM2', 1, 0, 'text', lambda x, d, p: data['form']['2']['state']),
('StateM3', 1, 0, 'text', lambda x, d, p: data['form']['3']['state']),
('StateM4', 1, 0, 'text', lambda x, d, p: data['form']['4']['state']),
('StateM5', 1, 0, 'text', lambda x, d, p: data['form']['5']['state']),
('StateM6', 1, 0, 'text', lambda x, d, p: data['form']['6']['state']),
('StateM7', 1, 0, 'text', lambda x, d, p: data['form']['7']['state']),
('StateM8', 1, 0, 'text', lambda x, d, p: data['form']['8']['state']),
('StateM9', 1, 0, 'text', lambda x, d, p: data['form']['9']['state']),
('StateM10', 1, 0, 'text', lambda x, d, p: data['form']['10']['state']),
('StateM11', 1, 0, 'text', lambda x, d, p: data['form']['11']['state']),
('StateM12', 1, 0, 'text', lambda x, d, p: data['form']['12']['state']),
# ('Fiscal Year', 5, 0, 'text',
# lambda x, d, p: 'self._display_filter(p, d)'),
# ('Create Date', 5, 0, 'text',
# lambda x, d, p: 'Create date: ' + p.formatLang(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),date_time = True)),
#('Filter', 10, 0, 'text',
# lambda x, d, p: self._display_filter(p, d)),
# Balance column
#WITH DEPARTMENT
('Code', 1, 67, 'text', lambda x, d, p: ' '*x['level'] + x['name']),
('Name', 1, 270, 'text', lambda x, d, p: 'Budget'),
('MD1', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, fy, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id'])),
('MD2', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, fy, data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['2']['end'], i['type'], i['item'], dep['id'])),
('MD3', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, fy, data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['3']['end'], i['type'], i['item'], dep['id'])),
('MD4', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, fy, data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['4']['end'], i['type'], i['item'], dep['id'])),
('MD5', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, fy, data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['5']['end'], i['type'], i['item'], dep['id'])),
('MD6', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, fy, data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['6']['end'], i['type'], i['item'], dep['id'])),
('MD7', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, fy, data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['7']['end'], i['type'], i['item'], dep['id'])),
('MD8', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, fy, data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['8']['end'], i['type'], i['item'], dep['id'])),
('MD9', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, fy, data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['9']['end'], i['type'], i['item'], dep['id'])),
('MD10', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, fy, data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['10']['end'], i['type'], i['item'], dep['id'])),
('MD11', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, fy, data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['11']['end'], i['type'], i['item'], dep['id'])),
('MD12', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, fy, data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['12']['end'], i['type'], i['item'], dep['id'])),
('TotalD', 1, 270, 'number', lambda x, d, p: parser.get_total(as_of, fy, i['id'], i['type'], dep['id']) or 0.0),
('BudgetD', 1, 270, 'number', lambda x, d, p: parser.get_total_BudgetD(as_of, fy, i['id'], False, dep['id'])),
('VarianceD', 1, 270, 'number', lambda x, d, p: (parser.get_total_BudgetD(as_of, fy, i['id'], False, dep['id']))-(parser.get_total(as_of, fy, i['id'], i['type'], dep['id'])) or 0.0),
#NO DEPARTMENT
('M1', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], False)),
('M2', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['2']['end'], i['type'], i['item'], False)),
('M3', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['3']['end'], i['type'], i['item'], False)),
('M4', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['4']['end'], i['type'], i['item'], False)),
('M5', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['5']['end'], i['type'], i['item'], False)),
('M6', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['6']['end'], i['type'], i['item'], False)),
('M7', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['7']['end'], i['type'], i['item'], False)),
('M8', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['8']['end'], i['type'], i['item'], False)),
('M9', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['9']['end'], i['type'], i['item'], False)),
('M10', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['10']['end'], i['type'], i['item'], False)),
('M11', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['11']['end'], i['type'], i['item'], False)),
('M12', 1, 270, 'number', lambda x, d, p: parser.get_period(as_of, data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['12']['end'], i['type'], i['item'], False)),
('Total', 1, 270, 'number', lambda x, d, p: parser.get_total(as_of, i['id'], i['type'], False) or 0.0),
('Budget', 1, 270, 'number', lambda x, d, p: x['balance'] or 0.0),
('Variance', 1, 270, 'number', lambda x, d, p: x['balance']-parser.get_total(as_of, fy, i['id'], i['type'], False) or 0.0),
#NO DEPARTMENT ACTUAL
('Actual', 1, 270, 'text', lambda x, d, p: 'Actual'),
('Mact1', 1, 270, 'number', lambda x, d, p: parser.get_period_actual(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], False)),
('Mact2', 1, 270, 'number', lambda x, d, p: parser.get_period_actual(as_of, data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['2']['end'], i['type'], i['item'], False)),
('Mact3', 1, 270, 'number', lambda x, d, p: parser.get_period_actual(as_of, data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['3']['end'], i['type'], i['item'], False)),
('Mact4', 1, 270, 'number', lambda x, d, p: parser.get_period_actual(as_of, data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['4']['end'], i['type'], i['item'], False)),
('Mact5', 1, 270, 'number', lambda x, d, p: parser.get_period_actual(as_of, data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['5']['end'], i['type'], i['item'], False)),
('Mact6', 1, 270, 'number', lambda x, d, p: parser.get_period_actual(as_of, data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['6']['end'], i['type'], i['item'], False)),
('Mact7', 1, 270, 'number', lambda x, d, p: parser.get_period_actual(as_of, data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['7']['end'], i['type'], i['item'], False)),
('Mact8', 1, 270, 'number', lambda x, d, p: parser.get_period_actual(as_of, data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['8']['end'], i['type'], i['item'], False)),
('Mact9', 1, 270, 'number', lambda x, d, p: parser.get_period_actual(as_of, data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['9']['end'], i['type'], i['item'], False)),
('Mact10', 1, 270, 'number', lambda x, d, p: parser.get_period_actual(as_of, data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['10']['end'], i['type'], i['item'], False)),
('Mact11', 1, 270, 'number', lambda x, d, p: parser.get_period_actual(as_of, data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['11']['end'], i['type'], i['item'], False)),
('Mact12', 1, 270, 'number', lambda x, d, p: parser.get_period_actual(as_of, data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['12']['end'], i['type'], i['item'], False)),
('Total', 1, 270, 'number', lambda x, d, p: parser.get_total(as_of, i['id'], i['type'], False) or 0.0),
('Budget', 1, 270, 'number', lambda x, d, p: x['balance'] or 0.0),
('Variance', 1, 270, 'number', lambda x, d, p: x['balance']-parser.get_total(as_of, fy, i['id'], i['type'], False) or 0.0),
#NO DEPARTMENT UNDER
('Under', 1, 270, 'text', lambda x, d, p: 'Under/(Over)'),
('Mund1', 1, 270, 'number', lambda x, d, p: parser.get_period_under(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], False)),
('Mund2', 1, 270, 'number', lambda x, d, p: parser.get_period_under(as_of, data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['2']['end'], i['type'], i['item'], False)),
('Mund3', 1, 270, 'number', lambda x, d, p: parser.get_period_under(as_of, data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['3']['end'], i['type'], i['item'], False)),
('Mund4', 1, 270, 'number', lambda x, d, p: parser.get_period_under(as_of, data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['4']['end'], i['type'], i['item'], False)),
('Mund5', 1, 270, 'number', lambda x, d, p: parser.get_period_under(as_of, data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['5']['end'], i['type'], i['item'], False)),
('Mund6', 1, 270, 'number', lambda x, d, p: parser.get_period_under(as_of, data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['6']['end'], i['type'], i['item'], False)),
('Mund7', 1, 270, 'number', lambda x, d, p: parser.get_period_under(as_of, data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['7']['end'], i['type'], i['item'], False)),
('Mund8', 1, 270, 'number', lambda x, d, p: parser.get_period_under(as_of, data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['8']['end'], i['type'], i['item'], False)),
('Mund9', 1, 270, 'number', lambda x, d, p: parser.get_period_under(as_of, data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['9']['end'], i['type'], i['item'], False)),
('Mund10', 1, 270, 'number', lambda x, d, p: parser.get_period_under(as_of, data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['10']['end'], i['type'], i['item'], False)),
('Mund11', 1, 270, 'number', lambda x, d, p: parser.get_period_under(as_of, data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['11']['end'], i['type'], i['item'], False)),
('Mund12', 1, 270, 'number', lambda x, d, p: parser.get_period_under(as_of, data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['12']['end'], i['type'], i['item'], False)),
('Total', 1, 270, 'number', lambda x, d, p: parser.get_total(as_of, i['id'], i['type'], False) or 0.0),
('Budget', 1, 270, 'number', lambda x, d, p: x['balance'] or 0.0),
('Variance', 1, 270, 'number', lambda x, d, p: x['balance']-parser.get_total(as_of, fy, i['id'], i['type'], False) or 0.0),
]
row_hdr0 = self.xls_row_template(cols_specs, ['Kosong','Note','Note1'])
row_hdr1 = self.xls_row_template(cols_specs, ['Kosong','Kosong','Note2'])
row_hdr2 = self.xls_row_template(cols_specs, ['Kosong','Kosong','Note3'])
row_hdr3 = self.xls_row_template(cols_specs, ['Kosong','Space'])
row_hdr4 = self.xls_row_template(cols_specs, ['Kosong','Company'])
row_hdr5 = self.xls_row_template(cols_specs, ['Kosong','Judul'])
row_hdr6 = self.xls_row_template(cols_specs, ['Kosong','AsOff'])
row_hdr7 = self.xls_row_template(cols_specs, ['Space2'])
row_hdr_dep = self.xls_row_template(cols_specs, ['Kosong','Space_dep'])
row_hdr8 = self.xls_row_template(cols_specs, ['Kosong','HeaderCOA','HeaderDesc','HeaderM1','HeaderM2','HeaderM3','HeaderM4','HeaderM5','HeaderM6','HeaderM7','HeaderM8','HeaderM9','HeaderM10','HeaderM11','HeaderM12','HeaderTotal','HeaderBudget','HeaderVariance'])
row_hdr9 = self.xls_row_template(cols_specs, ['Kosong','StateCOA','StateDesc','StateM1','StateM2','StateM3','StateM4','StateM5','StateM6','StateM7','StateM8','StateM9','StateM10','StateM11','StateM12','Kosong','Kosong','Kosong'])
#row_hdr9 = self.xls_row_template(cols_specs, ['Kosong','Space'])
row_hdr10 = self.xls_row_template(cols_specs, ['Kosong','Space'])
row_hdr11 = self.xls_row_template(cols_specs, ['Kosong','Dept'])
row_loopDep = self.xls_row_template(cols_specs, ['Kosong','Kosong','Code','Name','MD1','MD2','MD3','MD4','MD5','MD6','MD7','MD8','MD9','MD10','MD11','MD12','TotalD','BudgetD','VarianceD'])#
row_loop = self.xls_row_template(cols_specs, ['Kosong','Kosong','Code','Kosong','Name','SP','M1','SP','M2','SP','M3','SP','M4','SP','M5','SP','M6','SP','M7','SP','M8','SP','M9','SP','M10','SP','M11','SP','M12','SP'])
row_loop_actual = self.xls_row_template(cols_specs, ['Kosong','Kosong','Kosong','Kosong','Actual','Kosong','Mact1','SP','Mact2','SP','Mact3','SP','Mact4','SP','Mact5','SP','Mact6','SP','Mact7','SP','Mact8','SP','Mact9','SP','Mact10','SP','Mact11','SP','Mact12','SP'])
row_loop_under = self.xls_row_template(cols_specs, ['Kosong','Kosong','Kosong','Kosong','Under','Kosong','Mund1','SP','Mund2','SP','Mund3','SP','Mund4','SP','Mund5','SP','Mund6','SP','Mund7','SP','Mund8','SP','Mund9','SP','Mund10','SP','Mund11','SP','Mund12','SP'])
row_total_cogs = self.xls_row_template(cols_specs, ['Kosong','TotalCOGSDesc','MtotCOGS1','MtotCOGS2','MtotCOGS3','MtotCOGS4','MtotCOGS5','MtotCOGS6','MtotCOGS7','MtotCOGS8','MtotCOGS9','MtotCOGS10','MtotCOGS11','MtotCOGS12','TotalCOGS','BudgetCOGS','VarianceCOGS'])
row_total_expense = self.xls_row_template(cols_specs, ['Kosong','TotalExpense','MtotEXP1','MtotEXP2','MtotEXP3','MtotEXP4','MtotEXP5','MtotEXP6','MtotEXP7','MtotEXP8','MtotEXP9','MtotEXP10','MtotEXP11','MtotEXP12','TotalEXP','BudgetEXP','VarianceEXP'])
#
#
row_total_cogsDep = self.xls_row_template(cols_specs, ['Kosong','TotalCOGSDepDesc','MtotCOGSDep1','MtotCOGSDep2','MtotCOGSDep3','MtotCOGSDep4','MtotCOGSDep5','MtotCOGSDep6','MtotCOGSDep7','MtotCOGSDep8','MtotCOGSDep9','MtotCOGSDep10','MtotCOGSDep11','MtotCOGSDep12','TotalCOGSDep','BudgetCOGSDep','VarianceCOGSDep'])
row_total_expenseDep = self.xls_row_template(cols_specs, ['Kosong','TotalExpenseDep','MtotEXPDep1','MtotEXPDep2','MtotEXPDep3','MtotEXPDep4','MtotEXPDep5','MtotEXPDep6','MtotEXPDep7','MtotEXPDep8','MtotEXPDep9','MtotEXPDep10','MtotEXPDep11','MtotEXPDep12','TotalEXPDep','BudgetEXPDep','VarianceEXPDep'])
## Style variable Begin borders: top thick, bottom solid, left double, right double;
hdr_style = xlwt.easyxf('pattern: pattern solid, fore_color gray25;')
row_normal_style= xlwt.easyxf('font: height 170, colour_index black;pattern: pattern solid, fore_color white;',num_format_str='#,##0;(#,##0)')
row_bold_style = xlwt.easyxf('font: height 180, colour_index black, bold on;pattern: pattern solid, fore_color white;',num_format_str='#,##0;(#,##0)')
row_bold_style_total = xlwt.easyxf('font: height 180, colour_index black, bold on;pattern: pattern solid, fore_color white;borders: top thin, bottom medium;',num_format_str='#,##0;(#,##0)')
style = xlwt.easyxf(styles['reversed'])
style_bold = xlwt.easyxf('font: height 170, colour_index black, bold on; align: wrap on, vert top, horiz right;pattern: pattern solid, fore_color white;',num_format_str='#,##0;(#,##0)')
style_italic_bold = xlwt.easyxf('font: height 170, colour_index black, bold on, italic on; align: wrap on, vert top, horiz left;pattern: pattern solid, fore_color white;',num_format_str='#,##0;(#,##0)')
style_under = xlwt.easyxf('font: height 170, colour_index black;pattern: pattern solid, fore_color white; borders: top thin;',num_format_str='#,##0;(#,##0)')
style_under_bold = xlwt.easyxf('font: height 170, colour_index black, bold on; align: wrap on, vert top, horiz right;pattern: pattern solid, fore_color white; borders: top thin;',num_format_str='#,##0;(#,##0)')
tittle_style_closed = xlwt.easyxf('font: height 180, colour_index black, bold on;pattern: pattern solid, fore_color white; borders: right thin;',num_format_str='#,##0;(#,##0)')
tittle_style_closed_dep = xlwt.easyxf('font: height 180, colour_index black, bold on;pattern: pattern solid, fore_color white; borders: bottom double;',num_format_str='#,##0;(#,##0)')
tittle_style_closed_bottom = xlwt.easyxf('font: height 180, colour_index black, bold on;pattern: pattern solid, fore_color white; borders: right thin, bottom thin;',num_format_str='#,##0;(#,##0)')
tittle_style = xlwt.easyxf('font: height 180,name Arial, colour_index white, bold on; pattern: pattern solid, fore_color brown;')
tittle_style2 = xlwt.easyxf('font: height 180,name Arial, colour_index white, bold on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color white;')
tittle_bold_left_style = xlwt.easyxf('font: height 200, name Arial, colour_index black, bold on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color white;')
tittle_left_style = xlwt.easyxf('font: height 200, name Arial, colour_index black; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color white;')
tittle_bold_left_style2 = xlwt.easyxf('font: height 200, name Arial, colour_index black, bold on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color white;borders: bottom double;')
tittle_left_italic_style = xlwt.easyxf('font: height 190, name Arial, colour_index black, italic on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color white;')
tittle_bold_center_style_top = xlwt.easyxf('font: height 190, name Arial, colour_index black, bold on; align: wrap on, vert centre, horiz centre; pattern: pattern solid, fore_color gray25;borders: top thin;')
tittle_bold_center_style_bottom = xlwt.easyxf('font: height 190, name Arial, colour_index black, bold on; align: wrap on, vert centre, horiz centre; pattern: pattern solid, fore_color gray25;borders: bottom thin;')
#row_normal_style = xlwt.easyxf('font: height 170, name Arial, colour_index black; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color white;',num_format_str='#,##0;(#,##0)')
#row_bold_style = xlwt.easyxf('font: height 180, name Arial, colour_index black, bold on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color white;',num_format_str='#,##0;(#,##0)')
subtittle_right_style = xlwt.easyxf('font: height 170, name Arial, colour_index black, bold on, italic on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color gray25;')
subtittle_top_and_bottom_style = xlwt.easyxf('font: height 240, name Arial, colour_index black, bold off, italic on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color gray25;')
blank_style = xlwt.easyxf('font: height 650, name Arial, colour_index brown, bold off; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color gray25;')
normal_style = xlwt.easyxf('font: height 240, name Arial, colour_index black, bold off; align: wrap on, vert centre, horiz left;')
total_style = xlwt.easyxf('font: height 240, name Arial, colour_index brown, bold on, italic on; align: wrap on, vert centre;', num_format_str='#,##0.00;(#,##0.00)')
## Style variable End
# Write headers
# ws.write(0, 0, '', tittle_style2)
# ws.write(0, 1, '', tittle_style2)
# ws.write(0, 2, 'Note: ', tittle_style)
# ws.write(0, 3, '1.', tittle_style)
# ws.write(0, 4, 'This rolling report should include P&L, cashflow & balance sheet', tittle_style)
# #ws.write(0,5,Formula('$A$1/$B$1'))
# for x in [5,6,7,8,9]:
# ws.write(0, x, '', tittle_style)
# for x in [10,11,12,13,14,15,16,17,18,19,20,21,22,23]:
# ws.write(0, x, '', tittle_style2)
#
# ws.write(1, 0, '', tittle_style2)
# ws.write(1, 1, '', tittle_style2)
# ws.write(1, 2, '', tittle_style)
# ws.write(1, 3, '2.', tittle_style)
# ws.write(1, 4, 'ERP should produce both detail & summary (high level, major accounts)', tittle_style)
# for x in [5,6,7,8,9]:
# ws.write(1, x, '', tittle_style)
# for x in [10,11,12,13,14,15,16,17,18,19,20,21,22,23]:
# ws.write(1, x, '', tittle_style2)
#
# ws.write(2, 0, '', tittle_style2)
# ws.write(2, 1, '', tittle_style2)
# ws.write(2, 2, '', tittle_style)
# ws.write(2, 3, '3.', tittle_style)
# ws.write(2, 4, 'Need to add Revenue', tittle_style)
# for x in [5,6,7,8,9]:
# ws.write(2, x, '', tittle_style)
# for x in [10,11,12,13,14,15,16,17,18,19,20,21,22,23]:
# ws.write(2, x, '', tittle_style2)
#====================================================================
# self.xls_write_row(ws, None, data, parser, 3, row_hdr0, tittle_style)
# self.xls_write_row(ws, None, data, parser, 4, row_hdr1, tittle_style)
# self.xls_write_row(ws, None, data, parser, 5, row_hdr2, tittle_style)
# self.xls_write_row(ws, None, data, parser, 3, row_hdr3, tittle_style2)#Space
for u in [0,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31]:
ws.write(4, u, '', tittle_style2)
#ws.write(4, 31, '', tittle_style_closed)
for s in [1,2,4,6,8,10,12,14,16,18,20,22,24,26,28]:
ws.write(4, s, '', tittle_bold_center_style_top)
ws.write(4, 30, 'TOTAL', tittle_bold_center_style_top)
#ws.write(4, 32, 'TOTAL', tittle_bold_center_style_top)
#ws.write(4, 34, 'REMAINING', tittle_bold_center_style_top)
#ws.write(6, 0, '', tittle_style2)
ws.write(5, 1, 'NO.', tittle_bold_center_style_bottom)
ws.write(5, 2, 'Account', tittle_bold_center_style_bottom)
ws.write(5, 4, 'Descriptions', tittle_bold_center_style_bottom)
ws.write(5, 6, data['form']['1']['date'], tittle_bold_center_style_bottom)
ws.write(5, 8, data['form']['2']['date'], tittle_bold_center_style_bottom)
ws.write(5, 10, data['form']['3']['date'], tittle_bold_center_style_bottom)
ws.write(5, 12, data['form']['4']['date'], tittle_bold_center_style_bottom)
ws.write(5, 14, data['form']['5']['date'], tittle_bold_center_style_bottom)
ws.write(5, 16, data['form']['6']['date'], tittle_bold_center_style_bottom)
ws.write(5, 18, data['form']['7']['date'], tittle_bold_center_style_bottom)
ws.write(5, 20, data['form']['8']['date'], tittle_bold_center_style_bottom)
ws.write(5, 22, data['form']['9']['date'], tittle_bold_center_style_bottom)
ws.write(5, 24, data['form']['10']['date'], tittle_bold_center_style_bottom)
ws.write(5, 26, data['form']['11']['date'], tittle_bold_center_style_bottom)
ws.write(5, 28, data['form']['12']['date'], tittle_bold_center_style_bottom)
ws.write(5, 30, '', tittle_bold_center_style_bottom)
#ws.write(5, 32, 'BUDGET', tittle_bold_center_style_bottom)
#ws.write(5, 34, 'BUDGET', tittle_bold_center_style_bottom)
#ws.write(6, 5, '', tittle_style2)
for a in [0,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31]:
ws.write(5, a, '', tittle_style2)
#ws.write(5, 31, '', tittle_style_closed)
# for b in [6,8,10,12,14,16,18,20,22,24,26,28,30]:
# ws.write(5, b, '', tittle_bold_center_style_bottom)
self.xls_write_row(ws, None, data, parser, 0, row_hdr4, tittle_left_style)#Company
self.xls_write_row(ws, None, data, parser, 1, row_hdr5, tittle_left_style)#Budget Rolling
self.xls_write_row(ws, None, data, parser, 2, row_hdr6, tittle_left_italic_style)#As of
self.xls_write_row(ws, None, data, parser, 3, row_hdr7, tittle_left_style)#Space
#self.xls_write_row(ws, None, data, parser, 5, row_hdr10, tittle_bold_center_style_top)#header space
#self.xls_write_row(ws, None, data, parser, 6, row_hdr8, tittle_bold_center_style_bottom)#header
#self.xls_write_row(ws, None, data, parser, 6, row_hdr7, tittle_style2)#space white
#self.xls_write_row(ws, None, data, parser, 6, row_hdr9, tittle_bold_center_style)#state
row_count = 6
ws.horz_split_pos = row_count
#IF WITH DEPARTMENT
if len(parser._get_department(data))>0:
for dep in parser._get_department(data):
self.xls_write_row(ws, dep, data, parser, row_count, row_hdr_dep, tittle_style_closed_dep)
row_count += 1
self.xls_write_row(ws, None, data, parser, row_count, row_hdr7, tittle_style_closed)
row_count += 1
number=0
for i in parser.get_data(data):
if i['type'] == 'view':
style = row_bold_style
else:
style = row_normal_style
if data['form']['without_zero']:
if i['balance']:
for a in [0,3,5,7,9,11,13,15,17,19,21,23,25,27,29]:
ws.write(row_count, a, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
#self.xls_write_row(ws, i, data, parser, row_count, row_loop, style)
ws.write(row_count, 1, number, style)
ws.write(row_count, 2, ' '*i['level'] + i['name'], style)
ws.write(row_count, 4, 'Budget :', style_bold)
ws.write(row_count, 6, parser.get_period(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 8, parser.get_period(as_of, data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['2']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 10, parser.get_period(as_of, data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['3']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 12, parser.get_period(as_of, data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['4']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 14, parser.get_period(as_of, data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['5']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 16, parser.get_period(as_of, data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['6']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 18, parser.get_period(as_of, data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['7']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 20, parser.get_period(as_of, data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['8']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 22, parser.get_period(as_of, data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['9']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 24, parser.get_period(as_of, data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['10']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 26, parser.get_period(as_of, data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['11']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 28, parser.get_period(as_of, data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['12']['end'], i['type'], i['item'], dep['id']), style)
total_budget = parser.get_period_budget_total(data['form']['fiscalyear_id'],data['form']['as_of_date'],i['id'], i['type'], dep['id'])
ws.write(row_count, 30, total_budget, style)
row_count += 1
#ws.write(row_count, 20, parser.get_period(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id']), tittle_bold_center_style_bottom)
#self.xls_write_row(ws, i, data, parser, row_count, row_loop_actual, style)
for b in [0,1,2,3,5,7,9,11,13,15,17,19,21,23,25,27,29]:
ws.write(row_count, b, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
ws.write(row_count, 4, 'Actual :', style_bold)
ws.write(row_count, 6, parser.get_period_actual(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 8, parser.get_period_actual(as_of, data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['2']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 10, parser.get_period_actual(as_of, data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['3']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 12, parser.get_period_actual(as_of, data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['4']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 14, parser.get_period_actual(as_of, data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['5']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 16, parser.get_period_actual(as_of, data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['6']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 18, parser.get_period_actual(as_of, data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['7']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 20, parser.get_period_actual(as_of, data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['8']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 22, parser.get_period_actual(as_of, data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['9']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 24, parser.get_period_actual(as_of, data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['10']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 26, parser.get_period_actual(as_of, data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['11']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 28, parser.get_period_actual(as_of, data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['12']['end'], i['type'], i['item'], dep['id']), style)
total_actual = parser.get_period_actual_total(data['form']['fiscalyear_id'],data['form']['as_of_date'],i['id'], i['type'], dep['id'])
ws.write(row_count, 30, total_actual, style)
row_count += 1
#self.xls_write_row(ws, i, data, parser, row_count, row_loop_under, style_under)
for c in [0,1,2,3,5,7,9,11,13,15,17,19,21,23,25,27,29]:
ws.write(row_count, c, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
ws.write(row_count, 4, 'Under/(Over) :', style_under_bold)
ws.write(row_count, 6, parser.get_period_under(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 8, parser.get_period_under(as_of, data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['2']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 10, parser.get_period_under(as_of, data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['3']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 12, parser.get_period_under(as_of, data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['4']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 14, parser.get_period_under(as_of, data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['5']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 16, parser.get_period_under(as_of, data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['6']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 18, parser.get_period_under(as_of, data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['7']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 20, parser.get_period_under(as_of, data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['8']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 22, parser.get_period_under(as_of, data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['9']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 24, parser.get_period_under(as_of, data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['10']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 26, parser.get_period_under(as_of, data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['11']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 28, parser.get_period_under(as_of, data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['12']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 30, (total_budget-total_actual), style_under)
row_count += 1
ws.write(row_count, 2, ' '*i['level']+'Actual', style_italic_bold)
for e in [0,1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]:
ws.write(row_count, e, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
row_count += 1
if data['form']['with_transaction'] and i['type'] == 'normal':
for t in parser.get_transaction(data['form']['as_of_date'],i['id']):
actual_1 = get_transaction_period(as_of,i['id'], data['form']['1']['start'], data['form']['1']['end'], t['id'], dep['id'])
actual_2 = get_transaction_period(as_of,i['id'], data['form']['2']['start'], data['form']['2']['end'], t['id'], dep['id'])
actual_3 = get_transaction_period(as_of,i['id'], data['form']['3']['start'], data['form']['3']['end'], t['id'], dep['id'])
actual_4 = get_transaction_period(as_of,i['id'], data['form']['4']['start'], data['form']['4']['end'], t['id'], dep['id'])
actual_5 = get_transaction_period(as_of,i['id'], data['form']['5']['start'], data['form']['5']['end'], t['id'], dep['id'])
actual_6 = get_transaction_period(as_of,i['id'], data['form']['6']['start'], data['form']['6']['end'], t['id'], dep['id'])
actual_7 = get_transaction_period(as_of,i['id'], data['form']['7']['start'], data['form']['7']['end'], t['id'], dep['id'])
actual_8 = get_transaction_period(as_of,i['id'], data['form']['8']['start'], data['form']['8']['end'], t['id'], dep['id'])
actual_9 = get_transaction_period(as_of,i['id'], data['form']['9']['start'], data['form']['9']['end'], t['id'], dep['id'])
actual_10 = get_transaction_period(as_of,i['id'], data['form']['10']['start'], data['form']['10']['end'], t['id'], dep['id'])
actual_11 = get_transaction_period(as_of,i['id'], data['form']['11']['start'], data['form']['11']['end'], t['id'], dep['id'])
actual_12 = get_transaction_period(as_of,i['id'], data['form']['12']['start'], data['form']['12']['end'], t['id'], dep['id'])
#--------
ws.write(row_count, 4, '- '+t.name, style)
ws.write(row_count, 6, actual_1, style)
ws.write(row_count, 8, actual_2, style)
ws.write(row_count, 10, actual_3, style)
ws.write(row_count, 12, actual_4, style)
ws.write(row_count, 14, actual_5, style)
ws.write(row_count, 16, actual_6, style)
ws.write(row_count, 18, actual_7, style)
ws.write(row_count, 20, actual_8, style)
ws.write(row_count, 22, actual_9, style)
ws.write(row_count, 24, actual_10, style)
ws.write(row_count, 26, actual_11, style)
ws.write(row_count, 28, actual_12, style)
actual_total = actual_1 + actual_2 + actual_3 + actual_4 + actual_5 + actual_6 + actual_7 + actual_8 + actual_9 + actual_10 + actual_11 + actual_12
ws.write(row_count, 30, actual_total, style)
row_count += 1
ws.write(row_count, 2, ' '*i['level']+'Budget (unutilized)', style_italic_bold)
for f in [0,1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]:
ws.write(row_count, f, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
row_count += 1
# Per-month unutilized budget (budget allocated but not consumed) for the
# current account `i` and department `dep`.
# BUG FIX: months 2-12 previously passed data['form']['1']['end'] as the
# period end (copy-paste from the month-1 line), so every later month's
# window started at its own period start but ended at period 1's end.
# Each month now uses its own start AND end boundaries, consistent with
# the get_period / get_period_actual / get_period_under call sites above.
# (Variable names, including the historical 'unutulized' spelling, are
# kept because the write-out code below reads them individually.)
unutulized_1 = parser.get_period_unutilized(as_of,data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id'])
unutulized_2 = parser.get_period_unutilized(as_of,data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['2']['end'], i['type'], i['item'], dep['id'])
unutulized_3 = parser.get_period_unutilized(as_of,data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['3']['end'], i['type'], i['item'], dep['id'])
unutulized_4 = parser.get_period_unutilized(as_of,data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['4']['end'], i['type'], i['item'], dep['id'])
unutulized_5 = parser.get_period_unutilized(as_of,data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['5']['end'], i['type'], i['item'], dep['id'])
unutulized_6 = parser.get_period_unutilized(as_of,data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['6']['end'], i['type'], i['item'], dep['id'])
unutulized_7 = parser.get_period_unutilized(as_of,data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['7']['end'], i['type'], i['item'], dep['id'])
unutulized_8 = parser.get_period_unutilized(as_of,data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['8']['end'], i['type'], i['item'], dep['id'])
unutulized_9 = parser.get_period_unutilized(as_of,data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['9']['end'], i['type'], i['item'], dep['id'])
unutulized_10 = parser.get_period_unutilized(as_of,data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['10']['end'], i['type'], i['item'], dep['id'])
unutulized_11 = parser.get_period_unutilized(as_of,data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['11']['end'], i['type'], i['item'], dep['id'])
unutulized_12 = parser.get_period_unutilized(as_of,data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['12']['end'], i['type'], i['item'], dep['id'])
ws.write(row_count, 2, ' '*i['level']+'- %s'%parser.get_desc_budget_line(as_of,data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id']), style)
for z in [0,1,3,5,7,9,11,13,15,17,19,21,23,25,27,29]:
ws.write(row_count, z, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
ws.write(row_count, 6, unutulized_1, style)
ws.write(row_count, 8, unutulized_2, style)
ws.write(row_count, 10, unutulized_3, style)
ws.write(row_count, 12, unutulized_4, style)
ws.write(row_count, 14, unutulized_5, style)
ws.write(row_count, 16, unutulized_6, style)
ws.write(row_count, 18, unutulized_7, style)
ws.write(row_count, 20, unutulized_8, style)
ws.write(row_count, 22, unutulized_9, style)
ws.write(row_count, 24, unutulized_10, style)
ws.write(row_count, 26, unutulized_11, style)
ws.write(row_count, 28, unutulized_12, style)
unutilized_total = unutulized_1 + unutulized_2 + unutulized_3 + unutulized_4 + unutulized_5 + unutulized_6 + unutulized_7 + unutulized_8 + unutulized_9 + unutulized_10 + unutulized_11 + unutulized_12
ws.write(row_count, 30, unutilized_total, style)
row_count += 1
number+=1
#ELSE WITH ZERO
else:
for a in [0,3,5,7,9,11,13,15,17,19,21,23,25,27,29]:
ws.write(row_count, a, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
#self.xls_write_row(ws, i, data, parser, row_count, row_loop, style)
ws.write(row_count, 1, number, style)
ws.write(row_count, 2, ' '*i['level'] + i['name'], style)
ws.write(row_count, 4, 'Budget :', style_bold)
ws.write(row_count, 6, parser.get_period(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 8, parser.get_period(as_of, data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['2']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 10, parser.get_period(as_of, data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['3']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 12, parser.get_period(as_of, data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['4']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 14, parser.get_period(as_of, data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['5']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 16, parser.get_period(as_of, data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['6']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 18, parser.get_period(as_of, data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['7']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 20, parser.get_period(as_of, data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['8']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 22, parser.get_period(as_of, data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['9']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 24, parser.get_period(as_of, data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['10']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 26, parser.get_period(as_of, data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['11']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 28, parser.get_period(as_of, data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['12']['end'], i['type'], i['item'], dep['id']), style)
total_budget = parser.get_period_budget_total(data['form']['fiscalyear_id'],data['form']['as_of_date'],i['id'], i['type'])
ws.write(row_count, 30, total_budget, style)
row_count += 1
#ws.write(row_count, 20, parser.get_period(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id']), tittle_bold_center_style_bottom)
#self.xls_write_row(ws, i, data, parser, row_count, row_loop_actual, style)
for b in [0,1,2,3,5,7,9,11,13,15,17,19,21,23,25,27,29]:
ws.write(row_count, b, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
ws.write(row_count, 4, 'Actual :', style_bold)
ws.write(row_count, 6, parser.get_period_actual(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 8, parser.get_period_actual(as_of, data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['2']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 10, parser.get_period_actual(as_of, data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['3']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 12, parser.get_period_actual(as_of, data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['4']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 14, parser.get_period_actual(as_of, data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['5']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 16, parser.get_period_actual(as_of, data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['6']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 18, parser.get_period_actual(as_of, data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['7']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 20, parser.get_period_actual(as_of, data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['8']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 22, parser.get_period_actual(as_of, data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['9']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 24, parser.get_period_actual(as_of, data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['10']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 26, parser.get_period_actual(as_of, data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['11']['end'], i['type'], i['item'], dep['id']), style)
ws.write(row_count, 28, parser.get_period_actual(as_of, data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['12']['end'], i['type'], i['item'], dep['id']), style)
total_actual = parser.get_period_actual_total(data['form']['fiscalyear_id'],data['form']['as_of_date'],i['id'], i['type'],)
ws.write(row_count, 30, total_actual, style)
row_count += 1
#self.xls_write_row(ws, i, data, parser, row_count, row_loop_under, style_under)
for c in [0,1,2,3,5,7,9,11,13,15,17,19,21,23,25,27,29]:
ws.write(row_count, c, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
ws.write(row_count, 4, 'Under/(Over) :', style_under_bold)
ws.write(row_count, 6, parser.get_period_under(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 8, parser.get_period_under(as_of, data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['2']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 10, parser.get_period_under(as_of, data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['3']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 12, parser.get_period_under(as_of, data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['4']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 14, parser.get_period_under(as_of, data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['5']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 16, parser.get_period_under(as_of, data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['6']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 18, parser.get_period_under(as_of, data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['7']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 20, parser.get_period_under(as_of, data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['8']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 22, parser.get_period_under(as_of, data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['9']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 24, parser.get_period_under(as_of, data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['10']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 26, parser.get_period_under(as_of, data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['11']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 28, parser.get_period_under(as_of, data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['12']['end'], i['type'], i['item'], dep['id']), style_under)
ws.write(row_count, 30, (total_budget-total_actual), style_under)
row_count += 1
ws.write(row_count, 2, ' '*i['level']+'Actual', style_italic_bold)
for e in [0,1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]:
ws.write(row_count, e, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
row_count += 1
if data['form']['with_transaction'] and i['type'] == 'normal':
for t in parser.get_transaction(data['form']['as_of_date'],i['id']):
actual_1 = get_transaction_period(as_of,i['id'], data['form']['1']['start'], data['form']['1']['end'], t['id'], dep['id'])
actual_2 = get_transaction_period(as_of,i['id'], data['form']['2']['start'], data['form']['2']['end'], t['id'], dep['id'])
actual_3 = get_transaction_period(as_of,i['id'], data['form']['3']['start'], data['form']['3']['end'], t['id'], dep['id'])
actual_4 = get_transaction_period(as_of,i['id'], data['form']['4']['start'], data['form']['4']['end'], t['id'], dep['id'])
actual_5 = get_transaction_period(as_of,i['id'], data['form']['5']['start'], data['form']['5']['end'], t['id'], dep['id'])
actual_6 = get_transaction_period(as_of,i['id'], data['form']['6']['start'], data['form']['6']['end'], t['id'], dep['id'])
actual_7 = get_transaction_period(as_of,i['id'], data['form']['7']['start'], data['form']['7']['end'], t['id'], dep['id'])
actual_8 = get_transaction_period(as_of,i['id'], data['form']['8']['start'], data['form']['8']['end'], t['id'], dep['id'])
actual_9 = get_transaction_period(as_of,i['id'], data['form']['9']['start'], data['form']['9']['end'], t['id'], dep['id'])
actual_10 = get_transaction_period(as_of,i['id'], data['form']['10']['start'], data['form']['10']['end'], t['id'], dep['id'])
actual_11 = get_transaction_period(as_of,i['id'], data['form']['11']['start'], data['form']['11']['end'], t['id'], dep['id'])
actual_12 = get_transaction_period(as_of,i['id'], data['form']['12']['start'], data['form']['12']['end'], t['id'], dep['id'])
#--------
ws.write(row_count, 4, '- '+t.name, style)
ws.write(row_count, 6, actual_1, style)
ws.write(row_count, 8, actual_2, style)
ws.write(row_count, 10, actual_3, style)
ws.write(row_count, 12, actual_4, style)
ws.write(row_count, 14, actual_5, style)
ws.write(row_count, 16, actual_6, style)
ws.write(row_count, 18, actual_7, style)
ws.write(row_count, 20, actual_8, style)
ws.write(row_count, 22, actual_9, style)
ws.write(row_count, 24, actual_10, style)
ws.write(row_count, 26, actual_11, style)
ws.write(row_count, 28, actual_12, style)
actual_total = actual_1 + actual_2 + actual_3 + actual_4 + actual_5 + actual_6 + actual_7 + actual_8 + actual_9 + actual_10 + actual_11 + actual_12
ws.write(row_count, 30, actual_total, style)
row_count += 1
ws.write(row_count, 2, ' '*i['level']+'Budget (unutilized)', style_italic_bold)
for f in [0,1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]:
ws.write(row_count, f, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
row_count += 1
unutulized_1 = parser.get_period_unutilized(as_of,data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id'])
unutulized_2 = parser.get_period_unutilized(as_of,data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id'])
unutulized_3 = parser.get_period_unutilized(as_of,data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id'])
unutulized_4 = parser.get_period_unutilized(as_of,data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id'])
unutulized_5 = parser.get_period_unutilized(as_of,data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id'])
unutulized_6 = parser.get_period_unutilized(as_of,data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id'])
unutulized_7 = parser.get_period_unutilized(as_of,data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id'])
unutulized_8 = parser.get_period_unutilized(as_of,data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id'])
unutulized_9 = parser.get_period_unutilized(as_of,data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id'])
unutulized_10 = parser.get_period_unutilized(as_of,data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id'])
unutulized_11 = parser.get_period_unutilized(as_of,data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id'])
unutulized_12 = parser.get_period_unutilized(as_of,data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id'])
ws.write(row_count, 2, ' '*i['level']+'- %s'%parser.get_desc_budget_line(as_of,data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], dep['id']), style)
for z in [0,1,3,5,7,9,11,13,15,17,19,21,23,25,27,29]:
ws.write(row_count, z, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
ws.write(row_count, 6, unutulized_1, style)
ws.write(row_count, 8, unutulized_2, style)
ws.write(row_count, 10, unutulized_3, style)
ws.write(row_count, 12, unutulized_4, style)
ws.write(row_count, 14, unutulized_5, style)
ws.write(row_count, 16, unutulized_6, style)
ws.write(row_count, 18, unutulized_7, style)
ws.write(row_count, 20, unutulized_8, style)
ws.write(row_count, 22, unutulized_9, style)
ws.write(row_count, 24, unutulized_10, style)
ws.write(row_count, 26, unutulized_11, style)
ws.write(row_count, 28, unutulized_12, style)
unutilized_total = unutulized_1 + unutulized_2 + unutulized_3 + unutulized_4 + unutulized_5 + unutulized_6 + unutulized_7 + unutulized_8 + unutulized_9 + unutulized_10 + unutulized_11 + unutulized_12
ws.write(row_count, 30, unutilized_total, style)
row_count += 1
number+=1
self.xls_write_row(ws, None, data, parser, row_count, row_hdr7, tittle_style_closed)
row_count += 1
self.xls_write_row(ws, None, data, parser, row_count, row_hdr7, tittle_style_closed_bottom)
row_count += 1
else:
#ELSE WITHOUT DEPARTMENT
self.xls_write_row(ws, None, data, parser, row_count, row_hdr7, tittle_style_closed)
row_count += 1
number=0
for i in parser.get_data(data):
number+=1
if i['type'] == 'view':
style = row_bold_style
else:
style = row_normal_style
if data['form']['without_zero']:
if i['balance']:
for a in [0,3,5,7,9,11,13,15,17,19,21,23,25,27,29]:
ws.write(row_count, a, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
#self.xls_write_row(ws, i, data, parser, row_count, row_loop, style)
ws.write(row_count, 1, number, style)
ws.write(row_count, 2, ' '*i['level'] + i['name'], style)
ws.write(row_count, 4, 'Budget :', style_bold)
ws.write(row_count, 6, parser.get_period(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 8, parser.get_period(as_of, data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['2']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 10, parser.get_period(as_of, data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['3']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 12, parser.get_period(as_of, data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['4']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 14, parser.get_period(as_of, data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['5']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 16, parser.get_period(as_of, data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['6']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 18, parser.get_period(as_of, data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['7']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 20, parser.get_period(as_of, data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['8']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 22, parser.get_period(as_of, data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['9']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 24, parser.get_period(as_of, data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['10']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 26, parser.get_period(as_of, data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['11']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 28, parser.get_period(as_of, data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['12']['end'], i['type'], i['item'], False), style)
total_budget = parser.get_period_budget_total(data['form']['fiscalyear_id'],data['form']['as_of_date'],i['id'], i['type'])
ws.write(row_count, 30, total_budget, style)
row_count += 1
#ws.write(row_count, 20, parser.get_period(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], False), tittle_bold_center_style_bottom)
#self.xls_write_row(ws, i, data, parser, row_count, row_loop_actual, style)
for b in [0,1,2,3,5,7,9,11,13,15,17,19,21,23,25,27,29]:
ws.write(row_count, b, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
ws.write(row_count, 4, 'Actual :', style_bold)
ws.write(row_count, 6, parser.get_period_actual(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 8, parser.get_period_actual(as_of, data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['2']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 10, parser.get_period_actual(as_of, data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['3']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 12, parser.get_period_actual(as_of, data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['4']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 14, parser.get_period_actual(as_of, data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['5']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 16, parser.get_period_actual(as_of, data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['6']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 18, parser.get_period_actual(as_of, data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['7']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 20, parser.get_period_actual(as_of, data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['8']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 22, parser.get_period_actual(as_of, data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['9']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 24, parser.get_period_actual(as_of, data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['10']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 26, parser.get_period_actual(as_of, data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['11']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 28, parser.get_period_actual(as_of, data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['12']['end'], i['type'], i['item'], False), style)
total_actual = parser.get_period_actual_total(data['form']['fiscalyear_id'],data['form']['as_of_date'],i['id'], i['type'],)
ws.write(row_count, 30, total_actual, style)
row_count += 1
#self.xls_write_row(ws, i, data, parser, row_count, row_loop_under, style_under)
for c in [0,1,2,3,5,7,9,11,13,15,17,19,21,23,25,27,29]:
ws.write(row_count, c, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
ws.write(row_count, 4, 'Under/(Over) :', style_under_bold)
ws.write(row_count, 6, parser.get_period_under(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 8, parser.get_period_under(as_of, data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['2']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 10, parser.get_period_under(as_of, data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['3']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 12, parser.get_period_under(as_of, data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['4']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 14, parser.get_period_under(as_of, data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['5']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 16, parser.get_period_under(as_of, data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['6']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 18, parser.get_period_under(as_of, data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['7']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 20, parser.get_period_under(as_of, data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['8']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 22, parser.get_period_under(as_of, data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['9']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 24, parser.get_period_under(as_of, data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['10']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 26, parser.get_period_under(as_of, data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['11']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 28, parser.get_period_under(as_of, data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['12']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 30, (total_budget-total_actual), style_under)
row_count += 1
ws.write(row_count, 2, ' '*i['level']+'Actual', style_italic_bold)
for e in [0,1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]:
ws.write(row_count, e, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
row_count += 1
if data['form']['with_transaction'] and i['type'] == 'normal':
for t in parser.get_transaction(data['form']['as_of_date'],i['id']):
actual_1 = get_transaction_period(as_of,i['id'], data['form']['1']['start'], data['form']['1']['end'], t['id'],)
actual_2 = get_transaction_period(as_of,i['id'], data['form']['2']['start'], data['form']['2']['end'], t['id'],)
actual_3 = get_transaction_period(as_of,i['id'], data['form']['3']['start'], data['form']['3']['end'], t['id'],)
actual_4 = get_transaction_period(as_of,i['id'], data['form']['4']['start'], data['form']['4']['end'], t['id'],)
actual_5 = get_transaction_period(as_of,i['id'], data['form']['5']['start'], data['form']['5']['end'], t['id'],)
actual_6 = get_transaction_period(as_of,i['id'], data['form']['6']['start'], data['form']['6']['end'], t['id'],)
actual_7 = get_transaction_period(as_of,i['id'], data['form']['7']['start'], data['form']['7']['end'], t['id'],)
actual_8 = get_transaction_period(as_of,i['id'], data['form']['8']['start'], data['form']['8']['end'], t['id'],)
actual_9 = get_transaction_period(as_of,i['id'], data['form']['9']['start'], data['form']['9']['end'], t['id'],)
actual_10 = get_transaction_period(as_of,i['id'], data['form']['10']['start'], data['form']['10']['end'], t['id'],)
actual_11 = get_transaction_period(as_of,i['id'], data['form']['11']['start'], data['form']['11']['end'], t['id'],)
actual_12 = get_transaction_period(as_of,i['id'], data['form']['12']['start'], data['form']['12']['end'], t['id'],)
#--------
ws.write(row_count, 4, '- '+t.name, style)
ws.write(row_count, 6, actual_1, style)
ws.write(row_count, 8, actual_2, style)
ws.write(row_count, 10, actual_3, style)
ws.write(row_count, 12, actual_4, style)
ws.write(row_count, 14, actual_5, style)
ws.write(row_count, 16, actual_6, style)
ws.write(row_count, 18, actual_7, style)
ws.write(row_count, 20, actual_8, style)
ws.write(row_count, 22, actual_9, style)
ws.write(row_count, 24, actual_10, style)
ws.write(row_count, 26, actual_11, style)
ws.write(row_count, 28, actual_12, style)
actual_total = actual_1 + actual_2 + actual_3 + actual_4 + actual_5 + actual_6 + actual_7 + actual_8 + actual_9 + actual_10 + actual_11 + actual_12
ws.write(row_count, 30, actual_total, style)
row_count += 1
ws.write(row_count, 2, ' '*i['level']+'Budget (unutilized)', style_italic_bold)
for f in [0,1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]:
ws.write(row_count, f, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
row_count += 1
unutulized_1 = parser.get_period_unutilized(as_of,data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_2 = parser.get_period_unutilized(as_of,data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_3 = parser.get_period_unutilized(as_of,data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_4 = parser.get_period_unutilized(as_of,data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_5 = parser.get_period_unutilized(as_of,data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_6 = parser.get_period_unutilized(as_of,data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_7 = parser.get_period_unutilized(as_of,data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_8 = parser.get_period_unutilized(as_of,data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_9 = parser.get_period_unutilized(as_of,data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_10 = parser.get_period_unutilized(as_of,data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_11 = parser.get_period_unutilized(as_of,data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_12 = parser.get_period_unutilized(as_of,data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['1']['end'], i['type'], i['item'])
ws.write(row_count, 2, ' '*i['level']+'- %s'%parser.get_desc_budget_line(as_of,data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item']), style)
for z in [0,1,3,5,7,9,11,13,15,17,19,21,23,25,27,29]:
ws.write(row_count, z, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
ws.write(row_count, 6, unutulized_1, style)
ws.write(row_count, 8, unutulized_2, style)
ws.write(row_count, 10, unutulized_3, style)
ws.write(row_count, 12, unutulized_4, style)
ws.write(row_count, 14, unutulized_5, style)
ws.write(row_count, 16, unutulized_6, style)
ws.write(row_count, 18, unutulized_7, style)
ws.write(row_count, 20, unutulized_8, style)
ws.write(row_count, 22, unutulized_9, style)
ws.write(row_count, 24, unutulized_10, style)
ws.write(row_count, 26, unutulized_11, style)
ws.write(row_count, 28, unutulized_12, style)
unutilized_total = unutulized_1 + unutulized_2 + unutulized_3 + unutulized_4 + unutulized_5 + unutulized_6 + unutulized_7 + unutulized_8 + unutulized_9 + unutulized_10 + unutulized_11 + unutulized_12
ws.write(row_count, 30, unutilized_total, style)
row_count += 1
#ELSE WITH ZERO
else:
for a in [0,3,5,7,9,11,13,15,17,19,21,23,25,27,29]:
ws.write(row_count, a, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
#self.xls_write_row(ws, i, data, parser, row_count, row_loop, style)
ws.write(row_count, 1, number, style)
ws.write(row_count, 2, ' '*i['level'] + i['name'], style)
ws.write(row_count, 4, 'Budget :', style_bold)
ws.write(row_count, 6, parser.get_period(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 8, parser.get_period(as_of, data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['2']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 10, parser.get_period(as_of, data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['3']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 12, parser.get_period(as_of, data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['4']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 14, parser.get_period(as_of, data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['5']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 16, parser.get_period(as_of, data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['6']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 18, parser.get_period(as_of, data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['7']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 20, parser.get_period(as_of, data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['8']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 22, parser.get_period(as_of, data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['9']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 24, parser.get_period(as_of, data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['10']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 26, parser.get_period(as_of, data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['11']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 28, parser.get_period(as_of, data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['12']['end'], i['type'], i['item'], False), style)
total_budget = parser.get_period_budget_total(data['form']['fiscalyear_id'],data['form']['as_of_date'],i['id'], i['type'])
ws.write(row_count, 30, total_budget, style)
row_count += 1
#ws.write(row_count, 20, parser.get_period(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], False), tittle_bold_center_style_bottom)
#self.xls_write_row(ws, i, data, parser, row_count, row_loop_actual, style)
for b in [0,1,2,3,5,7,9,11,13,15,17,19,21,23,25,27,29]:
ws.write(row_count, b, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
ws.write(row_count, 4, 'Actual :', style_bold)
ws.write(row_count, 6, parser.get_period_actual(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 8, parser.get_period_actual(as_of, data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['2']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 10, parser.get_period_actual(as_of, data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['3']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 12, parser.get_period_actual(as_of, data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['4']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 14, parser.get_period_actual(as_of, data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['5']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 16, parser.get_period_actual(as_of, data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['6']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 18, parser.get_period_actual(as_of, data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['7']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 20, parser.get_period_actual(as_of, data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['8']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 22, parser.get_period_actual(as_of, data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['9']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 24, parser.get_period_actual(as_of, data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['10']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 26, parser.get_period_actual(as_of, data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['11']['end'], i['type'], i['item'], False), style)
ws.write(row_count, 28, parser.get_period_actual(as_of, data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['12']['end'], i['type'], i['item'], False), style)
total_actual = parser.get_period_actual_total(data['form']['fiscalyear_id'],data['form']['as_of_date'],i['id'], i['type'],)
ws.write(row_count, 30, total_actual, style)
row_count += 1
#self.xls_write_row(ws, i, data, parser, row_count, row_loop_under, style_under)
for c in [0,1,2,3,5,7,9,11,13,15,17,19,21,23,25,27,29]:
ws.write(row_count, c, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
ws.write(row_count, 4, 'Under/(Over) :', style_under_bold)
ws.write(row_count, 6, parser.get_period_under(as_of, data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 8, parser.get_period_under(as_of, data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['2']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 10, parser.get_period_under(as_of, data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['3']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 12, parser.get_period_under(as_of, data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['4']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 14, parser.get_period_under(as_of, data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['5']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 16, parser.get_period_under(as_of, data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['6']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 18, parser.get_period_under(as_of, data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['7']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 20, parser.get_period_under(as_of, data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['8']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 22, parser.get_period_under(as_of, data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['9']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 24, parser.get_period_under(as_of, data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['10']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 26, parser.get_period_under(as_of, data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['11']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 28, parser.get_period_under(as_of, data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['12']['end'], i['type'], i['item'], False), style_under)
ws.write(row_count, 30, (total_budget-total_actual), style_under)
row_count += 1
ws.write(row_count, 2, ' '*i['level']+'Actual', style_italic_bold)
for e in [0,1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]:
ws.write(row_count, e, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
row_count += 1
if data['form']['with_transaction'] and i['type'] == 'normal':
for t in parser.get_transaction(data['form']['as_of_date'],i['id']):
actual_1 = get_transaction_period(as_of,i['id'], data['form']['1']['start'], data['form']['1']['end'], t['id'],)
actual_2 = get_transaction_period(as_of,i['id'], data['form']['2']['start'], data['form']['2']['end'], t['id'],)
actual_3 = get_transaction_period(as_of,i['id'], data['form']['3']['start'], data['form']['3']['end'], t['id'],)
actual_4 = get_transaction_period(as_of,i['id'], data['form']['4']['start'], data['form']['4']['end'], t['id'],)
actual_5 = get_transaction_period(as_of,i['id'], data['form']['5']['start'], data['form']['5']['end'], t['id'],)
actual_6 = get_transaction_period(as_of,i['id'], data['form']['6']['start'], data['form']['6']['end'], t['id'],)
actual_7 = get_transaction_period(as_of,i['id'], data['form']['7']['start'], data['form']['7']['end'], t['id'],)
actual_8 = get_transaction_period(as_of,i['id'], data['form']['8']['start'], data['form']['8']['end'], t['id'],)
actual_9 = get_transaction_period(as_of,i['id'], data['form']['9']['start'], data['form']['9']['end'], t['id'],)
actual_10 = get_transaction_period(as_of,i['id'], data['form']['10']['start'], data['form']['10']['end'], t['id'],)
actual_11 = get_transaction_period(as_of,i['id'], data['form']['11']['start'], data['form']['11']['end'], t['id'],)
actual_12 = get_transaction_period(as_of,i['id'], data['form']['12']['start'], data['form']['12']['end'], t['id'],)
#--------
ws.write(row_count, 4, '- '+t.name, style)
ws.write(row_count, 6, actual_1, style)
ws.write(row_count, 8, actual_2, style)
ws.write(row_count, 10, actual_3, style)
ws.write(row_count, 12, actual_4, style)
ws.write(row_count, 14, actual_5, style)
ws.write(row_count, 16, actual_6, style)
ws.write(row_count, 18, actual_7, style)
ws.write(row_count, 20, actual_8, style)
ws.write(row_count, 22, actual_9, style)
ws.write(row_count, 24, actual_10, style)
ws.write(row_count, 26, actual_11, style)
ws.write(row_count, 28, actual_12, style)
actual_total = actual_1 + actual_2 + actual_3 + actual_4 + actual_5 + actual_6 + actual_7 + actual_8 + actual_9 + actual_10 + actual_11 + actual_12
ws.write(row_count, 30, actual_total, style)
row_count += 1
ws.write(row_count, 2, ' '*i['level']+'Budget (unutilized)', style_italic_bold)
for f in [0,1,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]:
ws.write(row_count, f, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
row_count += 1
unutulized_1 = parser.get_period_unutilized(as_of,data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_2 = parser.get_period_unutilized(as_of,data['form']['2']['id'], i['id'], data['form']['2']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_3 = parser.get_period_unutilized(as_of,data['form']['3']['id'], i['id'], data['form']['3']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_4 = parser.get_period_unutilized(as_of,data['form']['4']['id'], i['id'], data['form']['4']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_5 = parser.get_period_unutilized(as_of,data['form']['5']['id'], i['id'], data['form']['5']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_6 = parser.get_period_unutilized(as_of,data['form']['6']['id'], i['id'], data['form']['6']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_7 = parser.get_period_unutilized(as_of,data['form']['7']['id'], i['id'], data['form']['7']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_8 = parser.get_period_unutilized(as_of,data['form']['8']['id'], i['id'], data['form']['8']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_9 = parser.get_period_unutilized(as_of,data['form']['9']['id'], i['id'], data['form']['9']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_10 = parser.get_period_unutilized(as_of,data['form']['10']['id'], i['id'], data['form']['10']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_11 = parser.get_period_unutilized(as_of,data['form']['11']['id'], i['id'], data['form']['11']['start'], data['form']['1']['end'], i['type'], i['item'])
unutulized_12 = parser.get_period_unutilized(as_of,data['form']['12']['id'], i['id'], data['form']['12']['start'], data['form']['1']['end'], i['type'], i['item'])
ws.write(row_count, 2, ' '*i['level']+'- %s'%parser.get_desc_budget_line(as_of,data['form']['1']['id'], i['id'], data['form']['1']['start'], data['form']['1']['end'], i['type'], i['item']), style)
for z in [0,1,3,5,7,9,11,13,15,17,19,21,23,25,27,29]:
ws.write(row_count, z, '', tittle_style2)
ws.write(row_count, 31, '', tittle_style_closed)
ws.write(row_count, 6, unutulized_1, style)
ws.write(row_count, 8, unutulized_2, style)
ws.write(row_count, 10, unutulized_3, style)
ws.write(row_count, 12, unutulized_4, style)
ws.write(row_count, 14, unutulized_5, style)
ws.write(row_count, 16, unutulized_6, style)
ws.write(row_count, 18, unutulized_7, style)
ws.write(row_count, 20, unutulized_8, style)
ws.write(row_count, 22, unutulized_9, style)
ws.write(row_count, 24, unutulized_10, style)
ws.write(row_count, 26, unutulized_11, style)
ws.write(row_count, 28, unutulized_12, style)
unutilized_total = unutulized_1 + unutulized_2 + unutulized_3 + unutulized_4 + unutulized_5 + unutulized_6 + unutulized_7 + unutulized_8 + unutulized_9 + unutulized_10 + unutulized_11 + unutulized_12
ws.write(row_count, 30, unutilized_total, style)
row_count += 1
self.xls_write_row(ws, None, data, parser, row_count, row_hdr7, tittle_style_closed_bottom)
row_count += 1
pass
# Register the XLS budget-detail report with the OpenERP/Odoo report engine.
# NOTE(review): `budget_detail_xls` and the `budget_detail` parser class are
# defined earlier in this file (outside this excerpt) — confirm before moving.
budget_detail_xls(
    'report.budget.detail.xls',
    'ad_budget.item',
    'addons/ad_budget_detail/report/budget_detail.mako',
    parser=budget_detail,
    header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"aryalemon.mail@gmail.com"
] | aryalemon.mail@gmail.com |
3113ca577299f9cc1031afc05fceafb52ffc31cc | 9479825e910f237557325fe3513f3c4d39c841de | /pmg/bills.py | 1bad8a91e470ef5762b2bc0ceec71f996ab9023d | [
"Apache-2.0"
] | permissive | lukehuang/pmg-cms-2 | d9d37d6b4fec024f5a72d0c4078c557fd730de7c | 8a259c34a9cf6a4e6dc5881855290ad1bca1bc20 | refs/heads/master | 2021-01-20T21:06:21.684800 | 2017-08-29T09:34:38 | 2017-08-29T09:34:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,714 | py | from itertools import groupby
import datetime
import os.path
import bisect
from flask import url_for
# Earliest year of bill data handled by this module.
# NOTE(review): unused in this excerpt — presumably consumed elsewhere in the app.
MIN_YEAR = 2006

# Maps an agent `type` (as produced by get_agent) to its timeline icon filename.
ICONS = {
    "member": "bill-introduced.png",
    "committee": "committee-discussion.png",
    "house": "house.png",
    "president": "signed-by-president.png",
    "unknown": "bill-introduced.png",
}
def get_location(event):
    """Return a dict describing where a bill event took place.

    The dict always has 'name' and 'class' keys; committee locations without
    a house additionally carry a 'url' linking to the committee page.
    """
    presidential_events = ['bill-signed', 'bill-act-commenced', 'bill-enacted']
    if event.get('type') in presidential_events:
        return {
            'name': 'Office of the President',
            'class': 'president',
        }

    house = event.get('house')
    if house:
        return {
            'name': house['name'],
            'class': house['short_name'],
        }

    committee = event.get('committee')
    if committee:
        # Prefer the committee's house when one is attached.
        if 'house' in committee:
            return {
                'name': committee['house']['name'],
                'class': committee['house']['short_name'],
            }
        return {
            'name': committee['name'],
            'url': url_for('committee_detail', committee_id=committee['id']),
            'class': '',
        }

    return {'name': 'Unknown', 'class': ''}
def get_agent(event, bill):
    """Return a dict describing the actor behind a bill event.

    The dict has 'name', 'type' and 'icon' keys; member and committee agents
    also carry a 'url' to their detail page.
    """
    event_type = event.get('type')

    if event_type in ['bill-signed', 'bill-act-commenced', 'bill-enacted']:
        info = {
            'name': 'The President',
            'type': 'president',
        }
    elif event_type == 'bill-introduced':
        introducer = bill['introduced_by'] or (bill.get('place_of_introduction') or {}).get('name')
        info = {
            'name': introducer,
            'type': 'member',
        }
    elif event.get('member'):
        member = event['member']
        info = {
            'name': member['name'],
            'type': 'member',
            'url': url_for('member', member_id=member['id'])
        }
    elif event.get('committee'):
        committee = event['committee']
        info = {
            'name': committee['name'],
            'type': 'committee',
            'url': url_for('committee_detail', committee_id=committee['id'])
        }
    elif event.get('house'):
        info = {
            'name': event['house']['name'],
            'type': 'house',
        }
    else:
        info = {'name': 'Unknown', 'type': 'unknown'}

    # Attach the timeline icon matching the agent type.
    info['icon'] = ICONS[info['type']]
    return info
def bill_history(bill):
    """ Work out the history of a bill and return a description of it.

    Returns a list of location dicts (see get_location), each containing an
    'events' list of agent dicts (see get_agent), each of which in turn holds
    the raw events that agent performed at that location.
    """
    history = []
    events = bill.get('events', [])
    # Sort by date, then location, then agent so groupby() below sees
    # consecutive runs per location and per agent.
    # NOTE(review): the sort key contains dicts; ordering dicts with `<` only
    # works on Python 2 — on Python 3 a tie on date would raise TypeError.
    # Confirm target runtime before porting.
    events.sort(key=lambda e: [e['date'], get_location(e), get_agent(e, bill)])
    for location, location_events in groupby(events, get_location):
        location_history = []
        # Within one location, group consecutive events by the acting agent.
        for agent, agent_events in groupby(location_events, lambda e: get_agent(e, bill)):
            info = {'events': list(agent_events)}
            info.update(agent)
            location_history.append(info)
        info = {'events': location_history}
        info.update(location)
        history.append(info)
    return history
def count_parliamentary_days(date_from, date_to):
    """ Count the number of parliamentary days between two dates, inclusive.

    Counts the entries of the sorted PARLIAMENTARY_DAYS list that fall in
    [date_from, date_to].
    """
    # BUG FIX: the previous implementation used bisect.bisect (bisect_right)
    # for date_from and then added 1, which overcounted by one whenever
    # date_from was not itself a sitting day. bisect_left(date_from) /
    # bisect_right(date_to) yields the exact inclusive count in all cases.
    i = bisect.bisect_left(PARLIAMENTARY_DAYS, date_from)
    j = bisect.bisect_right(PARLIAMENTARY_DAYS, date_to)
    return j - i
def load_parliamentary_days():
    """ Load the dates when parliament sat from data/parliament-sitting-days.txt

    This file can be updated from a spreadsheet using bin/load_parliamentary_days.py
    """
    path = os.path.join(os.path.dirname(__file__), "../data/parliament-sitting-days.txt")
    dates = []
    with open(path, "r") as f:
        for line in f:
            # Each line is an ISO-ish "YYYY-MM-DD"; int() tolerates the newline.
            year, month, day = (int(part) for part in line.split("-"))
            dates.append(datetime.date(year, month, day))
    dates.sort()
    return dates
# Module-level cache of sitting dates, loaded once at import time and kept
# sorted ascending so count_parliamentary_days() can binary-search it.
PARLIAMENTARY_DAYS = load_parliamentary_days()
| [
"greg@kempe.net"
] | greg@kempe.net |
868f86a8b4a8d2b93140c412dfc8cb90f7d0cf46 | 6300fcf67d4fcb5387a9f0f7370a8ffe8f4097d9 | /AutoParts/AutoParts/vehicle/migrations/0001_initial.py | 25670b28e9b21a438a2e658bc4a5e4d008468c93 | [] | no_license | Borislav-source/Final-Project | e34ac1cbb71e3a32ed490361d3583c2e1e8bfbc9 | 501b258d103c2e1b8947451f4bdf750709d040fd | refs/heads/master | 2023-07-17T15:03:19.390774 | 2021-09-01T14:06:09 | 2021-09-01T14:06:09 | 393,977,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,147 | py | # Generated by Django 3.2.6 on 2021-08-11 11:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the `vehicle` app.

    Auto-generated by Django 3.2.6; creates the EngineModel, Manufacturer,
    VehicleModels and Vehicle tables. Operation order matters: models are
    created before the models that reference them via ForeignKey.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Engine specification rows, referenced by VehicleModels below.
        migrations.CreateModel(
            name='EngineModel',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('engine', models.CharField(max_length=10)),
                ('power', models.CharField(max_length=10)),
                ('engine_code', models.CharField(max_length=100)),
            ],
        ),
        # Vehicle makers; `image` is an uploaded logo stored under Manufacturers/.
        migrations.CreateModel(
            name='Manufacturer',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=15)),
                ('image', models.FileField(upload_to='Manufacturers')),
            ],
        ),
        # Concrete model lines, each tied to an engine.
        migrations.CreateModel(
            name='VehicleModels',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=15)),
                ('image_url', models.URLField()),
                ('production_date', models.DateTimeField()),
                ('engine', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vehicle.enginemodel')),
            ],
        ),
        # A vehicle instance joining manufacturer + model with a category.
        migrations.CreateModel(
            name='Vehicle',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('vehicle_type', models.CharField(choices=[('Car', 'Car'), ('Truck', 'Truck'), ('Motorcycle', 'Motorcycle')], max_length=25)),
                ('manufacturer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vehicle.manufacturer')),
                ('model', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vehicle.vehiclemodels')),
            ],
        ),
    ]
| [
"tsv.borislav@gmail.com"
] | tsv.borislav@gmail.com |
9bc6ca3d85346528dc9c862965f61e85c103ce84 | b9ed8f5edf787f1a7df567a1b01086dc045427ba | /official/projects/video_ssl/dataloaders/video_ssl_input.py | 240b397d18bcf766cf557912984b6cc38a4cae35 | [
"Apache-2.0"
] | permissive | stjordanis/models | 787183f973f8cd4152f328de2368dbef17376488 | 84e1f30cdb5015848cb0d9e38e5b3f0551953b7c | refs/heads/master | 2023-03-18T08:46:29.986735 | 2023-03-07T23:26:36 | 2023-03-07T23:27:43 | 143,071,287 | 0 | 0 | Apache-2.0 | 2018-07-31T21:18:06 | 2018-07-31T21:18:05 | null | UTF-8 | Python | false | false | 12,662 | py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser for video and label datasets."""
from typing import Dict, Optional, Tuple
from absl import logging
import tensorflow as tf
from official.projects.video_ssl.configs import video_ssl as exp_cfg
from official.projects.video_ssl.ops import video_ssl_preprocess_ops
from official.vision.dataloaders import video_input
from official.vision.ops import preprocess_ops_3d
# tf.Example feature key holding the serialized (JPEG-encoded) frames.
IMAGE_KEY = 'image/encoded'
# tf.Example feature key holding the clip's class index.
LABEL_KEY = 'clip/label/index'

# Re-export the shared decoder so callers can import it from this module.
Decoder = video_input.Decoder
def _process_image(image: tf.Tensor,
                   is_training: bool = True,
                   is_ssl: bool = False,
                   num_frames: int = 32,
                   stride: int = 1,
                   num_test_clips: int = 1,
                   min_resize: int = 256,
                   crop_size: int = 224,
                   num_crops: int = 1,
                   zero_centering_image: bool = False,
                   seed: Optional[int] = None) -> tf.Tensor:
  """Processes a serialized image tensor.

  Args:
    image: Input Tensor of shape [timesteps] and type tf.string of serialized
      frames.
    is_training: Whether or not in training mode. If True, random sample, crop
      and left right flip is used.
    is_ssl: Whether or not in self-supervised pre-training mode. When True and
      `is_training` is True, two clips are sampled and augmented independently,
      then concatenated along the frame axis (so the output holds
      2 * num_frames frames).
    num_frames: Number of frames per subclip.
    stride: Temporal stride to sample frames.
    num_test_clips: Number of test clips (1 by default). If more than 1, this
      will sample multiple linearly spaced clips within each video at test time.
      If 1, then a single clip in the middle of the video is sampled. The clips
      are aggreagated in the batch dimension.
    min_resize: Frames are resized so that min(height, width) is min_resize.
    crop_size: Final size of the frame after cropping the resized frames. Both
      height and width are the same.
    num_crops: Number of crops to perform on the resized frames.
    zero_centering_image: If True, frames are normalized to values in [-1, 1].
      If False, values in [0, 1].
    seed: A deterministic seed to use when sampling. Note: only applied to the
      horizontal flip; the crop/jitter randomness is unseeded.

  Returns:
    Processed frames. Tensor of shape
    [num_frames * num_test_clips, crop_size, crop_size, 3].
  """
  # Validate parameters.
  if is_training and num_test_clips != 1:
    logging.warning(
        '`num_test_clips` %d is ignored since `is_training` is `True`.',
        num_test_clips)

  # Temporal sampler.
  if is_training:
    # Sampler for training.
    if is_ssl:
      # Sample two clips from linear decreasing distribution.
      image = video_ssl_preprocess_ops.sample_ssl_sequence(
          image, num_frames, True, stride)
    else:
      # Sample random clip.
      image = preprocess_ops_3d.sample_sequence(image, num_frames, True, stride)
  else:
    # Sampler for evaluation.
    if num_test_clips > 1:
      # Sample linspace clips.
      image = preprocess_ops_3d.sample_linspace_sequence(image, num_test_clips,
                                                         num_frames, stride)
    else:
      # Sample middle clip.
      image = preprocess_ops_3d.sample_sequence(image, num_frames, False,
                                                stride)

  # Decode JPEG string to tf.uint8.
  image = preprocess_ops_3d.decode_jpeg(image, 3)

  if is_training:
    # Standard image data augmentation: random resized crop and random flip.
    if is_ssl:
      # The two SSL clips are stacked along axis 0; split and augment each
      # clip independently so the two views differ.
      image_1, image_2 = tf.split(image, num_or_size_splits=2, axis=0)
      image_1 = preprocess_ops_3d.random_crop_resize(
          image_1, crop_size, crop_size, num_frames, 3, (0.5, 2), (0.3, 1))
      image_1 = preprocess_ops_3d.random_flip_left_right(image_1, seed)
      image_2 = preprocess_ops_3d.random_crop_resize(
          image_2, crop_size, crop_size, num_frames, 3, (0.5, 2), (0.3, 1))
      image_2 = preprocess_ops_3d.random_flip_left_right(image_2, seed)
    else:
      image = preprocess_ops_3d.random_crop_resize(
          image, crop_size, crop_size, num_frames, 3, (0.5, 2), (0.3, 1))
      image = preprocess_ops_3d.random_flip_left_right(image, seed)
  else:
    # Resize images (resize happens only if necessary to save compute).
    image = preprocess_ops_3d.resize_smallest(image, min_resize)
    # Three-crop of the frames.
    image = preprocess_ops_3d.crop_image(image, crop_size, crop_size, False,
                                         num_crops)

  # Cast the frames in float32, normalizing according to zero_centering_image.
  if is_training and is_ssl:
    image_1 = preprocess_ops_3d.normalize_image(image_1, zero_centering_image)
    image_2 = preprocess_ops_3d.normalize_image(image_2, zero_centering_image)
  else:
    image = preprocess_ops_3d.normalize_image(image, zero_centering_image)

  # Self-supervised pre-training augmentations.
  if is_training and is_ssl:
    # Color ops below expect values in [0, 1]; temporarily undo zero-centering.
    if zero_centering_image:
      image_1 = 0.5 * (image_1 + 1.0)
      image_2 = 0.5 * (image_2 + 1.0)
    # Temporally consistent color jittering.
    image_1 = video_ssl_preprocess_ops.random_color_jitter_3d(image_1)
    image_2 = video_ssl_preprocess_ops.random_color_jitter_3d(image_2)
    # Temporally consistent gaussian blurring. The two views use different
    # blur probabilities (1.0 vs 0.1), and only the second view is solarized.
    image_1 = video_ssl_preprocess_ops.random_blur(image_1, crop_size,
                                                   crop_size, 1.0)
    image_2 = video_ssl_preprocess_ops.random_blur(image_2, crop_size,
                                                   crop_size, 0.1)
    image_2 = video_ssl_preprocess_ops.random_solarization(image_2)
    image = tf.concat([image_1, image_2], axis=0)
    image = tf.clip_by_value(image, 0., 1.)
    # Re-apply zero-centering after the [0, 1] augmentations.
    if zero_centering_image:
      image = 2 * (image - 0.5)

  return image
def _postprocess_image(image: tf.Tensor,
                       is_training: bool = True,
                       is_ssl: bool = False,
                       num_frames: int = 32,
                       num_test_clips: int = 1,
                       num_test_crops: int = 1) -> tf.Tensor:
  """Reshapes a batched Tensor of frames after batching.

  Use the same parameter values that were passed to the per-example
  processing step.

  Args:
    image: Input Tensor of shape [batch, timesteps, height, width, 3].
    is_training: Whether or not in training mode. If True, random sample, crop
      and left right flip is used.
    is_ssl: Whether or not in self-supervised pre-training mode.
    num_frames: Number of frames per subclip.
    num_test_clips: Number of test clips (1 by default). If more than 1, this
      will sample multiple linearly spaced clips within each video at test time.
      If 1, then a single clip in the middle of the video is sampled. The clips
      are aggreagated in the batch dimension.
    num_test_crops: Number of test crops (1 by default). If more than 1, there
      are multiple crops for each clip at test time. If 1, there is a single
      central crop. The crops are aggreagated in the batch dimension.

  Returns:
    Processed frames. Tensor of shape
    [batch * num_test_clips * num_test_crops, num_frames, height, width, 3].
  """
  if is_ssl and is_training:
    # The two SSL views were concatenated along the time axis; move the second
    # view into the batch dimension, doubling the batch size.
    first_view, second_view = tf.split(image, num_or_size_splits=2, axis=1)
    image = tf.concat([first_view, second_view], axis=0)

  total_views = num_test_clips * num_test_crops
  if total_views > 1 and not is_training:
    # Fold the evaluation views into the batch dimension: batch * total_views.
    folded_shape = [-1, num_frames] + image.shape[2:].as_list()
    image = tf.reshape(image, folded_shape)

  return image
def _process_label(label: tf.Tensor,
                   one_hot_label: bool = True,
                   num_classes: Optional[int] = None) -> tf.Tensor:
  """Casts the raw label and optionally converts it to a one-hot vector."""
  # Validate parameters.
  if one_hot_label and not num_classes:
    raise ValueError(
        '`num_classes` should be given when requesting one hot label.')

  label = tf.cast(label, dtype=tf.int32)
  if not one_hot_label:
    return label

  # Replace the label index by its one-hot representation; multi-label inputs
  # (rank > 1 after encoding) are collapsed into a single multi-hot vector.
  label = tf.one_hot(label, num_classes)
  if len(label.shape.as_list()) > 1:
    label = tf.reduce_sum(label, axis=0)
  if num_classes == 1:
    # Single-class special case: invert the encoding.
    label = 1 - label
  return label
class Parser(video_input.Parser):
  """Parses a video and label dataset.

  Extends the base video parser with a self-supervised (SSL) mode flag that
  controls how clips are sampled and augmented in `_process_image`.
  """

  def __init__(self,
               input_params: exp_cfg.DataConfig,
               image_key: str = IMAGE_KEY,
               label_key: str = LABEL_KEY):
    super().__init__(input_params, image_key, label_key)
    # Whether examples are parsed for self-supervised pre-training
    # (two augmented clips per example) rather than supervised training.
    self._is_ssl = input_params.is_ssl

  def _parse_train_data(
      self, decoded_tensors: Dict[str, tf.Tensor]
  ) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:
    """Parses data for training."""
    # Process image and label.
    image = decoded_tensors[self._image_key]
    # NOTE: the remaining self._* parameters are set by the parent
    # video_input.Parser constructor.
    image = _process_image(
        image=image,
        is_training=True,
        is_ssl=self._is_ssl,
        num_frames=self._num_frames,
        stride=self._stride,
        num_test_clips=self._num_test_clips,
        min_resize=self._min_resize,
        crop_size=self._crop_size,
        zero_centering_image=self._zero_centering_image)
    image = tf.cast(image, dtype=self._dtype)
    features = {'image': image}

    label = decoded_tensors[self._label_key]
    label = _process_label(label, self._one_hot_label, self._num_classes)

    return features, label

  def _parse_eval_data(
      self, decoded_tensors: Dict[str, tf.Tensor]
  ) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:
    """Parses data for evaluation."""
    image = decoded_tensors[self._image_key]
    # Evaluation never uses SSL two-clip sampling; multi-clip/multi-crop
    # evaluation is controlled by num_test_clips/num_crops instead.
    image = _process_image(
        image=image,
        is_training=False,
        num_frames=self._num_frames,
        stride=self._stride,
        num_test_clips=self._num_test_clips,
        min_resize=self._min_resize,
        crop_size=self._crop_size,
        num_crops=self._num_crops,
        zero_centering_image=self._zero_centering_image)
    image = tf.cast(image, dtype=self._dtype)
    features = {'image': image}

    label = decoded_tensors[self._label_key]
    label = _process_label(label, self._one_hot_label, self._num_classes)

    if self._output_audio:
      audio = decoded_tensors[self._audio_feature]
      audio = tf.cast(audio, dtype=self._dtype)
      # Deterministically take the middle 20 audio frames of 2048 features.
      audio = preprocess_ops_3d.sample_sequence(
          audio, 20, random=False, stride=1)
      audio = tf.ensure_shape(audio, [20, 2048])
      features['audio'] = audio

    return features, label

  def parse_fn(self, is_training):
    """Returns a parse fn that reads and parses raw tensors from the decoder.

    Args:
      is_training: a `bool` to indicate whether it is in training mode.

    Returns:
      parse: a `callable` that takes the serialized example and generate the
        images, labels tuple where labels is a dict of Tensors that contains
        labels.
    """
    def parse(decoded_tensors):
      """Parses the serialized example data."""
      if is_training:
        return self._parse_train_data(decoded_tensors)
      else:
        return self._parse_eval_data(decoded_tensors)

    return parse
class PostBatchProcessor(object):
  """Applies post-batching reshapes to an already-batched video dataset."""

  def __init__(self, input_params: exp_cfg.DataConfig):
    # Cache the parameters needed to fold clips/crops into the batch axis.
    self._is_training = input_params.is_training
    self._is_ssl = input_params.is_ssl
    self._num_frames = input_params.feature_shape[0]
    self._num_test_clips = input_params.num_test_clips
    self._num_test_crops = input_params.num_test_crops

  def __call__(self, features: Dict[str, tf.Tensor],
               label: tf.Tensor) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:
    """Reshapes batched image/audio tensors; the label passes through."""
    for modality in ('image', 'audio'):
      if modality not in features:
        continue
      features[modality] = _postprocess_image(
          image=features[modality],
          is_training=self._is_training,
          is_ssl=self._is_ssl,
          num_frames=self._num_frames,
          num_test_clips=self._num_test_clips,
          num_test_crops=self._num_test_crops)
    return features, label
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
6e5183a691303b5e623fa4b8814bc986a232de32 | 641f76328bfeb7e54f0793a18c5b7c00595b98fd | /apps/trade/utils.py | 69ea8e92f681b5743beb5f6aab42a27395c8e35c | [
"Apache-2.0"
] | permissive | lianxiaopang/camel-store-api | 1d16060af92eb01607757c0423377a8c94c3a726 | b8021250bf3d8cf7adc566deebdba55225148316 | refs/heads/master | 2020-12-29T13:23:18.118617 | 2020-02-09T08:38:53 | 2020-02-09T08:38:53 | 238,621,246 | 0 | 0 | Apache-2.0 | 2020-02-07T14:28:35 | 2020-02-06T06:17:47 | Python | UTF-8 | Python | false | false | 10,574 | py | import datetime
import re
from decimal import Decimal
from django.utils import timezone
from django.db.models import Sum
from django.conf import settings
from apps.config.models import Marketing, BoolConfig
from apps.account.models import WxUserAccountLog, WxUserCreditLog
from apps.goods.models import GoodType, ReplGoodsType
from wxapp.models import WxUser
from apps.group_buy.models import PtGroup
from apps.refund.utils import create_refund
from apps.user.utils import remind_new_order
from apps.sms.models import SmsSwitch
from apps.sms.send_sms import send_sms
from .models import Orders, Items, BuyerCode, GoodsBackup
# After a payment, check whether the user now meets the promotion (bonus)
# and rebate thresholds.
def validate_marketing_requirement(user):
    """Grant rebate/bonus rights once the user's paid order total reaches the
    configured Marketing thresholds.

    Only orders in a paid/active state are counted. Does nothing if both
    rights have already been decided (i.e. are no longer NULL).
    """
    if user.rebate_right != user.NULL and user.bonus_right != user.NULL:
        return
    totals = Orders.objects.filter(
        user=user,
        status__in=[Orders.HAS_PAID, Orders.RECEIVING, Orders.DONE, Orders.SERVING],
    ).aggregate(
        real_amount=Sum('real_amount'),
        asset_pay=Sum('asset_pay'),
        # BUG FIX: the alias was previously 'rcharge_pay' while the code below
        # read 'recharge_pay', so recharge spending was never counted.
        recharge_pay=Sum('recharge_pay'),
    )
    # Sum(...) yields None when there are no matching rows; treat as 0.
    total = sum(totals[key] or 0 for key in ('real_amount', 'asset_pay', 'recharge_pay'))
    if Marketing.get_value('rebate') <= total and user.rebate_right == user.NULL:
        user.rebate_right = user.TRUE
    if Marketing.get_value('bonus') <= total and user.bonus_right == user.NULL:
        user.bonus_right = user.TRUE
    user.save()
def order_pay(order, trade_no=None):
    """Settle an order after a successful payment.

    Args:
        order: the ``Orders`` instance being paid.
        trade_no: external payment transaction number, if any.

    Returns:
        A message string when the order was already paid, otherwise ``None``.
    """
    if order.status == order.CLOSE:  # payment arrived after the order was closed: refund it
        create_refund(order)
        return
    if order.status != order.PAYING:
        return "该订单已支付"  # "this order has already been paid" (user-facing text)
    order.trade_no = trade_no
    order.pay_time = timezone.now()
    if order.is_pt:
        # Group-buy (pintuan) order.
        order.status = order.GROUPBUY
        order.save()
        if not order.pt_group.all():  # group-opening order: create the group after payment
            goods_backup = order.goods_backup.first()
            pt = PtGroup.create(order.user, goods_backup.goods, order.shop)
            pt.order.add(order)
            pt.partake.add(order.user)
        elif order.pt_group.all() and not order.pt_group.filter(status=PtGroup.BUILD):
            # Joining order paid after the group was already settled: close and refund.
            order.status = order.CLOSE
            order.save()
            create_refund(order)
        else:
            # Join every still-building group this order belongs to.
            for pt in order.pt_group.filter(status=PtGroup.BUILD):
                pt.partake.add(order.user)
    else:
        if order.model_type in [order.ORD, order.REPL]:
            order.status = order.HAS_PAID
            order.items.update(send_type=Items.SENDING)
        order.save()
        remind_new_order(order.shop)
        validate_marketing_requirement(order.user)
def order_cancel(order):
    """Close an order: detach it from group-buy groups, restore reserved
    stock, and refund any wallet balance / credit spent on it."""
    if order.is_pt:
        # Remove the user/order from any group-buy groups before closing.
        for pt in order.pt_group.all():
            pt.order.remove(order)
            pt.partake.remove(order.user)
        order.is_pt = False
    order.status = order.CLOSE
    order.items.update(send_type=Items.CLOSE)  # close the sub-orders
    for goods in order.goods_backup.all():  # return reserved stock
        # BUG FIX: `gtype` was previously never reset per iteration, so when
        # model_type matched neither branch it was unbound (NameError) on the
        # first pass or stale from a previous iteration afterwards.
        gtype = None
        if order.model_type == order.ORD:
            gtype = GoodType.objects.filter(id=goods.gtype_id).first()
        elif order.model_type == order.REPL:
            gtype = ReplGoodsType.objects.filter(id=goods.gtype_id).first()
        if gtype:
            gtype.stock += goods.num
            gtype.save()
    if order.asset_pay > 0 or order.recharge_pay > 0:
        # Refund the wallet (asset + recharge) portion of the payment.
        WxUserAccountLog.record(order.user, WxUserAccountLog.USE_RETURN, asset=order.asset_pay,
                                balance=order.recharge_pay, remark='取消订单退还', note=f'订单:{order.order_sn}', number=order.order_sn)
    if order.credit > 0:
        # Return any credit (points) spent on the order.
        WxUserCreditLog.record(order.user, WxUserCreditLog.REPLACEMENT_RETURN, credit=order.credit, number=order.order_sn,
                               remark='订单未支付返还积分')
    order.save()
def compute_amount(order, order_amount, postage_total, use_wallet=False):
    """Compute and persist how an order's total is paid.

    When wallet payment is enabled, the total (goods + postage) is split
    between the user's asset balance, recharge balance, and a real-money
    remainder; otherwise the whole total is payable in real money.

    Returns the saved order.
    """
    order.order_amount = order_amount
    order.postage_total = postage_total
    if use_wallet and BoolConfig.get_bool('wallet_switch'):
        total = order.order_amount + order.postage_total
        account = getattr(order.user, 'account', None)
        asset = getattr(account, 'asset', Decimal('0'))
        recharge = getattr(account, 'recharge', Decimal('0'))
        if total <= asset:
            # BUG FIX: was `total < asset`, which left asset_pay/real_amount
            # unset when the total exactly equalled the asset balance.
            order.asset_pay = total
            order.real_amount = 0
        elif total < asset + recharge:
            # Asset balance covers part; recharge balance covers the rest.
            order.asset_pay = asset
            order.recharge_pay = total - asset
            order.real_amount = 0
        else:
            # total >= asset + recharge: drain both balances, pay the rest.
            order.asset_pay = asset
            order.recharge_pay = recharge
            order.real_amount = total - asset - recharge
        remark = '线下支付' if order.model_type == order.QRPAY else '商品购买'
        number = order.order_sn
        WxUserAccountLog.record(order.user, WxUserAccountLog.USE, asset=order.asset_pay,
                                balance=order.recharge_pay, remark=remark, note=f'订单:{order.order_sn}', number=number)
    else:
        order.real_amount = order.order_amount + order.postage_total
    order.save()
    return order
def order_validate_all_send(order):
    """Flip the order to awaiting-receipt once every item has been shipped."""
    still_sending = order.items.filter(send_type=Items.SENDING)
    if still_sending:
        return
    order.status = order.RECEIVING
    order.send_time = timezone.now()
    # Auto-confirm receipt seven days after shipping.
    order.flag_time = timezone.now() + timezone.timedelta(days=7)
    order.save()
def order_done(order):
    """Confirm receipt of the whole order and settle each item's rebates."""
    order.receive_time = timezone.now()
    order.status = order.DONE
    order.save()
    # Per-item settlement (bonus / share rebate) now that receipt is confirmed.
    for line_item in order.items.all():
        item_confirm_receipt(line_item)
def calc_asset(item):
    # Settle the distribution bonus and the share rebate for one confirmed item.
    # Returns False when the share-rebate path bails out early, otherwise None.
    """Distribution bonus and share rebate settlement."""
    goods_info = item.goods_backup
    if BoolConfig.get_bool('bonus_switch'):
        direct_relation = None
        if getattr(item.order.user, 'referrer_relations', None):
            direct_relation = item.order.user.referrer_relations.user  # the buyer's direct referrer ("upline")
        # Credit the referrer only when they have bonus rights and the goods
        # carry a positive per-unit bonus.
        if direct_relation and direct_relation.has_bonus_right and goods_info.g_bonus > 0:
            WxUserAccountLog.record(direct_relation, WxUserAccountLog.BONUS, asset=goods_info.g_bonus * goods_info.num,
                                    referral=item.order.user,
                                    number=item.order_sn, remark='成员消费返利', note=item.order.user.nickname,
                                    cost=goods_info.price * goods_info.num)
    # Share rebate: credit points to the user whose share link led to this sale.
    """ 分享返积分返利 """
    if goods_info.g_rebate <= 0 or not BoolConfig.get_bool('rebate_switch'):
        return False
    if not goods_info.share_user_id:
        return False
    share_user = WxUser.objects.filter(id=int(goods_info.share_user_id)).first()
    if not share_user or not share_user.has_rebate_right:
        return False
    WxUserCreditLog.record(share_user, WxUserCreditLog.SHATE, credit=goods_info.g_rebate * goods_info.num,
                           number=item.order_sn, remark='商品分享返利', note=goods_info.goods_name)
def item_send(item):
    """Ship one sub-order: issue a pickup code if needed, stamp times, notify."""
    backup = item.goods_backup
    if backup.delivery_method == GoodsBackup.BUYER:
        # Self-pickup goods get a redeem code instead of a shipment.
        BuyerCode.create(item)
    if item.order.model_type in (Orders.ORD, Orders.REPL):
        item.send_time = timezone.now()
        item.send_type = item.RECEIVING
        item.save()
        # Shipping the last item may flip the parent order to 'awaiting receipt'.
        order_validate_all_send(item.order)
    delivery_notice(item)
def item_confirm_receipt(item):
    """Confirm receipt of one sub-order; idempotent once the item is OVER."""
    if item.send_type == item.OVER:
        return  # already confirmed earlier
    item.send_type = item.OVER
    # Keep an existing receive time; otherwise stamp it now.
    item.receive_time = item.receive_time or timezone.now()
    item.save()
    # Bonuses/rebates only apply to ordinary purchase orders.
    if item.order.model_type == Orders.ORD:
        calc_asset(item)
def sub_order_done(order):
    """Close a subscription order once no sub-item remains unfinished."""
    unfinished = order.items.exclude(send_type=Items.OVER)
    if unfinished:
        return
    order.status = order.DONE
    order.next_send = None
    order.receive_time = timezone.now()
    order.save()
def arrive(item):
    """Mark the sub-order *item* as delivered (arrived) right now."""
    item.send_type = item.ARRIVE
    item.receive_time = timezone.now()
    item.save()
def delivery_notice(item):
    """
    Send shipping-notification SMS messages for a shipped sub-order.

    Notifies the purchaser (one template) and the receiver (a template that
    depends on the delivery method). Skipped entirely when the
    'delivery_notice' SMS switch is off.
    """
    if not SmsSwitch.get('delivery_notice'):
        return
    purchaser_phone = None
    receiver_phone = None
    try:
        receiver_phone = item.order.delivery_address.mobile_phone
        purchaser_phone = item.order.user.info.phone
    except AttributeError as e:
        # Missing address/profile: fall through with whatever was resolved.
        print(e)
    if receiver_phone == purchaser_phone:  # same number: only text the receiver
        purchaser_phone = None
    if purchaser_phone:
        data = {"member": item.order.user.nickname,
                "commodity": item.goods_backup.goods_name,
                "business": getattr(settings, 'SHOP_NAME')}
        send_sms(purchaser_phone, data, 'SMS_157100140')
    if receiver_phone:
        # Pick the SMS template matching the delivery method.
        data = {
            "receiver": item.order.delivery_address.sign_name,
            "commodity": item.goods_backup.goods_name,
            "business": getattr(settings, 'SHOP_NAME'),
        }
        if item.goods_backup.delivery_method == 'own':  # merchant self-delivery
            send_sms(receiver_phone, data, 'SMS_157100062')
        if item.goods_backup.delivery_method == 'express':  # courier/logistics
            data.update({
                "expresscompany": item.express_company,
                "trackingnumber": item.express_num,
            })
            send_sms(receiver_phone, data, 'SMS_157100132')
        if item.goods_backup.delivery_method == 'buyer':  # self-pickup in store
            data.update({
                "address": item.order.shop.address,
                "store": item.order.shop.name,
            })
            send_sms(receiver_phone, data, 'SMS_157100093')
def parseAddress(address):
    """Split a Chinese address into (province, city, district, remainder).

    Returns the 4-tuple of matched groups, or an empty list when the
    address does not fit the expected province/city/district pattern.
    """
    level_markers = ('省|市|区', '市|区', '市|区|县')
    # One capture group per administrative level, plus a trailing catch-all.
    group_patterns = [f'(\\S[^ {marker}]+[{marker}])' for marker in level_markers]
    group_patterns.append(f'(\\S*)')
    match = re.match(''.join(group_patterns), address)
    if match is None:
        return []
    return match.groups()
| [
"lyh@gzqichang.com"
] | lyh@gzqichang.com |
0c2ef62587a7453de98816ec30eaa6e455911f59 | d14ca036f35c8f1026d6f01df40f90358cddd331 | /meshio/vtu_io.py | e0737b13d1df2084a84ab1ec9db8f56ed2a5338c | [
"MIT"
] | permissive | luohancfd/meshio | c3af9b869507013fd047be5a88b20dd50e1f0dda | acc40c2ccbcdbc002abe02fa0889c7b06b3af17d | refs/heads/master | 2020-03-28T11:58:22.213400 | 2018-09-07T19:23:32 | 2018-09-07T19:23:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,592 | py | # -*- coding: utf-8 -*-
#
"""
I/O for VTU.
"""
import base64
import logging
try:
from StringIO import cStringIO as BytesIO
except ImportError:
from io import BytesIO
import sys
import zlib
import numpy
from .__about__ import __version__
from .mesh import Mesh
from .vtk_io import vtk_to_meshio_type, meshio_to_vtk_type, raw_from_cell_data
from .common import num_nodes_per_cell
def num_bytes_to_num_base64_chars(num_bytes):
    """Return how many base64 characters encode *num_bytes* raw bytes."""
    # base64 packs every (possibly partial) group of 3 bytes into 4 chars.
    # The double negation turns floor division into ceiling division.
    num_groups = -(-num_bytes // 3)
    return num_groups * 4
def _cells_from_data(connectivity, offsets, types, cell_data_raw):
    # Translate flat VTU arrays into meshio's cells/cell_data dictionaries.
    # `connectivity` is one flat vector of point indices:
    #   (p0_0, ..., p0_k, p1_0, ..., p1_k, ...)
    # `offsets[i]` points one past the END of cell i's indices, and
    # `types[i]` is its VTK cell type.
    # Collect cells of the same VTK type into bins of row indices.
    # See <https://stackoverflow.com/q/47310359/353337> for better
    # alternatives.
    uniques = numpy.unique(types)
    bins = {u: numpy.where(types == u)[0] for u in uniques}
    assert len(offsets) == len(types)
    cells = {}
    cell_data = {}
    for tpe, b in bins.items():
        meshio_type = vtk_to_meshio_type[tpe]
        n = num_nodes_per_cell[meshio_type]
        # The offsets point to the _end_ of the indices, so the cell's nodes
        # live at offsets[b] - n ... offsets[b] - 1.
        indices = numpy.add.outer(offsets[b], numpy.arange(-n, 0))
        cells[meshio_type] = connectivity[indices]
        cell_data[meshio_type] = {key: value[b] for key, value in cell_data_raw.items()}
    return cells, cell_data
# Mapping from the VTU "type" attribute of a DataArray to the corresponding
# numpy dtype; numpy_to_vtu_type below is its inverse, used when writing.
vtu_to_numpy_type = {
    "Float32": numpy.dtype(numpy.float32),
    "Float64": numpy.dtype(numpy.float64),
    "Int8": numpy.dtype(numpy.int8),
    "Int16": numpy.dtype(numpy.int16),
    "Int32": numpy.dtype(numpy.int32),
    "Int64": numpy.dtype(numpy.int64),
    "UInt8": numpy.dtype(numpy.uint8),
    "UInt16": numpy.dtype(numpy.uint16),
    "UInt32": numpy.dtype(numpy.uint32),
    "UInt64": numpy.dtype(numpy.uint64),
}
numpy_to_vtu_type = {v: k for k, v in vtu_to_numpy_type.items()}
class VtuReader(object):
    """Helper class for reading VTU files. Some properties are global to the
    file (e.g., byte_order), and instead of passing around these parameters,
    make them properties of this class.
    """
    def __init__(self, filename):
        # Parse the whole VTU file eagerly; results end up in self.points,
        # self.cells, self.point_data, self.cell_data, self.field_data.
        from lxml import etree as ET
        points = None
        point_data = {}
        cell_data_raw = {}
        cells = {}
        field_data = {}
        # libxml2 (and with it lxml) has a safety net against memory
        # overflows (see <https://stackoverflow.com/q/33828728/353337>), so
        # by default it cannot parse large files and instead throws
        #
        #   lxml.etree.XMLSyntaxError: xmlSAX2Characters: huge text node, [...]
        #
        # Setting huge_tree=True removes the limit. Another alternative would
        # be to use Python's native xml parser to avoid this error:
        # import xml.etree.cElementTree as ET
        parser = ET.XMLParser(remove_comments=True, huge_tree=True)
        tree = ET.parse(filename, parser)
        root = tree.getroot()
        assert root.tag == "VTKFile"
        assert root.attrib["type"] == "UnstructuredGrid"
        assert root.attrib["version"] in [
            "0.1",
            "1.0",
        ], "Unknown VTU file version '{}'.".format(root.attrib["version"])
        # Only zlib compression is supported; a missing attribute is fine.
        try:
            assert root.attrib["compressor"] == "vtkZLibDataCompressor"
        except KeyError:
            pass
        self.header_type = (
            root.attrib["header_type"] if "header_type" in root.attrib else "UInt32"
        )
        try:
            self.byte_order = root.attrib["byte_order"]
            assert self.byte_order in [
                "LittleEndian",
                "BigEndian",
            ], "Unknown byte order '{}'.".format(self.byte_order)
        except KeyError:
            self.byte_order = None
        grid = None
        self.appended_data = None
        # Top level: exactly one UnstructuredGrid, optionally one AppendedData.
        for c in root:
            if c.tag == "UnstructuredGrid":
                assert grid is None, "More than one UnstructuredGrid found."
                grid = c
            else:
                assert c.tag == "AppendedData", "Unknown main tag '{}'.".format(c.tag)
                assert self.appended_data is None, "More than one AppendedData found."
                assert c.attrib["encoding"] == "base64"
                self.appended_data = c.text.strip()
                # The appended data always begins with a (meaningless)
                # underscore.
                assert self.appended_data[0] == "_"
                self.appended_data = self.appended_data[1:]
        assert grid is not None, "No UnstructuredGrid found."
        piece = None
        # Grid level: exactly one Piece, optionally FieldData.
        for c in grid:
            if c.tag == "Piece":
                assert piece is None, "More than one Piece found."
                piece = c
            else:
                assert c.tag == "FieldData", "Unknown grid subtag '{}'.".format(c.tag)
                # TODO test field data
                for data_array in c:
                    field_data[data_array.attrib["Name"]] = self.read_data(data_array)
        assert piece is not None, "No Piece found."
        num_points = int(piece.attrib["NumberOfPoints"])
        num_cells = int(piece.attrib["NumberOfCells"])
        # Piece level: Points, Cells, PointData, CellData.
        for child in piece:
            if child.tag == "Points":
                data_arrays = list(child)
                assert len(data_arrays) == 1
                data_array = data_arrays[0]
                assert data_array.tag == "DataArray"
                points = self.read_data(data_array)
                num_components = int(data_array.attrib["NumberOfComponents"])
                points = points.reshape(num_points, num_components)
            elif child.tag == "Cells":
                for data_array in child:
                    assert data_array.tag == "DataArray"
                    cells[data_array.attrib["Name"]] = self.read_data(data_array)
                assert len(cells["offsets"]) == num_cells
                assert len(cells["types"]) == num_cells
            elif child.tag == "PointData":
                for c in child:
                    assert c.tag == "DataArray"
                    point_data[c.attrib["Name"]] = self.read_data(c)
            else:
                assert child.tag == "CellData", "Unknown tag '{}'.".format(child.tag)
                for c in child:
                    assert c.tag == "DataArray"
                    cell_data_raw[c.attrib["Name"]] = self.read_data(c)
        assert points is not None
        assert "connectivity" in cells
        assert "offsets" in cells
        assert "types" in cells
        cells, cell_data = _cells_from_data(
            cells["connectivity"], cells["offsets"], cells["types"], cell_data_raw
        )
        self.points = points
        self.cells = cells
        self.point_data = point_data
        self.cell_data = cell_data
        self.field_data = field_data
        return
    def read_binary(self, data, data_type):
        # Decode a zlib-compressed, base64-encoded data block into a numpy
        # array of dtype `data_type`. Layout: a base64 header
        # [num_blocks, max_block_size, last_block_size, size_0, ..., size_n]
        # followed by base64-encoded compressed blocks.
        # First read the block count; it determines the size of the header.
        dtype = vtu_to_numpy_type[self.header_type]
        num_bytes_per_item = numpy.dtype(dtype).itemsize
        num_chars = num_bytes_to_num_base64_chars(num_bytes_per_item)
        byte_string = base64.b64decode(data[:num_chars])[:num_bytes_per_item]
        num_blocks = numpy.frombuffer(byte_string, dtype)[0]
        # read the entire header
        num_header_items = 3 + num_blocks
        num_header_bytes = num_bytes_per_item * num_header_items
        num_header_chars = num_bytes_to_num_base64_chars(num_header_bytes)
        byte_string = base64.b64decode(data[:num_header_chars])
        header = numpy.frombuffer(byte_string, dtype)
        # num_blocks = header[0]
        # max_uncompressed_block_size = header[1]
        # last_compressed_block_size = header[2]
        block_sizes = header[3:]
        # Read the block data
        byte_array = base64.b64decode(data[num_header_chars:])
        dtype = vtu_to_numpy_type[data_type]
        num_bytes_per_item = numpy.dtype(dtype).itemsize
        # Prefix-sum the compressed block sizes into byte offsets.
        byte_offsets = numpy.empty(block_sizes.shape[0] + 1, dtype=block_sizes.dtype)
        byte_offsets[0] = 0
        numpy.cumsum(block_sizes, out=byte_offsets[1:])
        # Decompress each block and concatenate the decoded arrays.
        block_data = numpy.concatenate(
            [
                numpy.frombuffer(
                    zlib.decompress(byte_array[byte_offsets[k] : byte_offsets[k + 1]]),
                    dtype=dtype,
                )
                for k in range(num_blocks)
            ]
        )
        return block_data
    def read_data(self, c):
        # Read one <DataArray> element, dispatching on its "format"
        # attribute: ascii, binary (inline base64), or appended.
        if c.attrib["format"] == "ascii":
            # ascii
            data = numpy.array(
                c.text.split(), dtype=vtu_to_numpy_type[c.attrib["type"]]
            )
        elif c.attrib["format"] == "binary":
            data = self.read_binary(c.text.strip(), c.attrib["type"])
        else:
            # appended data: the element only carries an offset into the
            # file-global AppendedData blob.
            assert c.attrib["format"] == "appended", "Unknown data format '{}'.".format(
                c.attrib["format"]
            )
            offset = int(c.attrib["offset"])
            data = self.read_binary(self.appended_data[offset:], c.attrib["type"])
        if "NumberOfComponents" in c.attrib:
            data = data.reshape(-1, int(c.attrib["NumberOfComponents"]))
        return data
def read(filename):
    """Parse the VTU file *filename* and return its contents as a Mesh."""
    parsed = VtuReader(filename)
    mesh = Mesh(
        parsed.points,
        parsed.cells,
        point_data=parsed.point_data,
        cell_data=parsed.cell_data,
        field_data=parsed.field_data,
    )
    return mesh
def _chunk_it(array, n):
out = []
k = 0
while k * n < len(array):
out.append(array[k * n : (k + 1) * n])
k += 1
return out
def write(filename, mesh, write_binary=True, pretty_xml=True):
    """Write *mesh* to *filename* as a VTU (VTK UnstructuredGrid XML) file.

    :param filename: output file path.
    :param mesh: meshio Mesh with points, cells, and optional data arrays.
    :param write_binary: if True, emit zlib-compressed base64 data arrays;
        ASCII output is only meant for debugging.
    :param pretty_xml: pretty-print the generated XML.
    """
    from lxml import etree as ET
    if not write_binary:
        logging.warning("VTU ASCII files are only meant for debugging.")
    if mesh.points.shape[1] == 2:
        logging.warning(
            "VTU requires 3D points, but 2D points given. "
            "Appending 0 third component."
        )
        mesh.points = numpy.column_stack(
            [mesh.points[:, 0], mesh.points[:, 1], numpy.zeros(mesh.points.shape[0])]
        )
    header_type = "UInt32"
    vtk_file = ET.Element(
        "VTKFile",
        type="UnstructuredGrid",
        version="0.1",
        # Use the native endianness. Not strictly necessary, but this
        # simplifies things a bit.
        byte_order=("LittleEndian" if sys.byteorder == "little" else "BigEndian"),
        header_type=header_type,
        compressor="vtkZLibDataCompressor",
    )
    # Convert all data to the native byte order. Don't use byteswap to make
    # sure that the dtype is changed as well; see
    # <https://github.com/numpy/numpy/issues/10372>.
    # BUG FIX: the previous code rebound the loop variable
    # (`data = data.astype(...)`), discarding every converted array; the
    # results are now written back into the mesh containers.
    points = mesh.points.astype(mesh.points.dtype.newbyteorder("="))
    for name, data in mesh.point_data.items():
        mesh.point_data[name] = data.astype(data.dtype.newbyteorder("="))
    for cell_type_data in mesh.cell_data.values():
        for name, dat in cell_type_data.items():
            cell_type_data[name] = dat.astype(dat.dtype.newbyteorder("="))
    for name, data in mesh.field_data.items():
        mesh.field_data[name] = data.astype(data.dtype.newbyteorder("="))
    def numpy_to_xml_array(parent, name, fmt, data):
        # Append one <DataArray> child holding `data`, either as
        # zlib-compressed base64 (binary mode) or whitespace-separated ASCII.
        da = ET.SubElement(
            parent, "DataArray", type=numpy_to_vtu_type[data.dtype], Name=name
        )
        if len(data.shape) == 2:
            da.set("NumberOfComponents", "{}".format(data.shape[1]))
        if write_binary:
            da.set("format", "binary")
            max_block_size = 32768
            data_bytes = data.tostring()
            blocks = _chunk_it(data_bytes, max_block_size)
            num_blocks = len(blocks)
            last_block_size = len(blocks[-1])
            compressed_blocks = [zlib.compress(block) for block in blocks]
            # Header layout: num_blocks, max uncompressed block size, size of
            # the last uncompressed block, then each compressed block size.
            header = numpy.array(
                [num_blocks, max_block_size, last_block_size]
                + [len(b) for b in compressed_blocks],
                dtype=vtu_to_numpy_type[header_type],
            )
            da.text = (
                base64.b64encode(header.tostring())
                + base64.b64encode(b"".join(compressed_blocks))
            ).decode()
        else:
            da.set("format", "ascii")
            s = BytesIO()
            numpy.savetxt(s, data.flatten(), fmt)
            da.text = s.getvalue().decode()
        return
    comment = ET.Comment("This file was created by meshio v{}".format(__version__))
    vtk_file.insert(1, comment)
    grid = ET.SubElement(vtk_file, "UnstructuredGrid")
    total_num_cells = sum([len(c) for c in mesh.cells.values()])
    piece = ET.SubElement(
        grid,
        "Piece",
        NumberOfPoints="{}".format(len(points)),
        NumberOfCells="{}".format(total_num_cells),
    )
    # points
    if points is not None:
        pts = ET.SubElement(piece, "Points")
        numpy_to_xml_array(pts, "Points", "%.11e", points)
    if mesh.cells is not None:
        cls = ET.SubElement(piece, "Cells")
        # create connectivity, offset, type arrays
        connectivity = numpy.concatenate(
            [numpy.concatenate(v) for v in mesh.cells.values()]
        )
        # offset (points to the first element of the next cell)
        offsets = [
            v.shape[1] * numpy.arange(1, v.shape[0] + 1) for v in mesh.cells.values()
        ]
        for k in range(1, len(offsets)):
            offsets[k] += offsets[k - 1][-1]
        offsets = numpy.concatenate(offsets)
        # types
        types = numpy.concatenate(
            [numpy.full(len(v), meshio_to_vtk_type[k]) for k, v in mesh.cells.items()]
        )
        numpy_to_xml_array(cls, "connectivity", "%d", connectivity)
        numpy_to_xml_array(cls, "offsets", "%d", offsets)
        numpy_to_xml_array(cls, "types", "%d", types)
    if mesh.point_data:
        pd = ET.SubElement(piece, "PointData")
        for name, data in mesh.point_data.items():
            numpy_to_xml_array(pd, name, "%.11e", data)
    if mesh.cell_data:
        cd = ET.SubElement(piece, "CellData")
        for name, data in raw_from_cell_data(mesh.cell_data).items():
            numpy_to_xml_array(cd, name, "%.11e", data)
    write_xml(filename, vtk_file, pretty_xml)
    return
def write_xml(filename, root, pretty_print=False):
    """Serialize the XML element tree rooted at *root* into *filename*."""
    from lxml import etree as ET
    ET.ElementTree(root).write(filename, pretty_print=pretty_print)
    return
| [
"nico.schloemer@gmail.com"
] | nico.schloemer@gmail.com |
aa5977428acf1b3da665912ae0e0285b48cc5ba3 | 79f541042e4b4d6bb443e7a758ca918817ea0f33 | /Pygame/09_PythonGameDevelopment.py | 93dd9e7f40723bcdebe22646355eeeccb814bb5a | [] | no_license | ashutoshm1771/Source-Code-from-Tutorials | d5f950db8f5f648e87303835e9558eeba404939a | f5552d4bd0f4bebcf5c674ff730fcb61f2d7a1ce | refs/heads/master | 2020-09-15T06:08:31.777622 | 2019-11-22T09:08:31 | 2019-11-22T09:08:31 | 223,364,275 | 4 | 0 | null | 2019-11-22T09:01:51 | 2019-11-22T09:01:48 | null | UTF-8 | Python | false | false | 789 | py | import pygame
# Minimal pygame demo: a black square that can be steered left/right with
# the arrow keys until the window is closed.
pygame.init()
# RGB color constants.
white = (255,255,255)
black = (0,0,0)
red = (255,0,0)
gameDisplay = pygame.display.set_mode((800,600))
pygame.display.set_caption('Slither')
gameExit = False
# Square position and per-frame horizontal velocity.
lead_x = 300
lead_y = 300
lead_x_change = 0
clock = pygame.time.Clock()
while not gameExit:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            gameExit = True
        if event.type == pygame.KEYDOWN:
            # Arrow keys set a persistent velocity (no key-up handling yet).
            if event.key == pygame.K_LEFT:
                lead_x_change = -10
            if event.key == pygame.K_RIGHT:
                lead_x_change = 10
    lead_x += lead_x_change
    gameDisplay.fill(white)
    pygame.draw.rect(gameDisplay, black, [lead_x,lead_y,10,10])
    pygame.display.update()
    clock.tick(15)  # cap at 15 frames per second
pygame.quit()
quit()
| [
"buckyroberts@gmail.com"
] | buckyroberts@gmail.com |
1c383a35c5f3d2a314d4952ad778c6e5be01641a | 4edb067c8c748e503e154bb2b9190843f6f1684a | /tests/test_text/test_postag.py | 31e8bb2600ffba623f8fbf1bb2281dcae63b8a9b | [
"Apache-2.0"
] | permissive | DistrictDataLabs/yellowbrick-docs-zh | 5ecbdccfaff4a6822d60250719b37af9b8d37f61 | 3118e67f2bed561a00885e6edb2cabb3520ad66b | refs/heads/master | 2021-04-09T11:00:29.709555 | 2019-04-06T15:23:55 | 2019-04-06T15:23:55 | 125,447,764 | 22 | 5 | Apache-2.0 | 2019-04-06T14:52:40 | 2018-03-16T01:37:09 | Python | UTF-8 | Python | false | false | 2,190 | py | # -*- coding: utf8 -*-
# tests.test_text.test_postag
# Tests for the part-of-speech tagging visualization
#
# Author: Rebecca Bilbro <rbilbro@districtdatalabs.com>
# Created: 2017-03-22 15:46
#
# Copyright (C) 2017 District Data Labs
# For license information, see LICENSE.txt
#
# ID: test_postag.py [bd9cbb9] rebecca.bilbro@bytecubed.com $
"""
Tests for the part-of-speech tagging visualization
"""
##########################################################################
## Imports
##########################################################################
import pytest
from yellowbrick.text.postag import *
try:
import nltk
from nltk import pos_tag, word_tokenize
except ImportError:
nltk = None
##########################################################################
## Data
##########################################################################
# Sample recipe text used as the corpus for the POS-tagging smoke test below.
pie = """
In a small saucepan, combine sugar and eggs
until well blended. Cook over low heat, stirring
constantly, until mixture reaches 160° and coats
the back of a metal spoon. Remove from the heat.
Stir in chocolate and vanilla until smooth. Cool
to lukewarm (90°), stirring occasionally. In a small
bowl, cream butter until light and fluffy. Add cooled
chocolate mixture; beat on high speed for 5 minutes
or until light and fluffy. In another large bowl,
beat cream until it begins to thicken. Add
confectioners' sugar; beat until stiff peaks form.
Fold into chocolate mixture. Pour into crust. Chill
for at least 6 hours before serving. Garnish with
whipped cream and chocolate curls if desired.
"""
##########################################################################
## PosTag Tests
##########################################################################
class TestPosTag(object):
    """
    PosTag (Part of Speech Tagging Visualizer) Tests
    """
    @pytest.mark.skipif(nltk is None, reason="test requires nltk")
    def test_integrated_postag(self):
        """
        Smoke test: tokenizing and POS-tagging the sample text and feeding
        it through PosTagVisualizer.transform raises no errors.
        """
        tokens = word_tokenize(pie)
        tagged = pos_tag(tokens)
        visualizer = PosTagVisualizer()
        visualizer.transform(tagged)
| [
"benjamin@bengfort.com"
] | benjamin@bengfort.com |
230af37383f1ca4090d9c867135ee998803d7b1c | d7ca36f20465870e67e7d6832f8e1b8348af12fc | /calculate/txt/cal_existed_ratio.py | 7984c110ad59c21e6693e043ff37b090a8839091 | [] | no_license | hlcr/LanguageNetworkAnalysis | c109e670534367c782fb71697a92a3ca95aba098 | 65f6c8086f3e4282b15359cc99cf57a682e6b814 | refs/heads/master | 2020-04-24T07:40:04.100213 | 2020-04-17T09:02:05 | 2020-04-17T09:02:05 | 171,805,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,282 | py | import tool.util as util
import networkx as nx
import os
import copy
from collections import OrderedDict
from collections import Counter
def get_speical_dict(w_dict):
    """Return the leading entries of *w_dict* as an OrderedDict.

    Collection stops at the first value equal to 1, or — after the 50th
    entry — at the first value that drops below the 50th entry's value.
    (The function name keeps its original spelling for compatibility.)
    """
    selected = OrderedDict()
    threshold = 0
    for position, (word, weight) in enumerate(w_dict.items(), start=1):
        if weight == 1 or weight < threshold:
            break
        selected[word] = weight
        if position == 50:
            # Freeze the 50th weight; later entries must not fall below it.
            threshold = weight
    return selected
def calculate_existed_ratio(g1, g2, d1):
    # Fraction of g1's selected top words that also occur in g2.
    # g1/g2: word -> frequency dicts; d1 is currently unused (it fed the
    # commented-out k-core filter below).
    # NOTE(review): returns the int 0 when the selection is empty but a
    # *string* otherwise — callers apply str() to the result, so this is
    # tolerated, but the mixed return type is worth confirming/fixing.
    gg = get_speical_dict(g1)
    s1 = set(gg.keys())
    # s1 = set(gg.keys()) & set(nx.k_core(d1).nodes())
    s2 = set(g2.keys())
    s3 = s1 & s2
    # return str(len(s1))+"\t"+str(len(s3))
    if len(s1) == 0:
        return 0
    return str(len(s3)/len(s1))
# 计算最大公共子图的比率
# pkl_dir: pkl 所在的目录
# mcs_dir: 结果生成的目录
# is_front: 是否跟前面的比较
# key_word:关键词
# lap: 步长
def loop_compare(com_function, keyword_list, pkl_dir1, result_dir, mode=1, lap=1, type="pkl"):
    # Walk time-ordered network snapshot files per keyword and compare
    # consecutive snapshots with `com_function`, writing results to disk.
    #   com_function: comparison callable; its signature depends on `mode`
    #                 (mode 0/1: (g1, g2); mode 2: (g1, g2, d1)).
    #   pkl_dir1:     directory template, formatted with each keyword.
    #   mode:         0 = per-pair result files, 1 = two-line results,
    #                 2 = one-line results (extra graph d1 passed in).
    #   lap:          step size; lap > 1 composes `lap` snapshots into one.
    # NOTE(review): the `type` parameter shadows the builtin and is unused.
    for key in keyword_list:
        print(key)
        if mode == 0:
            util.create_directory(result_dir + key + "//")
        pkl_dir = pkl_dir1.format(key)
        f_list = util.get_file_list(pkl_dir, '.txt')
        os.chdir(pkl_dir)
        result_list = []
        # Ascending (chronological) order; iteration runs newest -> oldest.
        nw_list = sorted(f_list)
        ii = len(nw_list)-1
        while ii - 2*lap >= 0:
            g2 = util.txt2dict(util.get_list_from_file(nw_list[ii]))
            # Compose the previous lap-1 snapshots into the newer graph.
            k = 1
            while k < lap:
                g2 = nx.compose(g2, util.get_nw(nw_list[ii - k]))
                k += 1
            ii -= lap
            g1 = util.txt2dict(util.get_list_from_file(nw_list[ii]))
            # Auxiliary pickled graph for mode 2 (hard-coded Windows path).
            d1 = util.get_nw("D:\semantic analysis\新结果\去虚词去单字共现网络//{0}//p//".format(key)+nw_list[ii].split(".")[0]+".pkl")
            # Compose the previous lap-1 snapshots into the older graph too.
            k = 1
            while k < lap:
                g1 = nx.compose(g1, util.get_nw(nw_list[ii - k]))
                k += 1
            # Dispatch on mode; deepcopy shields inputs from mutation.
            if mode == 1:
                r1, r2 = com_function(copy.deepcopy(g1), copy.deepcopy(g2))
                result_list.append(nw_list[ii + lap][0:-4] + "\t" + str(r1))
                result_list.append((nw_list[ii][0:-4] + "\t" + str(r2)))
            elif mode == 0:
                result_list = com_function(copy.deepcopy(g1), copy.deepcopy(g2))
                util.save_file(result_dir + key + "//" + nw_list[ii + lap][0:-4] + ".txt", result_list)
            elif mode == 2:
                r1 = com_function(copy.deepcopy(g1), copy.deepcopy(g2), d1)
                # result_list.append(str(r1))
                result_list.append(nw_list[ii + lap][0:-4] + "\t" + str(r1))
            ii -= lap
        if mode != 0:
            # Restore chronological order before saving.
            result_list.reverse()
        util.save_file(result_dir+key+".txt", result_list)
# Script entry: compute, for each keyword, the retention ratio of top-frequency
# words between consecutive co-occurrence network snapshots (mode 2, step 1).
key_list = ["美好","无聊"]
pkl_dir = r"D:\semantic analysis\新结果\去重去虚词去单字词频数\{0}//"
result_dir = r"D:\semantic analysis\新结果\去虚词去单字共现网络最大频率全图节点保留比例//"
loop_compare(calculate_existed_ratio, key_list, pkl_dir, result_dir, 2, 1)
# loop_compare(same_node_degree, key_list, pkl_dir, result_dir, 0)
| [
"hongliryan@gmail.com"
] | hongliryan@gmail.com |
acabbba519a046ce16017002f4e7c7e7f0addf34 | 718f96baef2f8693d9ddf8199e2d6352b2003d60 | /wsib/urls.py | 37fc241159db847a2bd6f93c4d7727167cb92f1b | [] | no_license | Sandaru95/wsib | 95daa08c5352504a564e27c3f3eabbef7e44c39d | 1a55030caad57ce3cc59e7bae9bffc3a7fbabaff | refs/heads/master | 2020-09-13T03:27:26.762096 | 2019-11-20T21:59:43 | 2019-11-20T21:59:43 | 222,643,736 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from . import views
# URL routing table: admin site, landing redirect, and the two feature apps.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.ReturnToHomeView.as_view()),
    path('home/', include('home.urls')),
    path('detail/', include('detail.urls')),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"unknown@example.com"
] | unknown@example.com |
de0d500cff740ded2b8b65ed5b9fa0c2b6958890 | fa0c53ac2a91409eaf0fc7c082a40caae3ffa0d8 | /com/lc/demoLearnPython/python_coroutine.py | 440f38100a78c152f46f97d2175aa78035aae963 | [] | no_license | ahviplc/pythonLCDemo | aba6d8deb1e766841461bd772560d1d50450057b | 22f149600dcfd4d769e9f74f1f12e3c3564e88c2 | refs/heads/master | 2023-07-24T01:41:59.791913 | 2023-07-07T02:32:45 | 2023-07-07T02:32:45 | 135,969,516 | 7 | 2 | null | 2023-02-02T03:24:14 | 2018-06-04T04:12:49 | Python | UTF-8 | Python | false | false | 3,443 | py | # _*_ coding: utf-8 _*_
"""
python_coroutine.py
Version: 0.1
Author: LC
DateTime: 2018年11月28日11:10:11
一加壹博客最Top-一起共创1+1>2的力量!~LC
LC博客url: http://oneplusone.top/index.html
http://oneplusone.vip/index.html
"""
import asyncio
import aiohttp
import threading
# 生产者、消费者例子
def consumer():
    """Generator-based consumer: yields a reply string for every value sent in.

    The priming send(None) returns "init ok"; each subsequent send(n)
    returns "consume n OK".
    """
    print("[Consumer] Init Consumer ......")
    reply = "init ok"
    while True:
        # Receive the producer's message; hand back the previous reply.
        received = yield reply
        print("[Consumer] conusme n = %s, r = %s" % (received, reply))
        reply = "consume %s OK" % received
def produce(c):
    """Drive the consumer generator *c*: prime it, then feed it five messages."""
    print("[Producer] Init Producer ......")
    # Priming send(None) starts the generator and yields its initial reply.
    first_reply = c.send(None)
    print("[Producer] Start Consumer, return %s" % first_reply)
    for msg in range(1, 6):
        print("[Producer] While, Producing %s ......" % msg)
        # Sending switches execution to the consumer until its next yield.
        reply = c.send(msg)
        print("[Producer] Consumer return: %s" % reply)
    c.close()
    print("[Producer] Close Producer ......")
# produce(consumer())
# 异步IO例子:适配Python3.4,使用asyncio库
@asyncio.coroutine
def hello(index):
    """Generator-style coroutine (Python 3.4 idiom) simulating an I/O task.

    NOTE: asyncio.coroutine and yield-from coroutines were removed in
    Python 3.11; this section is kept for historical comparison only.
    """
    print('Hello world! index=%s, thread=%s' % (index, threading.currentThread()))
    # Simulated I/O: control returns to the event loop for one second.
    yield from asyncio.sleep(1)
    # BUG FIX: a stray "@asyncio.coroutine" fused onto the end of this line
    # (a copy/paste corruption) made the module a syntax error; removed.
    print('Hello again! index=%s, thread=%s' % (index, threading.currentThread()))
loop = asyncio.get_event_loop()  # obtain an event loop
tasks = [hello(1), hello(2)]  # the coroutines to run concurrently
loop.run_until_complete(asyncio.wait(tasks))  # run until both finish
loop.close()  # close the event loop
# Async I/O example for Python 3.5+: the same coroutine expressed with the
# async/await keywords instead of the asyncio.coroutine decorator.
async def hello1(index):  # `async def` declares a native coroutine
    print('Hello world! index=%s, thread=%s' % (index, threading.currentThread()))
    await asyncio.sleep(1)  # simulated I/O task; yields to the event loop
    print('Hello again! index=%s, thread=%s' % (index, threading.currentThread()))
loop = asyncio.get_event_loop()  # obtain an event loop
tasks = [hello1(1), hello1(2)]  # the coroutines to run concurrently
loop.run_until_complete(asyncio.wait(tasks))  # run until both finish
loop.close()  # close the event loop
# aiohttp example: fetch several URLs concurrently on one event loop.
async def get(url):
    # One session per request keeps the demo simple; real code would share
    # a single ClientSession across requests.
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            print(url, resp.status)
            print(url, await resp.text())
loop = asyncio.get_event_loop()  # obtain an event loop
tasks = [  # one fetch coroutine per app-store detail page
    get("http://zhushou.360.cn/detail/index/soft_id/3283370"),
    get("http://zhushou.360.cn/detail/index/soft_id/3264775"),
    get("http://zhushou.360.cn/detail/index/soft_id/705490")
]
loop.run_until_complete(asyncio.wait(tasks))  # run all fetches to completion
loop.close()  # close the event loop
| [
"ahlc@sina.cn"
] | ahlc@sina.cn |
c4ba986be18b469faf2ba1f5a3a4d5d81c6b8e66 | 788e275792f21d8b62334cddd718e6dfa347a7e2 | /citizensnb/pipelines.py | 1612d1a3090ba3dbbdeb99e6266a69d83dccd121 | [] | no_license | daniel-kanchev/citizensnb | 7d99e4e6d976cc173c4ec2fa9c9ec31bdf89a750 | 53fc15778834a3d415fa0eaf41ceed4f9a85243e | refs/heads/main | 2023-04-01T22:33:09.936186 | 2021-04-08T11:56:58 | 2021-04-08T11:56:58 | 355,888,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | from itemadapter import ItemAdapter
import sqlite3
class DatabasePipeline:
    """Scrapy item pipeline persisting scraped articles into SQLite.

    NOTE(review): the connection/cursor are class attributes created at
    import time and shared by all instances; open_spider drops and
    recreates the table on every run.
    """
    # Database setup
    conn = sqlite3.connect('citizensnb.db')
    c = conn.cursor()
    def open_spider(self, spider):
        # Recreate the articles table from scratch for each crawl.
        self.c.execute(""" DROP TABLE IF EXISTS articles """)
        self.c.execute(""" CREATE TABLE articles (
            title text,
            date text,
            link text,
            content text
            ) """)
    def process_item(self, item, spider):
        # Insert one scraped article; missing fields become NULL via .get().
        self.c.execute("INSERT INTO articles ("
                       "title, "
                       "date, "
                       "link, "
                       "content)"
                       " VALUES (?,?,?,?)",
                       (item.get('title'),
                        item.get('date'),
                        item.get('link'),
                        item.get('content')
                        ))
        if 'link' in item.keys():
            print(f"New Article: {item['link']}")
        else:
            print(f"New Article: {item['title']}")
        self.conn.commit()  # commit after every entry
        return item
    def close_spider(self, spider):
        # Final commit before the connection is closed at shutdown.
        self.conn.commit()
self.conn.close() | [
"daniel.kanchev@adata.pro"
] | daniel.kanchev@adata.pro |
f7e7997bb9af7f47fab1ac86f96bfc6ad43b2dbd | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-bssintl/huaweicloudsdkbssintl/v2/model/list_sub_customer_coupons_response.py | cf9c5ebc5ddb0a9eee98ed289b96806ff56b8f60 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,441 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListSubCustomerCouponsResponse(SdkResponse):
    """Generated SDK response model for listing a sub-customer's coupons.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    sensitive_list = []
    openapi_types = {
        'count': 'int',
        'user_coupons': 'list[IQueryUserCouponsResultV2]'
    }
    attribute_map = {
        'count': 'count',
        'user_coupons': 'user_coupons'
    }
    def __init__(self, count=None, user_coupons=None):
        """ListSubCustomerCouponsResponse - a model defined in huaweicloud sdk"""
        super(ListSubCustomerCouponsResponse, self).__init__()
        self._count = None
        self._user_coupons = None
        self.discriminator = None
        if count is not None:
            self.count = count
        if user_coupons is not None:
            self.user_coupons = user_coupons
    @property
    def count(self):
        """Gets the count of this ListSubCustomerCouponsResponse.

        Total number of coupon records.

        :return: The count of this ListSubCustomerCouponsResponse.
        :rtype: int
        """
        return self._count
    @count.setter
    def count(self, count):
        """Sets the count of this ListSubCustomerCouponsResponse.

        Total number of coupon records.

        :param count: The count of this ListSubCustomerCouponsResponse.
        :type: int
        """
        self._count = count
    @property
    def user_coupons(self):
        """Gets the user_coupons of this ListSubCustomerCouponsResponse.

        Coupon records; see IQueryUserCouponsResult for details.

        :return: The user_coupons of this ListSubCustomerCouponsResponse.
        :rtype: list[IQueryUserCouponsResultV2]
        """
        return self._user_coupons
    @user_coupons.setter
    def user_coupons(self, user_coupons):
        """Sets the user_coupons of this ListSubCustomerCouponsResponse.

        Coupon records; see IQueryUserCouponsResult for details.

        :param user_coupons: The user_coupons of this ListSubCustomerCouponsResponse.
        :type: list[IQueryUserCouponsResultV2]
        """
        self._user_coupons = user_coupons
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes declared sensitive in serialized output.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListSubCustomerCouponsResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
1dce5d2fab9f4c3846f11d3dcfe047c8042d3654 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R1/benchmark/startCirq145.py | 1a54dc465638320a4a50b1bdc04f6cd82d0003f3 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,800 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=12
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Assemble the fixed 4-qubit benchmark circuit and final measurement.

    The trailing "number=k" tags preserve the generator's gate bookkeeping.
    """
    gate_sequence = (
        cirq.H.on(input_qubit[0]),                       # number=1
        cirq.H.on(input_qubit[1]),                       # number=2
        cirq.CNOT.on(input_qubit[3], input_qubit[0]),    # number=6
        cirq.Z.on(input_qubit[3]),                       # number=7
        cirq.H.on(input_qubit[0]),                       # number=9
        cirq.CZ.on(input_qubit[3], input_qubit[0]),      # number=10
        cirq.H.on(input_qubit[0]),                       # number=11
        cirq.H.on(input_qubit[2]),                       # number=3
        cirq.H.on(input_qubit[3]),                       # number=4
    )
    circuit = cirq.Circuit()  # circuit begin
    for operation in gate_sequence:
        # One append per operation keeps the original moment packing.
        circuit.append(operation)
    # circuit end
    circuit.append(cirq.measure(*input_qubit, key='result'))
    return circuit
def bitstring(bits):
    """Render an iterable of bit-like values as a string of '0'/'1' characters."""
    digits = [str(int(bit)) for bit in bits]
    return ''.join(digits)
# Script entry point: build, compile, simulate the demo circuit and log results.
if __name__ == '__main__':
qubit_count = 4
# Qubits are laid out on a single grid column: (0,0) .. (3,0).
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
# Compile for the Sycamore device using the sqrt-iswap gate set.
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
# Histogram keyed by the joint measurement, folded into '0'/'1' strings.
frequencies = result.histogram(key='result', fold_func=bitstring)
# NOTE(review): file opened without a `with` block; relies on the trailing
# writefile.close() call after these prints -- confirm it always runs.
writefile = open("../data/startCirq145.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
# Number of moments in the compiled circuit.
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
348467fbb33b3dd2a17a50137f42632fbc2167e2 | 9a8edc31e2898b4abe3d5be57459d15237ae90b6 | /Groupe3_BD_Serveur-master/g8/g3_tf_idf_week_v2.1.py | 259b81b7b5b50c735c05d887cb81f14de1eba16b | [] | no_license | sid-ut3/watchnews | c6245c01d6d4ff840113bd39826b58da2efbef8f | 3a2e1b56acefe26f4e7d99910d002e27699da302 | refs/heads/master | 2020-04-01T17:21:47.596354 | 2018-10-17T10:03:37 | 2018-10-17T10:03:37 | 153,425,499 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,410 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 15 08:57:58 2018
@author: laura
"""
"""
g1
@Author : F.C
g3
@Author : L.B. M.I. A.H. C.G.
g8
@Author : F.R.
"""
import MySQLdb
from datetime import datetime, timedelta
import timestring
from flask import Flask, request, jsonify
import json
import requests
from flask_restful import Resource, Api
import mysql.connector
from flask_mysqldb import MySQL
app = Flask(__name__) # we are using this variable to use the flask
# microframework
api = Api(app)
# MySQL configurations
servername = "localhost"
username = "root"
passwordDB = ""  # NOTE(review): empty root password hard-coded in source -- move credentials to config/env
databasename = "bdd_test"
# Single shared module-level connection used by exec_query below.
db = MySQLdb.connect(user = username, passwd = passwordDB,
host = servername, db = databasename)
@app.route("/link_by_source/", methods = ['GET', 'POST', 'PATCH', 'PUT', 'DELETE'])
def exec_query(query):
    """Execute *query* on the shared MySQL connection and return every row.

    input : query -- a complete SQL statement (string)
    output : result of the query, the tuple of rows from cursor.fetchall()

    Fix: the cursor is now closed in a ``finally`` block, so it is no
    longer leaked when ``execute`` or ``fetchall`` raises.
    """
    # NOTE(review): the @app.route("/link_by_source/") decorator above is
    # attached to this helper, which requires a positional `query` argument;
    # it looks like it was intended for api_link_by_source -- confirm.
    cursor = db.cursor()
    try:
        cursor.execute(query)
        return cursor.fetchall()
    finally:
        # Always release the cursor, even if the statement fails.
        cursor.close()
def api_link_by_source():
"""
input : /
output : json data
this function returns a json data formated the way Stats wanted it
"""
# NOTE(review): the route decorator for this endpoint currently sits on
# exec_query above; presumably it was meant for this handler -- confirm.
week = {}
list_word = []
# Week boundaries: earliest and latest publication dates in the view.
date_min = """SELECT MIN(date_publication) FROM mv_tf_idf_week"""
date_max = """SELECT MAX(date_publication) FROM mv_tf_idf_week"""
date_min_res = exec_query (date_min)
date_max_res = exec_query (date_max)
# fetchall() yields a tuple of rows, so [0][0] extracts the single scalar.
date_max_res = str(date_max_res[0][0])
date_min_res = str(date_min_res[0][0])
week["Period"] = date_min_res + " - " + date_max_res
# All distinct word ids appearing during the period.
# NOTE(review): queries below are built by string interpolation; the values
# come from the database itself, but parameterized queries would be safer.
id_words = """SELECT DISTINCT id_word FROM mv_tf_idf_week WHERE
date_publication BETWEEN %s and %s ORDER BY id_word
""" % ("'" + date_min_res + "'", "'" + date_max_res + "'" )
id_words_res = exec_query (id_words)
for i in range(0, len(id_words_res)):
list_word.append(id_words_res[i][0])
# For every word, collect per-day lists of tf and tf_idf scores.
# NOTE(review): one query per word/day/article (N+1 pattern) -- fine for
# small data sets, but a join would scale better.
for word in range(0, len(list_word)):
week_words_tf_idf =[]
week_words_tf = []
list_week = []
for day in range(7):
# Day under inspection: date_min plus the day offset.
day_query = datetime.strptime(date_min_res, "%Y-%m-%d") \
+ timedelta(days=day)
list_article = []
id_article = """SELECT id_article FROM mv_tf_idf_week WHERE
date_publication = %s AND id_word = %s ORDER BY id_article
""" % ("'" + str(day_query) + "'", list_word[word])
id_article_res = exec_query (id_article)
list_tf_idf = []
list_tf = []
for article in range(0, len(id_article_res)):
list_article.append(id_article_res[article][0])
q_tf_idf = """SELECT tf_idf FROM mv_tf_idf_week WHERE
id_word = %s AND id_article = %s AND date_publication = %s
""" % (list_word[word], list_article[article], "'"
+ str(day_query) + "'")
tf_idf_res = exec_query (q_tf_idf)
tf = []
tf_idf = []
for j in range(0, len(tf_idf_res)):
tf_idf.append(tf_idf_res[j][0])
# Only the first fetched score per article is kept.
list_tf_idf.append(tf_idf[0])
q_tf = """SELECT tf FROM mv_tf_idf_week WHERE
id_word = %s AND id_article = %s AND date_publication = %s
""" % (list_word[word], list_article[article], "'"
+ str(day_query) + "'")
tf_res = exec_query (q_tf)
for k in range(0, len(tf_res)):
tf.append(tf_res[k][0])
list_tf.append(tf[0])
week_words_tf_idf.append(list_tf_idf)
list_week.append(list_article)
week_words_tf.append(list_tf)
# One pair of response keys per word: "<id>_tf_idf" and "<id>_tf".
week[str(list_word[word])+"_tf_idf"] = week_words_tf_idf
week[str(list_word[word])+"_tf"] = week_words_tf
json = jsonify(week)
return json | [
"kps@anthracite.local"
] | kps@anthracite.local |
8e8d4a5bcd47f7570f4054c3f0e64a1f6642bbbf | 80c9df63673ffa88ed1ef54cece43e14734d1e0f | /run-client.py | 21039e4c700e389c1260dcd2f846aa44f0bf5673 | [] | no_license | Chovin/Troop | 03407e4ebc1012253bf1e6b1415e9db6254fc9c4 | 37ade96be9d5cc0d34b7b106755fe9d8f5ea8c2a | refs/heads/master | 2021-08-06T20:18:32.344172 | 2017-10-31T15:20:36 | 2017-10-31T15:20:36 | 109,102,534 | 0 | 0 | null | 2017-11-01T07:48:16 | 2017-11-01T07:48:16 | null | UTF-8 | Python | false | false | 1,917 | py | #!/usr/bin/env python
"""
Troop-Client
------------
Real-time collaborative Live Coding.
- Troop is a real-time collaborative tool that enables group live
coding within the same document. To run the client application it
must be able to connect to a running Troop Server instance on
your network.
- Using other Live Coding Languages:
Troop is designed to be used with FoxDot (http://foxdot.org) but
is also configured to work with Tidal Cycles (http://tidalcycles.org).
You can run this file with the `--mode` flag followed by "tidalcycles"
to use the Tidal Cycles language. You can also use any other application
that can accept code commands as strings via the stdin by specifying
the path of the interpreter application, such as ghci in the case of
Tidal Cycles, in place of the "tidalcycles" string when using the
`--mode` flag.
"""
from src.client import Client
from src.config import *
import os.path
import sys
# Choose the live-coding interpreter: `--mode <name>` selects an alternative
# (e.g. "tidalcycles"); otherwise the FoxDot interpreter is used.
if "--mode" in sys.argv:
name = sys.argv[ sys.argv.index("--mode") + 1 ]
lang = getInterpreter(name)
else:
lang = FOXDOT
# Resolve the server address: public-server flag > client.cfg file > prompt.
if "-p" in sys.argv or "--public" in sys.argv:
host, port = PUBLIC_SERVER_ADDRESS
elif os.path.isfile('client.cfg'):
host, port = Client.read_configuration_file('client.cfg')
"""
You can set a configuration file if you are connecting to the same
server on repeated occasions. A password should not be stored. The
file (client.cfg) should look like:
host=<host_ip>
port=<port_no>
"""
else:
host = readin("Troop Server Address", default="localhost")
port = readin("Port Number", default="57890")
# Enable session logging with `--log` / `-l`.
if "--log" in sys.argv or "-l" in sys.argv:
logging = True
else:
logging = False
# `name` is re-bound here to the user's display name (it may previously have
# held the --mode argument); spaces are replaced since names must be one word.
name = readin("Enter a name").replace(" ", "_")
# NOTE(review): constructing Client presumably starts the UI/connection as a
# side effect of __init__ -- confirm in src.client.
myClient = Client(host, port, name, lang, logging)
| [
"ryankirkbride26@gmail.com"
] | ryankirkbride26@gmail.com |
f9d2b343bf2502d1ead8b7f8b4962e7f3498deb9 | 8fd07ea363ba4263bafe25d213c72cc9a93e2b3e | /nsd2018-master/nsd1802/python/day03/position_args.py | 24fd09342ec789d08e99e8d2d19e2c6ac10e7456 | [] | no_license | ml758392/python_tedu | 82e12ae014f0fc81230386fab07f901510fc8837 | 9f20798604db0ac8cd7b69d8c7a52ee361ebc7a7 | refs/heads/master | 2020-04-12T08:30:42.354663 | 2019-03-29T11:55:30 | 2019-03-29T11:55:30 | 162,386,878 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | import sys
print(sys.argv) # sys.argv is the argv list provided by the sys module
# Example invocations (argv grows with each extra command-line word):
# python3 position_args.py
# python3 position_args.py 10
# python3 position_args.py 10 bob
| [
"yy.tedu.cn"
] | yy.tedu.cn |
868c8919f895151b196d2ca5d3243b356e5b1603 | 7a57aeafb5bcf30510649a7e9e32400ff7994815 | /virtual/bin/django-admin | 3478668038e80cdb831bf4811a9756da9fab1a02 | [] | no_license | EugeneZnm/FINPLANNER | 2b17e53a461742a4889362b509b29cf5a4484be0 | ef3dd88be02bed091dbadcc9fbc500bd1ff9740d | refs/heads/master | 2022-12-15T22:51:08.822562 | 2020-12-01T22:59:08 | 2020-12-01T22:59:08 | 155,441,523 | 1 | 0 | null | 2022-11-22T03:05:21 | 2018-10-30T19:06:14 | Python | UTF-8 | Python | false | false | 323 | #!/home/eugene/Documents/Moringa/CORE/PYTHON/FINPLANNER/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"eugenenzioki@gmail.com"
] | eugenenzioki@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.