Dataset schema (column: type, value range):
blob_id: string, length 40
directory_id: string, length 40
path: string, length 3–288
content_id: string, length 40
detected_licenses: list, length 0–112
license_type: string, 2 classes
repo_name: string, length 5–115
snapshot_id: string, length 40
revision_id: string, length 40
branch_name: string, 684 classes
visit_date: timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38
revision_date: timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00
committer_date: timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06
github_id: int64, 4.92k – 681M, nullable
star_events_count: int64, 0 – 209k
fork_events_count: int64, 0 – 110k
gha_license_id: string, 22 classes
gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable
gha_created_at: timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable
gha_language: string, 147 classes
src_encoding: string, 25 classes
language: string, 1 class
is_vendor: bool, 2 classes
is_generated: bool, 2 classes
length_bytes: int64, 128 – 12.7k
extension: string, 142 classes
content: string, length 128 – 8.19k
authors: list, length 1
author_id: string, length 1–132

blob_id: aa478f8db29f79c33c87d31990ab69000a083ef4 | directory_id: ec153cf6c65b02d8d714e042bbdcf476001c6332 | content_id: 715c3f57adb1b772f3513ae78202a66c8feed358
path: /openstack_dashboard/enabled/_802_metadata_defs.py
detected_licenses: [] | license_type: no_license
repo_name: bopopescu/dashboard | branch_name: refs/heads/master
snapshot_id: c4322f7602a9ba589400212aaef865ed4ffa8bdb | revision_id: a74b4a549cd7d516dd9a0f5f2e17d06679c13bf6
visit_date: 2022-11-21T15:56:42.755310 | revision_date: 2017-07-05T12:04:14 | committer_date: 2017-07-05T12:04:17
github_id: 281,596,428 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2020-07-22T06:38:37 | gha_created_at: 2020-07-22T06:38:36 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 406 | extension: py
content:
# The slug of the panel to be added to HORIZON_CONFIG. Required.
PANEL = 'metadata_defs'
# The slug of the dashboard the PANEL associated with. Required.
PANEL_DASHBOARD = 'system'
# The slug of the panel group the PANEL is associated with.
PANEL_GROUP = 'default'
# Python panel class of the PANEL to be added.
ADD_PANEL = ('openstack_dashboard.dashboards.admin.metadata_defs.panel.MetadataDefinitions')
authors: ["laurencechan@qq.com"] | author_id: laurencechan@qq.com

blob_id: a629ff545360e6bd157e394d377cbc1f1330141e | directory_id: 98c6ea9c884152e8340605a706efefbea6170be5 | content_id: 9e72945c0d743ddcf7d64cd2596254bb5b69226b
path: /examples/data/Assignment_6/mtttaf002/question1.py
detected_licenses: [] | license_type: no_license
repo_name: MrHamdulay/csc3-capstone | branch_name: refs/heads/master
snapshot_id: 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | revision_id: 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
visit_date: 2021-03-12T21:55:57.781339 | revision_date: 2014-09-22T02:22:22 | committer_date: 2014-09-22T02:22:22
github_id: 22,372,174 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 542 | extension: py
content:
"""produce right aligned list of names
tafara mtutu
20 apr 2014"""
names = []
count = 0
aligned = []
sort = ""
#ask user for names
print("Enter strings (end with DONE):")
name = input()
while name.lower() != "done":
if count < len(name):
count = len(name)
names.append(name)
name = input()
#make length of equal to the length of longest string
for i in names:
sort = " "*(count-len(i)) + i
aligned.append(sort)
print()
print("Right-aligned list:")
for j in aligned:
print(j)
authors: ["jarr2000@gmail.com"] | author_id: jarr2000@gmail.com

blob_id: e5500f8613dd97c63af38a515d3fcaed24f1edfc | directory_id: ef3fe422fc5644ce37cef2e8eb47a615e0865f27 | content_id: a68a172a7b3aeffd93fd5ece78bd0461e3d8fca2
path: /0x00-python_variable_annotations/100-safe_first_element.py
detected_licenses: [] | license_type: no_license
repo_name: Manuelpv17/holbertonschool-web_back_end | branch_name: refs/heads/main
snapshot_id: b1b6d993b378f60e3d2312079b49fb059a2e14a7 | revision_id: c4c60bf08648a8e9c846147808b6a7fbd9a818a7
visit_date: 2023-08-27T11:10:50.496692 | revision_date: 2021-10-17T16:54:21 | committer_date: 2021-10-17T16:54:21
github_id: 366,537,768 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 306 | extension: py
content:
#!/usr/bin/env python3
""" 10. Duck typing - first element of a sequence """
from typing import Sequence, Union, Any
def safe_first_element(lst: Sequence[Any]) -> Union[Any, None]:
""" 10. Duck typing - first element of a sequence """
if lst:
return lst[0]
else:
return None
authors: ["manuelpv17@outlook.com"] | author_id: manuelpv17@outlook.com

blob_id: fdc09392606dbaa4da061b3a530db0f87a8dc68c | directory_id: 8771c94dce3c7e30c9e5b5f45cf8683ba9cac6fd | content_id: 369900a44f586dcd107afb5c442e1ac2172ed57f
path: /leetcode/algorithms/p0338_counting_bits_1.py
detected_licenses: [] | license_type: no_license
repo_name: J14032016/LeetCode-Python | branch_name: refs/heads/master
snapshot_id: f2a80ecb7822cf12a8ae1600e07e4e6667204230 | revision_id: 9a8f5329d7c48dd34de3105c88afb5e03c2aace4
visit_date: 2023-03-12T02:55:45.094180 | revision_date: 2021-03-07T07:55:03 | committer_date: 2021-03-07T07:55:03
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 314 | extension: py
content:
from typing import List
class Solution:
def countBits(self, num: int) -> List[int]:
return [self._hammingWeight(x) for x in range(num + 1)]
def _hammingWeight(self, n: int) -> int:
count = 0
while n > 0:
n = n & (n - 1)
count += 1
return count
authors: ["mao_xiaodan@hotmail.com"] | author_id: mao_xiaodan@hotmail.com

blob_id: 10d914f403ac5bfd4aacc7330c3db318947f429e | directory_id: e20ed90b9be7a0bcdc1603929d65b2375a224bf6 | content_id: 51fb53a5a5d184165370e0966a17a0a5662d4247
path: /generated-libraries/python/netapp/net/net_ifgrp_info.py
detected_licenses: ["MIT"] | license_type: permissive
repo_name: radekg/netapp-ontap-lib-gen | branch_name: refs/heads/master
snapshot_id: 530ec3248cff5ead37dc2aa47ced300b7585361b | revision_id: 6445ebb071ec147ea82a486fbe9f094c56c5c40d
visit_date: 2016-09-06T17:41:23.263133 | revision_date: 2015-01-14T17:40:46 | committer_date: 2015-01-14T17:40:46
github_id: 29,256,898 | star_events_count: 2 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,702 | extension: py
content:
from netapp.netapp_object import NetAppObject
class NetIfgrpInfo(NetAppObject):
"""
Network interface group information
When returned as part of the output, all elements of this typedef
are reported, unless limited by a set of desired attributes
specified by the caller.
<p>
When used as input to specify desired attributes to return,
omitting a given element indicates that it shall not be returned
in the output. In contrast, by providing an element (even with
no value) the caller ensures that a value for that element will
be returned, given that the value can be retrieved.
<p>
When used as input to specify queries, any element can be omitted
in which case the resulting set of objects is not constrained by
any specific value of that attribute.
"""
_node = None
@property
def node(self):
"""
Specifies the name of node.
Attributes: key, required-for-create, non-modifiable
"""
return self._node
@node.setter
def node(self, val):
if val != None:
self.validate('node', val)
self._node = val
_up_ports = None
@property
def up_ports(self):
"""
Specifies all active ports of an ifgrp.
Attributes: non-creatable, non-modifiable
"""
return self._up_ports
@up_ports.setter
def up_ports(self, val):
if val != None:
self.validate('up_ports', val)
self._up_ports = val
_down_ports = None
@property
def down_ports(self):
"""
Specifies all inactive ports of an ifgrp.
Attributes: non-creatable, non-modifiable
"""
return self._down_ports
@down_ports.setter
def down_ports(self, val):
if val != None:
self.validate('down_ports', val)
self._down_ports = val
_mac_address = None
@property
def mac_address(self):
"""
Specifies the MAC address of the ifgrp.
For example: '02:0c:29:78:e1:b7'
Attributes: non-creatable, non-modifiable
"""
return self._mac_address
@mac_address.setter
def mac_address(self, val):
if val != None:
self.validate('mac_address', val)
self._mac_address = val
_ifgrp_name = None
@property
def ifgrp_name(self):
"""
Specifies the interface group name.
Attributes: key, required-for-create, non-modifiable
"""
return self._ifgrp_name
@ifgrp_name.setter
def ifgrp_name(self, val):
if val != None:
self.validate('ifgrp_name', val)
self._ifgrp_name = val
_mode = None
@property
def mode(self):
"""
Specifies the link policy for the ifgrp.
Possible values:
<ul>
<li> 'multimode - All links are simultaneously
active',
<li> 'multimode_lacp - Link state is managed by the
switch using link aggregation control protocol (LACP)
(IEEE 802.3ad)',
<li> 'singlemode - Only one link is active at a
time'
</ul>
Attributes: required-for-create, non-modifiable
"""
return self._mode
@mode.setter
def mode(self, val):
if val != None:
self.validate('mode', val)
self._mode = val
_port_participation = None
@property
def port_participation(self):
"""
Port participation state of the ifgrp.
Attributes: non-creatable, non-modifiable
Possible values:
<ul>
<li> "full" - Indicates all the ifgrp ports are
active,
<li> "partial" - Indicates not all the ifgrp ports
are active,
<li> "none" - Indicates none of the ifgrp ports is
active
</ul>
"""
return self._port_participation
@port_participation.setter
def port_participation(self, val):
if val != None:
self.validate('port_participation', val)
self._port_participation = val
_ports = None
@property
def ports(self):
"""
List of ports associated with this ifgrp.
Attributes: non-creatable, non-modifiable
"""
return self._ports
@ports.setter
def ports(self, val):
if val != None:
self.validate('ports', val)
self._ports = val
_distribution_function = None
@property
def distribution_function(self):
"""
Specifies the traffic distribution function for the
ifgrp.
Attributes: required-for-create, non-modifiable
Possible values:
<ul>
<li> "mac" - Network traffic is distributed
on the basis of MAC addresses,
<li> "ip" - Network traffic is distributed
on the basis of IP addresses,
<li> "sequential" - Network traffic is distributed
round-robin to each interface,
<li> "port" - Network traffic is distributed
by transport layer address 4-tuple
</ul>
"""
return self._distribution_function
@distribution_function.setter
def distribution_function(self, val):
if val != None:
self.validate('distribution_function', val)
self._distribution_function = val
@staticmethod
def get_api_name():
return "net-ifgrp-info"
@staticmethod
def get_desired_attrs():
return [
'node',
'up-ports',
'down-ports',
'mac-address',
'ifgrp-name',
'mode',
'port-participation',
'ports',
'distribution-function',
]
def describe_properties(self):
return {
'node': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'up_ports': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'down_ports': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'mac_address': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'ifgrp_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'mode': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'port_participation': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'ports': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'distribution_function': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
authors: ["radek@gruchalski.com"] | author_id: radek@gruchalski.com

blob_id: b86128aee5418c0b7ac108bd068d443064cc3ec0 | directory_id: 9743d5fd24822f79c156ad112229e25adb9ed6f6 | content_id: 40a9742cbaf0299a9d7ec6767d646bfc24b37d57
path: /xai/brain/wordbase/nouns/_sermon.py
detected_licenses: ["MIT"] | license_type: permissive
repo_name: cash2one/xai | branch_name: refs/heads/master
snapshot_id: de7adad1758f50dd6786bf0111e71a903f039b64 | revision_id: e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
visit_date: 2021-01-19T12:33:54.964379 | revision_date: 2017-01-28T02:00:50 | committer_date: 2017-01-28T02:00:50
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 537 | extension: py
content:
#calss header
class _SERMON():
def __init__(self,):
self.name = "SERMON"
self.definitions = [u'a part of a Christian church ceremony in which a priest gives a talk on a religious or moral subject, often based on something written in the Bible: ', u'a long talk in which someone advises other people how they should behave in order to be better people: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
authors: ["xingwang1991@gmail.com"] | author_id: xingwang1991@gmail.com

blob_id: 8adb355b8d8850f4f2de49b4f36daf51077ab7e9 | directory_id: 3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be | content_id: 8e3acd78a8ae36cdba093c5765105c6b9efc81bf
path: /google-cloud-sdk/lib/googlecloudsdk/surface/compute/target_pools/create.py
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive
repo_name: twistedpair/google-cloud-sdk | branch_name: refs/heads/master
snapshot_id: 37f04872cf1ab9c9ce5ec692d2201a93679827e3 | revision_id: 1f9b424c40a87b46656fc9f5e2e9c81895c7e614
visit_date: 2023-08-18T18:42:59.622485 | revision_date: 2023-08-15T00:00:00 | committer_date: 2023-08-15T12:14:05
github_id: 116,506,777 | star_events_count: 58 | fork_events_count: 24
gha_license_id: null | gha_event_created_at: 2022-02-14T22:01:53 | gha_created_at: 2018-01-06T18:40:35 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,524 | extension: py
content:
# Copyright 2014 Google Inc. All Rights Reserved.
"""Command for creating target pools."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.third_party.apis.compute.v1 import compute_v1_messages
SESSION_AFFINITIES = sorted(
compute_v1_messages.TargetPool.SessionAffinityValueValuesEnum
.to_dict().keys())
class Create(base_classes.BaseAsyncCreator):
"""Define a load-balanced pool of virtual machine instances.
*{command}* is used to create a target pool. A target pool resource
defines a group of instances that can receive incoming traffic
from forwarding rules. When a forwarding rule directs traffic to a
target pool, Google Compute Engine picks an instance from the
target pool based on a hash of the source and
destination IP addresses and ports. For more
information on load balancing, see
link:https://cloud.google.com/compute/docs/load-balancing-and-autoscaling/[].
To add instances to a target pool, use 'gcloud compute
target-pools add-instances'.
"""
@staticmethod
def Args(parser):
backup_pool = parser.add_argument(
'--backup-pool',
help='Defines the fallback pool for the target pool.')
backup_pool.detailed_help = """\
Together with ``--failover-ratio'', this flag defines the fallback
behavior of the target pool (primary pool) to be created by this
command. If the ratio of the healthy instances in the primary pool
is at or below the specified ``--failover-ratio value'', then traffic
arriving at the load-balanced IP address will be directed to the
backup pool. If this flag is provided, then ``--failover-ratio'' is
required.
"""
parser.add_argument(
'--description',
help='An optional description of this target pool.')
failover_ratio = parser.add_argument(
'--failover-ratio',
type=float,
help=('The ratio of healthy instances below which the backup pool '
'will be used.'))
failover_ratio.detailed_help = """\
Together with ``--backup-pool'', defines the fallback behavior of the
target pool (primary pool) to be created by this command. If the
ratio of the healthy instances in the primary pool is at or below this
number, traffic arriving at the load-balanced IP address will be
directed to the backup pool. For example, if 0.4 is chosen as the
failover ratio, then traffic will fail over to the backup pool if
more than 40% of the instances become unhealthy.
If not set, the traffic will be directed the
instances in this pool in the ``force'' mode, where traffic will be
spread to the healthy instances with the best effort, or to all
instances when no instance is healthy.
If this flag is provided, then ``--backup-pool'' is required.
"""
health_check = parser.add_argument(
'--health-check',
help=('Specifies HttpHealthCheck to determine the health of instances '
'in the pool.'),
metavar='HEALTH_CHECK')
health_check.detailed_help = """\
Specifies an HTTP health check resource to use to determine the health
of instances in this pool. If no health check is specified, traffic will
be sent to all instances in this target pool as if the instances
were healthy, but the health status of this pool will appear as
unhealthy as a warning that this target pool does not have a health
check.
"""
utils.AddRegionFlag(
parser,
resource_type='target pool',
operation_type='create')
session_affinity = parser.add_argument(
'--session-affinity',
choices=SESSION_AFFINITIES,
type=lambda x: x.upper(),
default='NONE',
help='The session affinity option for the target pool.')
session_affinity.detailed_help = """\
Specifies the session affinity option for the connection.
If ``NONE'' is selected, then connections from the same client
IP address may go to any instance in the target pool.
If ``CLIENT_IP'' is selected, then connections
from the same client IP address will go to the same instance
in the target pool.
If ``CLIENT_IP_PROTO'' is selected, then connections from the same
client IP with the same IP protocol will go to the same client pool.
If not specified, then ``NONE'' is used as a default.
"""
parser.add_argument(
'name',
help='The name of the target pool.')
@property
def service(self):
return self.compute.targetPools
@property
def method(self):
return 'Insert'
@property
def resource_type(self):
return 'targetPools'
def CreateRequests(self, args):
"""Returns a list of requests necessary for adding a target pool."""
if ((args.backup_pool and not args.failover_ratio) or
(args.failover_ratio and not args.backup_pool)):
raise calliope_exceptions.ToolException(
'Either both or neither of [--failover-ratio] and [--backup-pool] '
'must be provided.')
if args.failover_ratio is not None:
if args.failover_ratio < 0 or args.failover_ratio > 1:
raise calliope_exceptions.ToolException(
'[--failover-ratio] must be a number between 0 and 1, inclusive.')
if args.health_check:
health_check = [self.CreateGlobalReference(
args.health_check, resource_type='httpHealthChecks').SelfLink()]
else:
health_check = []
target_pool_ref = self.CreateRegionalReference(args.name, args.region)
if args.backup_pool:
backup_pool_uri = self.CreateRegionalReference(
args.backup_pool, target_pool_ref.region).SelfLink()
else:
backup_pool_uri = None
request = self.messages.ComputeTargetPoolsInsertRequest(
targetPool=self.messages.TargetPool(
backupPool=backup_pool_uri,
description=args.description,
failoverRatio=args.failover_ratio,
healthChecks=health_check,
name=target_pool_ref.Name(),
sessionAffinity=(
self.messages.TargetPool.SessionAffinityValueValuesEnum(
args.session_affinity))),
region=target_pool_ref.region,
project=self.project)
return [request]
authors: ["joe@longreen.io"] | author_id: joe@longreen.io

blob_id: 0b52a7d8625cdde3d880fe9de03a47671ea10878 | directory_id: 6e8d58340f2be5f00d55e2629052c0bbc9dcf390 | content_id: 4efa96a13a738387c857a1897e50eaa3739530c4
path: /tools/data_source/microbial_import_code.py
detected_licenses: ["CC-BY-2.5", "MIT"] | license_type: permissive
repo_name: JCVI-Cloud/galaxy-tools-prok | branch_name: refs/heads/master
snapshot_id: e57389750d33ac766e1658838cdb0aaf9a59c106 | revision_id: 3c44ecaf4b2e1f2d7269eabef19cbd2e88b3a99c
visit_date: 2021-05-02T06:23:05.414371 | revision_date: 2014-03-21T18:12:43 | committer_date: 2014-03-21T18:12:43
github_id: 6,092,693 | star_events_count: 0 | fork_events_count: 2
gha_license_id: NOASSERTION | gha_event_created_at: 2020-07-25T20:38:17 | gha_created_at: 2012-10-05T15:57:38 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,420 | extension: py
content:
def load_microbial_data( GALAXY_DATA_INDEX_DIR, sep='\t' ):
# FIXME: this function is duplicated in the DynamicOptions class. It is used here only to
# set data.name in exec_after_process().
microbe_info= {}
orgs = {}
filename = "%s/microbial_data.loc" % GALAXY_DATA_INDEX_DIR
for i, line in enumerate( open( filename ) ):
line = line.rstrip( '\r\n' )
if line and not line.startswith( '#' ):
fields = line.split( sep )
#read each line, if not enough fields, go to next line
try:
info_type = fields.pop(0)
if info_type.upper() == "ORG":
#ORG 12521 Clostridium perfringens SM101 bacteria Firmicutes CP000312,CP000313,CP000314,CP000315 http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?db=genomeprj&cmd=Retrieve&dopt=Overview&list_uids=12521
org_num = fields.pop(0)
name = fields.pop(0)
kingdom = fields.pop(0)
group = fields.pop(0)
chromosomes = fields.pop(0)
info_url = fields.pop(0)
link_site = fields.pop(0)
if org_num not in orgs:
orgs[ org_num ] = {}
orgs[ org_num ][ 'chrs' ] = {}
orgs[ org_num ][ 'name' ] = name
orgs[ org_num ][ 'kingdom' ] = kingdom
orgs[ org_num ][ 'group' ] = group
orgs[ org_num ][ 'chromosomes' ] = chromosomes
orgs[ org_num ][ 'info_url' ] = info_url
orgs[ org_num ][ 'link_site' ] = link_site
elif info_type.upper() == "CHR":
#CHR 12521 CP000315 Clostridium perfringens phage phiSM101, complete genome 38092 110684521 CP000315.1
org_num = fields.pop(0)
chr_acc = fields.pop(0)
name = fields.pop(0)
length = fields.pop(0)
gi = fields.pop(0)
gb = fields.pop(0)
info_url = fields.pop(0)
chr = {}
chr[ 'name' ] = name
chr[ 'length' ] = length
chr[ 'gi' ] = gi
chr[ 'gb' ] = gb
chr[ 'info_url' ] = info_url
if org_num not in orgs:
orgs[ org_num ] = {}
orgs[ org_num ][ 'chrs' ] = {}
orgs[ org_num ][ 'chrs' ][ chr_acc ] = chr
elif info_type.upper() == "DATA":
#DATA 12521_12521_CDS 12521 CP000315 CDS bed /home/djb396/alignments/playground/bacteria/12521/CP000315.CDS.bed
uid = fields.pop(0)
org_num = fields.pop(0)
chr_acc = fields.pop(0)
feature = fields.pop(0)
filetype = fields.pop(0)
path = fields.pop(0)
data = {}
data[ 'filetype' ] = filetype
data[ 'path' ] = path
data[ 'feature' ] = feature
if org_num not in orgs:
orgs[ org_num ] = {}
orgs[ org_num ][ 'chrs' ] = {}
if 'data' not in orgs[ org_num ][ 'chrs' ][ chr_acc ]:
orgs[ org_num ][ 'chrs' ][ chr_acc ][ 'data' ] = {}
orgs[ org_num ][ 'chrs' ][ chr_acc ][ 'data' ][ uid ] = data
else: continue
except: continue
for org_num in orgs:
org = orgs[ org_num ]
if org[ 'kingdom' ] not in microbe_info:
microbe_info[ org[ 'kingdom' ] ] = {}
if org_num not in microbe_info[ org[ 'kingdom' ] ]:
microbe_info[ org[ 'kingdom' ] ][org_num] = org
return microbe_info
#post processing, set build for data and add additional data to history
from galaxy import datatypes, config, jobs, tools
from shutil import copyfile
def exec_after_process(app, inp_data, out_data, param_dict, tool, stdout, stderr):
base_dataset = out_data.items()[0][1]
history = base_dataset.history
if history == None:
print "unknown history!"
return
kingdom = param_dict.get( 'kingdom', None )
#group = param_dict.get( 'group', None )
org = param_dict.get( 'org', None )
#if not (kingdom or group or org):
if not (kingdom or org):
print "Parameters are not available."
#workflow passes galaxy.tools.parameters.basic.UnvalidatedValue instead of values
if isinstance( kingdom, tools.parameters.basic.UnvalidatedValue ):
kingdom = kingdom.value
if isinstance( org, tools.parameters.basic.UnvalidatedValue ):
org = org.value
GALAXY_DATA_INDEX_DIR = app.config.tool_data_path
microbe_info = load_microbial_data( GALAXY_DATA_INDEX_DIR, sep='\t' )
new_stdout = ""
split_stdout = stdout.split("\n")
basic_name = ""
for line in split_stdout:
fields = line.split("\t")
if fields[0] == "#File1":
description = fields[1]
chr = fields[2]
dbkey = fields[3]
file_type = fields[4]
name, data = out_data.items()[0]
data.set_size()
basic_name = data.name
data.name = data.name + " (" + microbe_info[kingdom][org]['chrs'][chr]['data'][description]['feature'] +" for " + microbe_info[kingdom][org]['name'] + ":" + chr + ")"
data.dbkey = dbkey
data.info = data.name
data = app.datatypes_registry.change_datatype( data, file_type )
data.init_meta()
data.set_peek()
app.model.context.add( data )
app.model.context.flush()
elif fields[0] == "#NewFile":
description = fields[1]
chr = fields[2]
dbkey = fields[3]
filepath = fields[4]
file_type = fields[5]
newdata = app.model.HistoryDatasetAssociation( create_dataset = True, sa_session = app.model.context ) #This import should become a library
newdata.set_size()
newdata.extension = file_type
newdata.name = basic_name + " (" + microbe_info[kingdom][org]['chrs'][chr]['data'][description]['feature'] +" for "+microbe_info[kingdom][org]['name']+":"+chr + ")"
app.model.context.add( newdata )
app.model.context.flush()
app.security_agent.copy_dataset_permissions( base_dataset.dataset, newdata.dataset )
history.add_dataset( newdata )
app.model.context.add( history )
app.model.context.flush()
try:
copyfile(filepath,newdata.file_name)
newdata.info = newdata.name
newdata.state = jobs.JOB_OK
except:
newdata.info = "The requested file is missing from the system."
newdata.state = jobs.JOB_ERROR
newdata.dbkey = dbkey
newdata.init_meta()
newdata.set_peek()
app.model.context.flush()
authors: ["root@ip-10-118-137-129.ec2.internal"] | author_id: root@ip-10-118-137-129.ec2.internal

blob_id: 0768b2e247703f696bc61b8a9841da4430449517 | directory_id: 52b5773617a1b972a905de4d692540d26ff74926 | content_id: df3ed8e6be65ea59b21b9730c1d84306bfff2615
path: /.history/cylicRot_20200714235552.py
detected_licenses: [] | license_type: no_license
repo_name: MaryanneNjeri/pythonModules | branch_name: refs/heads/master
snapshot_id: 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | revision_id: f4e56b1e4dda2349267af634a46f6b9df6686020
visit_date: 2022-12-16T02:59:19.896129 | revision_date: 2020-09-11T12:05:22 | committer_date: 2020-09-11T12:05:22
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 467 | extension: py
content:
# given an array rotate it k times to the right
def rotate(A,K):
# first I'd rotate the array once
# so how do we rotate the array
# we move the last element to the firs place and
# the rest follow suit
# moving elements to the right in an array
# [3,8,9,7,6]
# [6,3,8,9,7]
for i in range(len(A)):
A[i] = A[len(A)-1]
A[len(A)-1] = A[len(A)-2]
A[i] = A[i+1]
rotate([3, 8, 9, 7, 6], 3)
authors: ["mary.jereh@gmail.com"] | author_id: mary.jereh@gmail.com

blob_id: 46dc15bb2d04819454a71d40ffa2011043e35239 | directory_id: f8f70ed663ffccf61a739332697da5c97a41b9cf | content_id: 43d9b2c1a862d1468af29616515d7b25aedf5b33
path: /setup.py
detected_licenses: ["MIT"] | license_type: permissive
repo_name: kanurag94/rstcheck | branch_name: refs/heads/master
snapshot_id: 123116993d9d33e3efdbafe889de94b48dd2cbe8 | revision_id: fdf2d324bf20357fd47f7579c58fec693f71a120
visit_date: 2020-06-22T17:04:39.016288 | revision_date: 2019-07-21T18:21:20 | committer_date: 2019-07-21T18:21:20
github_id: 197,751,048 | star_events_count: 0 | fork_events_count: 0
gha_license_id: MIT | gha_event_created_at: 2019-07-19T10:10:13 | gha_created_at: 2019-07-19T10:10:13 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,415 | extension: py
content:
#!/usr/bin/env python
"""Installer for rstcheck."""
import ast
import io
import setuptools
def version():
"""Return version string."""
with io.open('rstcheck.py', encoding='utf-8') as input_file:
for line in input_file:
if line.startswith('__version__'):
return ast.parse(line).body[0].value.s
with io.open('README.rst', encoding='utf-8') as readme:
setuptools.setup(
name='rstcheck',
version=version(),
url='https://github.com/myint/rstcheck',
description='Checks syntax of reStructuredText and code blocks nested '
'within it',
long_description=readme.read(),
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Quality Assurance',
],
keywords='restructuredtext,lint,check,pypi,readme,rst,analyze',
py_modules=['rstcheck'],
entry_points={'console_scripts': ['rstcheck = rstcheck:main']},
install_requires=['docutils >= 0.7'])
authors: ["git@stevenmyint.com"] | author_id: git@stevenmyint.com

blob_id: 76243e46b0928b11c197e4e1c939786c01d8cf63 | directory_id: 72bc1c9c8d5dd0b185fa4444ac4d6d721d097480 | content_id: 0af1fa202c00721ee6ffe8d97996ea2447abf993
path: /cooperative/analysis/analyse_cooperative.py
detected_licenses: [] | license_type: no_license
repo_name: PeppaYao/shepherding-problem | branch_name: refs/heads/main
snapshot_id: ad54e5051d193f71e6301d9d94d8f2b0a05d8b50 | revision_id: 15e199f0fb771891bcbfb804d653b95e8c141c59
visit_date: 2023-05-06T01:20:05.874427 | revision_date: 2021-05-26T01:34:17 | committer_date: 2021-05-26T01:34:17
github_id: 305,695,862 | star_events_count: 2 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 658 | extension: py
content:
import numpy as np
import matplotlib.pyplot as plt
# 合作:最远距离+驱赶
# 合作:最大角度+驱赶
fig, ax = plt.subplots()
X = np.arange(40, 51)
Y = np.array([1442.000, 1191.000, 1495.000, 1266.000, 1110.000, 563.000, 594.000, 545.000, 533.000, 641.000, 676.000, ])
Y2 = np.array([4001.000, 2179.000, 2610.000, 4001.000, 4001.000, 2017.000, 1599.000, 3604.000, 1222.000, 2871.000, 4001.000, ])
plt.plot(X, Y, 'purple', label="mam")
plt.plot(X, Y2, 'darkcyan', label="sppl")
plt.xlabel("the number of sheep")
plt.ylabel("dispersion")
# plt.xticks(np.arange(0, 100, 10))
plt.legend()
plt.xlim(40, 51)
plt.ylim(0, 4200)
plt.grid()
plt.show()
authors: ["940334249@qq.com"] | author_id: 940334249@qq.com

blob_id: 0693ba5058ee6afecaa80396bfe052d8f61a5d6e | directory_id: 242d8d05e457107bed4c539b9cbd117d2733614d | content_id: 6163b8dfaa7cfea4d89ae31f5995c63a8706940c
path: /untitled1.py
detected_licenses: [] | license_type: no_license
repo_name: lucmilot/datastage | branch_name: refs/heads/master
snapshot_id: b4a7abd17cec360db2fc814eddf26174ab807b9b | revision_id: 5f06b74e87d25cee1c98394e4593200579cb18d6
visit_date: 2021-05-18T16:47:16.100681 | revision_date: 2020-03-30T13:54:06 | committer_date: 2020-03-30T13:54:06
github_id: 251,323,513 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 479 | extension: py
content:
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 3 13:43:46 2018
@author: XT21586
"""
import win32com.client as win32
pathx = "C:\\Users\\XT21586\Documents\\document\\Data Stage\\python\\build\\exe.win-amd64-3.6\\"
excel = win32.gencache.EnsureDispatch('Excel.Application')
outfilxls1 = pathx + "result1.xls"
wb = excel.Workbooks.Open(outfilxls1)
#try:
# wb = excel.Workbooks.Open(outfilxls1)
#except:
# print (outfilxls1 +" is already open!")
excel.Visible = True
authors: ["40570847+lucmilot@users.noreply.github.com"] | author_id: 40570847+lucmilot@users.noreply.github.com

blob_id: 9356a925249fc9974103fcf1d00723517c16e27b | directory_id: 5a2297cff798f4ac03255a803a25177d19235020 | content_id: da60480513a59924f4be0effc632c0fcfe596ecc
path: /ipcrawler/spiders/xiciScrapy.py
detected_licenses: [] | license_type: no_license
repo_name: yidun55/ipcrawler | branch_name: refs/heads/master
snapshot_id: 0d51184922470483f277ec4d1f40c2920f7b0bc5 | revision_id: 7dd804bb687df57139addd63fe5e1284fea93e2d
visit_date: 2021-01-19T07:31:08.804177 | revision_date: 2015-07-28T09:15:51 | committer_date: 2015-07-28T09:15:51
github_id: 39,173,638 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,596 | extension: py
content:
#!usr/bin/env python
#coding: utf-8
"""
从专利局官网上爬取各公司的专利信息
"""
from scrapy.spider import Spider
from scrapy.http import Request
from scrapy import log
from scrapy import Selector
import sys
from ipcrawler.items import *
reload(sys)
sys.setdefaultencoding("utf-8")
class patenttest(Spider):
# download_delay=20
name = 'xici'
start_urls = ['http://www.xici.net.co/nn']
def parse(self, response):
"""
获取总页数
"""
urls = ["http://www.xici.net.co/nn/"+str(i) for\
i in xrange(1, 204)]
for url in urls:
yield Request(url, callback=self.detail,\
dont_filter=True)
def detail(self, response):
sel = Selector(text=response.body)
ips = sel.xpath("//table[@id='ip_list']/tr[position()>1]\
/td[3]/text()").extract()
ports = sel.xpath("//table[@id='ip_list']/tr[position()>1]\
/td[4]/text()").extract()
scheme = sel.xpath("//table[@id='ip_list']/tr[position()>1]\
/td[7]/text()").extract()
if len(ips) == len(ports):
te = zip(ips,ports)
last = zip(scheme,[":".join(item) for item in te])
last = [(item[0].lower(),item[1]) for item in last]
ip_port = ["://".join(item)+"\n" for item in last]
ips_ports = "".join(ip_port)
print ips_ports
item = IpcrawlerItem()
item['content'] = ips_ports
return item
else:
log.msg("error in xpath",level=log.ERROR)
authors: ["heshang1203@sina.com"] | author_id: heshang1203@sina.com

blob_id: 1867e8a3098592e90d6acaeabf4754755bba7650 | directory_id: e79888cd68177e7ec5125270cdc52f888e211e78 | content_id: cf7d05c2a04832c8e87b16b0bf9d6c051cc71b0d
path: /kiyuna/chapter05/knock45.py
detected_licenses: [] | license_type: no_license
repo_name: cafenoctua/100knock2019 | branch_name: refs/heads/master
snapshot_id: ec259bee27936bdacfe0097d42f23cc7500f0a07 | revision_id: 88717a78c4290101a021fbe8b4f054f76c9d3fa6
visit_date: 2022-06-22T04:42:03.939373 | revision_date: 2019-09-03T11:05:19 | committer_date: 2019-09-03T11:05:19
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,036 | extension: py
content:
'''
45. 動詞の格パターンの抽出
今回用いている文章をコーパスと見なし,日本語の述語が取りうる格を調査したい.
動詞を述語,動詞に係っている文節の助詞を格と考え,述語と格をタブ区切り形式で出力せよ.
ただし,出力は以下の仕様を満たすようにせよ.
- 動詞を含む文節において,最左の動詞の基本形を述語とする
- 述語に係る助詞を格とする
- 述語に係る助詞(文節)が複数あるときは,すべての助詞をスペース区切りで辞書順に並べる
「吾輩はここで始めて人間というものを見た」という例文(neko.txt.cabochaの8文目)を考える.
この文は「始める」と「見る」の2つの動詞を含み,「始める」に係る文節は「ここで」,
「見る」に係る文節は「吾輩は」と「ものを」と解析された場合は,次のような出力になるはずである.
始める で
見る は を
このプログラムの出力をファイルに保存し,以下の事項をUNIXコマンドを用いて確認せよ.
- コーパス中で頻出する述語と格パターンの組み合わせ
- 「する」「見る」「与える」という動詞の格パターン(コーパス中で出現頻度の高い順に並べよ)
'''
import sys
from knock41 import cabocha_into_chunks, Chunk
def message(text):
sys.stderr.write(f"\33[92m{text}\33[0m\n")
class Chunk_normalized(Chunk):
def __init__(self, chunk):
self.morphs, self.dst, self.srcs = (*chunk,)
self.norm = self.norm()
def norm(self):
clause = ''.join(m.surface for m in self.morphs if m.pos != '記号')
return clause
def has_pos(self, pos):
for m in self.morphs:
if m.pos == pos:
return True
return False
def get_pos(self, pos):
res = []
for m in self.morphs:
if m.pos == pos:
res.append(m)
return res
if __name__ == '__main__':
res = []
for chunks in cabocha_into_chunks():
chunks = tuple(map(Chunk_normalized, chunks.values()))
for dc in chunks:
if not dc.has_pos('動詞'):
continue
srcs = []
for sc_idx in dc.srcs:
# 述語に係る助詞を格とする
for m in chunks[sc_idx].get_pos('助詞'):
srcs.append(m.base)
# 動詞を含む文節において,最左の動詞の基本形を述語とする
base = dc.get_pos('動詞')[0].base
# 述語に係る助詞(文節)が複数あるときは,
# すべての助詞をスペース区切りで辞書順に並べる
srcs.sort()
particles = " ".join(srcs)
if srcs:
res.append(f'{base}\t{particles}\n')
sys.stdout.writelines(res)
message(f'{len(res)} 行書き出しました')
authors: ["kyuna.prog@gmail.com"] | author_id: kyuna.prog@gmail.com

blob_id: ba5d6ca0a74e5b6778ad6e411e41aefd456ae06c | directory_id: 5cdbdc84b04c511a59ba649b64466d0ebe29f266 | content_id: a829585f69f893dea66b03d7e873500875699602
path: /config.py
detected_licenses: [] | license_type: no_license
repo_name: Shatnerz/glad | branch_name: refs/heads/master
snapshot_id: a3298503231e4b8529d5b47e57b8279d67960ae7 | revision_id: c58d44358c81a529559fc94825f911ea4ccb0e26
visit_date: 2021-05-16T03:21:05.146678 | revision_date: 2020-03-09T14:56:57 | committer_date: 2020-03-09T14:56:57
github_id: 42,134,456 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 514 | extension: py
content:
import ConfigParser
class BasicConfig(object):
def __init__(self, filename):
self.parser = ConfigParser.RawConfigParser()
self.parser.read(filename)
def get(section, option):
return self.parser.get(section, option)
def getBoolean(section, option):
return self.parser.getboolean(section,option)
def getResolution(section, option):
s = self.get(section, option)
l = s.split('x')
l = [int(x) for x in l]
return tuple(l)
authors: ["devnull@localhost"] | author_id: devnull@localhost

blob_id: cc63c6d723f156472557a419377fda74f8a1e977 | directory_id: cfb4e8721137a096a23d151f2ff27240b218c34c | content_id: 58caa28e588ddccd4c28b9ff179664528955fe48
path: /mypower/matpower_ported/lib/toggle_reserves.py
detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: suryo12/mypower | branch_name: refs/heads/master
snapshot_id: eaebe1d13f94c0b947a3c022a98bab936a23f5d3 | revision_id: ee79dfffc057118d25f30ef85a45370dfdbab7d5
visit_date: 2022-11-25T16:30:02.643830 | revision_date: 2020-08-02T13:16:20 | committer_date: 2020-08-02T13:16:20
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 164 | extension: py
content:
def toggle_reserves(*args,nout=1,oc=None):
if oc == None:
from ...oc_matpower import oc_matpower
oc = oc_matpower()
return oc.toggle_reserves(*args,nout=nout)
authors: ["muhammadyasirroni@gmail.com"] | author_id: muhammadyasirroni@gmail.com

blob_id: 169fc535e0b99ab762810b308d9274646618d9a1 | directory_id: 9e2f24027e4044252639563461116a895acce039 | content_id: 6dcb1e61a54f0310f69b12cd9ae417fef47bd40b
path: /biosteam/units/_vent_scrubber.py
detected_licenses: ["LicenseRef-scancode-unknown-license-reference", "MIT", "NCSA"] | license_type: permissive
repo_name: yalinli2/biosteam | branch_name: refs/heads/master
snapshot_id: 5010b5d430cc746f6fa00a23805a1c1f5cac7a81 | revision_id: e7385ca1feac642881a357ffbc4461382549c3a4
visit_date: 2022-03-20T23:57:06.824292 | revision_date: 2022-02-22T15:55:11 | committer_date: 2022-02-22T15:55:11
github_id: 190,422,353 | star_events_count: 0 | fork_events_count: 0
gha_license_id: MIT | gha_event_created_at: 2019-06-05T15:39:04 | gha_created_at: 2019-06-05T15:39:03 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,150 | extension: py
content:
# -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020-2021, Yoel Cortes-Pena <yoelcortes@gmail.com>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
from .decorators import cost
from .. import Unit
__all__ = ('VentScrubber',)
@cost('Flow rate', units='kg/hr',
S=22608, CE=522, cost=215e3, n=0.6, BM=2.4)
class VentScrubber(Unit):
_N_ins = _N_outs = 2
_units = {'Flow rate': 'kg/hr'}
def __init__(self, ID='', ins=None, outs=(), thermo=None, *, gas):
Unit.__init__(self, ID, ins, outs, thermo)
self.gas = gas
def _run(self):
water, vent_entry = self.ins
vent_exit, bottoms = self.outs
vent_exit.copy_like(vent_entry)
bottoms.empty()
bottoms.copy_flow(vent_exit, self.gas,
remove=True, exclude=True)
bottoms.mix_from([bottoms, water], energy_balance=False)
def _design(self):
self.design_results['Flow rate'] = self._ins[1].F_mass
authors: ["yoelcortes@gmail.com"] | author_id: yoelcortes@gmail.com

blob_id: e576d9e8d6f40fda097536aead9d3ee9a9634d63 | directory_id: f7c8df084dabf0d9c5dfa6dd15322a9cd8beb587 | content_id: 0fe3b39646525f0cada498e9d384d8a0cfa36161
path: /misc/projecteuler/p0001.py
detected_licenses: [] | license_type: no_license
repo_name: sahands/problem-solving | branch_name: refs/heads/master
snapshot_id: 6591464366bac635f53e0960eb5cd796bddaea8f | revision_id: 04d17ee2f55babcb106fdddd56a1caf7b65df2db
visit_date: 2021-01-02T09:15:38.686212 | revision_date: 2014-10-23T02:26:08 | committer_date: 2014-10-23T02:26:08
github_id: 24,439,994 | star_events_count: 2 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 299 | extension: py
content:
def sum_of_multiples(k, n):
"""Returns the sum of all multiples of k less than n."""
m = (n-1) // k
return k * (m * (m + 1)) / 2
if __name__ == '__main__':
n = 1000
a = sum_of_multiples(3, n)
b = sum_of_multiples(5, n)
c = sum_of_multiples(15, n)
print a + b - c
authors: ["sahands@gmail.com"] | author_id: sahands@gmail.com

blob_id: 0baf82b9755194c97a4cad88ec01c3161a46cf5e | directory_id: d1f2a0473cc773986482607a4b1ee9de85627949 | content_id: ab670311e2203e3875bae29868dcd5078aa16dd0
path: /model/darknet53.py
detected_licenses: [] | license_type: no_license
repo_name: choodly/PaddlePaddle_yolact | branch_name: refs/heads/master
snapshot_id: fcf8273a66ce5b1a464bd30f97e77bad5362ad65 | revision_id: 7344e6fa98b5451dfe47e725f3c6aabf85e71d10
visit_date: 2022-11-21T00:40:41.692806 | revision_date: 2020-07-13T03:00:22 | committer_date: 2020-07-13T03:00:22
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,460 | extension: py
content:
#! /usr/bin/env python
# coding=utf-8
# ================================================================
#
# Author : miemie2013
# Created date: 2020-01-23 15:16:15
# Description : paddlepaddle_yolact++
#
# ================================================================
import paddle.fluid as fluid
import paddle.fluid.layers as P
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay
def conv2d_unit(x, filters, kernels, stride, padding, name, is_test, trainable):
x = P.conv2d(
input=x,
num_filters=filters,
filter_size=kernels,
stride=stride,
padding=padding,
act=None,
param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name=name + ".conv.weights", trainable=trainable),
bias_attr=False)
bn_name = name + ".bn"
x = P.batch_norm(
input=x,
act=None,
is_test=is_test,
param_attr=ParamAttr(
initializer=fluid.initializer.Constant(1.0),
regularizer=L2Decay(0.),
trainable=trainable,
name=bn_name + '.scale'),
bias_attr=ParamAttr(
initializer=fluid.initializer.Constant(0.0),
regularizer=L2Decay(0.),
trainable=trainable,
name=bn_name + '.offset'),
moving_mean_name=bn_name + '.mean',
moving_variance_name=bn_name + '.var')
x = P.leaky_relu(x, alpha=0.1)
return x
def residual_block(inputs, filters, conv_start_idx, is_test, trainable):
x = conv2d_unit(inputs, filters, (1, 1), stride=1, padding=0, name='conv%.2d'% conv_start_idx, is_test=is_test, trainable=trainable)
x = conv2d_unit(x, 2 * filters, (3, 3), stride=1, padding=1, name='conv%.2d'% (conv_start_idx+1), is_test=is_test, trainable=trainable)
x = P.elementwise_add(x=inputs, y=x, act=None)
return x
def stack_residual_block(inputs, filters, n, conv_start_idx, is_test, trainable):
x = residual_block(inputs, filters, conv_start_idx, is_test, trainable)
for i in range(n - 1):
x = residual_block(x, filters, conv_start_idx+2*(1+i), is_test, trainable)
return x
def DarkNet53(inputs, is_test, trainable):
''' 所有卷积层都没有偏移bias_attr=False '''
x = conv2d_unit(inputs, 32, (3, 3), stride=1, padding=1, name='conv01', is_test=is_test, trainable=trainable)
x = conv2d_unit(x, 64, (3, 3), stride=2, padding=1, name='conv02', is_test=is_test, trainable=trainable)
x = stack_residual_block(x, 32, n=1, conv_start_idx=3, is_test=is_test, trainable=trainable)
x = conv2d_unit(x, 128, (3, 3), stride=2, padding=1, name='conv05', is_test=is_test, trainable=trainable)
x = stack_residual_block(x, 64, n=2, conv_start_idx=6, is_test=is_test, trainable=trainable)
x = conv2d_unit(x, 256, (3, 3), stride=2, padding=1, name='conv10', is_test=is_test, trainable=trainable)
s8 = stack_residual_block(x, 128, n=8, conv_start_idx=11, is_test=is_test, trainable=trainable)
x = conv2d_unit(s8, 512, (3, 3), stride=2, padding=1, name='conv27', is_test=is_test, trainable=trainable)
s16 = stack_residual_block(x, 256, n=8, conv_start_idx=28, is_test=is_test, trainable=trainable)
x = conv2d_unit(s16, 1024, (3, 3), stride=2, padding=1, name='conv44', is_test=is_test, trainable=trainable)
s32 = stack_residual_block(x, 512, n=4, conv_start_idx=45, is_test=is_test, trainable=trainable)
return s8, s16, s32
authors: ["53960695+miemie2013@users.noreply.github.com"] | author_id: 53960695+miemie2013@users.noreply.github.com

blob_id: fe25948466810e069367b21e9f97ea3d090e7d98 | directory_id: a15200778946f6f181e23373525b02b65c44ce6e | content_id: 281a15f510d37afd02faf19c70d70113b6a9bc3c
path: /Algoritmi/2019-07-30/all-CMS-submissions/2019-07-30.09:06:48.930075.VR437056.tree_transcode_disc.py
detected_licenses: [] | license_type: no_license
repo_name: alberto-uni/portafoglioVoti_public | branch_name: refs/heads/master
snapshot_id: db518f4d4e750d25dcb61e41aa3f9ea69aaaf275 | revision_id: 40c00ab74f641f83b23e06806bfa29c833badef9
visit_date: 2023-08-29T03:33:06.477640 | revision_date: 2021-10-08T17:12:31 | committer_date: 2021-10-08T17:12:31
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,986 | extension: py
content:
"""
* user: VR437056
* fname: MARTINI
* lname: MICHELE
* task: tree_transcode_disc
* score: 50.0
* date: 2019-07-30 09:06:48.930075
"""
#!/usr/bin/env python3
# -*- coding: latin-1 -*-
from __future__ import print_function
import sys
sys.setrecursionlimit(100000)
if sys.version_info < (3, 0):
input = raw_input # in python2, raw_input svolge la funzione della primitiva input in python3
class Node:
def __init__(self, value=1, parent=None):
self.value = value
self.parent = parent
self.children = []
self.counter = 1
def set_parent(self, parent):
self.parent = parent
def add_child(self, child):
self.children += [child]
def add_pre_child(self, child):
self.children = [child] + self.children
def print_node(self):
print('value:', self.value, 'parent', end=" ")
if self.parent is None:
print('None', end=" ")
else:
print(self.parent.value, end=" ")
for child in self.children:
print(child.value, end=" ")
print()
def pre_visit(self, result):
result += str(self.value) + ' '
for child in self.children:
result = child.pre_visit(result)
return result
def post_visit(self, result):
for child in self.children:
result = child.post_visit(result)
result += str(self.value) + ' '
return result
MAX_N = 100
seq = list(map(int,input().split()))
if len(seq) < 2:
exit
tree = [None] * MAX_N
last_node = 0
if seq[0] == 1:
# Tree root
tree[last_node] = Node(seq[1])
current_parent = tree[0]
# Building of the tree
for i in range(2, len(seq)):
while current_parent.counter == current_parent.value:
current_parent = current_parent.parent
current_parent.counter += seq[i]
last_node += 1
tree[last_node] = Node(seq[i], current_parent)
current_parent.add_child(tree[last_node])
if seq[i] > 1:
current_parent = tree[last_node]
# Print the result
print('2', tree[0].post_visit(''))
if seq[0] == 2:
# First node must have value 1
tree[last_node] = Node(seq[1])
for i in range(2, len(seq)):
if seq[i] > 1:
current_parent = Node(seq[i])
counter = 1
while counter < seq[i]:
# Create link parent-child
counter += tree[last_node].value
tree[last_node].set_parent(current_parent)
current_parent.add_pre_child(tree[last_node])
# Remove child from array
tree[last_node] = None
last_node -= 1
last_node += 1
tree[last_node] = current_parent
else:
last_node += 1
tree[last_node] = Node(seq[i])
print('1', tree[0].pre_visit(''))
#for i in range(last_node + 1):
# tree[i].print_node()
authors: ["romeo.rizzi@univr.it"] | author_id: romeo.rizzi@univr.it

blob_id: 2b500c17ffc14a5693b584a428ff89f9e8c3bd15 | directory_id: f5a4f340da539520c60c4bce08356c6f5c171c54 | content_id: 4b2ff4b3ba3d535a0b6861fb8890cb748c22e3eb
path: /xrpl/asyncio/clients/__init__.py
detected_licenses: ["ISC", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive
repo_name: yyolk/xrpl-py | branch_name: refs/heads/master
snapshot_id: e3935c0a0f488793153ca29e9d71c197cf88f857 | revision_id: e5bbdf458ad83e6670a4ebf3df63e17fed8b099f
visit_date: 2023-07-17T03:19:29.239838 | revision_date: 2021-07-03T01:24:57 | committer_date: 2021-07-03T01:24:57
github_id: 355,299,041 | star_events_count: 1 | fork_events_count: 0
gha_license_id: ISC | gha_event_created_at: 2021-04-08T05:29:43 | gha_created_at: 2021-04-06T18:57:06 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 639 | extension: py
content:
"""Asynchronous network clients for interacting with the XRPL."""
from xrpl.asyncio.clients.async_json_rpc_client import AsyncJsonRpcClient
from xrpl.asyncio.clients.async_websocket_client import AsyncWebsocketClient
from xrpl.asyncio.clients.client import Client
from xrpl.asyncio.clients.exceptions import XRPLRequestFailureException
from xrpl.asyncio.clients.utils import json_to_response, request_to_json_rpc
__all__ = [
"AsyncJsonRpcClient",
"AsyncWebsocketClient",
"Client",
"json_to_response",
"request_to_json_rpc",
"XRPLRequestFailureException",
"request_to_websocket",
"websocket_to_response",
]
authors: ["noreply@github.com"] | author_id: yyolk.noreply@github.com

blob_id: 83dfa312d9bd9029ca0080502186ef133d6477f5 | directory_id: e6bc1f55371786dad70313eb468a3ccf6000edaf | content_id: e9557ac613cf6b943e8bfe765d27f85eeb69ee64
path: /Datasets/words-score/Correct/097.py
detected_licenses: [] | license_type: no_license
repo_name: prateksha/Source-Code-Similarity-Measurement | branch_name: refs/heads/master
snapshot_id: 9da92e3b22c372ed6ea54d8b6ab2c5921e8c41c0 | revision_id: fb371b837917794d260a219a1ca09c46a5b15962
visit_date: 2023-01-04T07:49:25.138827 | revision_date: 2020-10-25T14:43:57 | committer_date: 2020-10-25T14:43:57
github_id: 285,744,963 | star_events_count: 3 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 378 | extension: py
content:
def is_vowel(letter):
return letter in ['a', 'e', 'i', 'o', 'u', 'y']
def score_words(words):
#print (words)
score = 0
for word in words:
num_vowels = 0
for letter in word:
if is_vowel(letter):
num_vowels += 1
if num_vowels % 2 == 0:
score += 2
else:
score +=1
return score
authors: ["pratekshau@gmail.com"] | author_id: pratekshau@gmail.com

blob_id: 5861b98d046738f027e1fc06dca64339dafa8a2d | directory_id: 2bdff209f959d7b577494f6ac908d3700ffb9eb6 | content_id: d43e792a24b5f521a6fa166147c3d1007df758cf
path: /fractals.py
detected_licenses: [] | license_type: no_license
repo_name: simrit1/Fractals | branch_name: refs/heads/main
snapshot_id: 347ebb3867eb0fc3f99027a657197378323bb373 | revision_id: 2d4b5ed05628f616c72eed996bf579d810b3065c
visit_date: 2023-03-25T16:16:18.324205 | revision_date: 2021-03-25T12:16:11 | committer_date: 2021-03-25T12:16:11
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 6,683 | extension: py
content:
import pygame
import sys
import math
import colorsys
pygame.init()
WIDTH = 1920
HEIGHT = 1080
l_system_text = sys.argv[1]
start = int(sys.argv[2]), int(sys.argv[3])
length = int(sys.argv[4])
ratio = float(sys.argv[5])
with open(l_system_text) as f:
axiom = f.readline()
num_rules = int(f.readline())
rules = {}
for i in range(num_rules):
rule = f.readline().split(' ')
rules[rule[0]] = rule[1]
angle = math.radians(int(f.readline()))
class LSystem():
def __init__(self, axiom, rules, angle, start, length, ratio):
self.sentence = axiom
self.rules = rules
self.angle = angle
self.start = start
self.x, self.y = start
self.length = length
self.ratio = ratio
self.theta = math.pi / 2
self.positions = []
def __str__(self):
return self.sentence
def generate(self):
self.x, self.y = self.start
self.theta = math.pi / 2
self.length *= self.ratio
new_sentence = ""
for char in self.sentence:
mapped = char
try:
mapped = self.rules[char]
except:
pass
new_sentence += mapped
self.sentence = new_sentence
def draw(self, screen):
hue = 0
for char in self.sentence:
if char == 'F':
x2 = self.x - self.length * math.cos(self.theta)
y2 = self.y - self.length * math.sin(self.theta)
pygame.draw.line(screen, (hsv2rgb(hue, 1, 1)), (self.x, self.y), (x2, y2), 2)
self.x, self.y = x2, y2
elif char == '+':
self.theta += self.angle
elif char == '-':
self.theta -= self.angle
elif char == '[':
self.positions.append({'x': self.x, 'y': self.y, 'theta': self.theta})
elif char == ']':
position = self.positions.pop()
self.x, self.y, self.theta = position['x'], position['y'], position['theta']
hue += 0.00005
def hsv2rgb(h, s, v):
return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(h, s, v))
def main():
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.mouse.set_visible(False)
fractal = LSystem(axiom, rules, angle, start, length, ratio)
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
keystate = pygame.key.get_pressed()
if keystate[pygame.K_SPACE]:
screen.fill((0, 0, 0))
fractal.draw(screen)
fractal.generate()
if keystate[pygame.K_ESCAPE]:
pygame.quit()
pygame.display.update()
main()
# Adrian-Mariano-Doily python fractals.py fractals/Adrian_Mariano_Doily.txt 1350 350 110 0.5
# Anthony-Hanmer-ADH231a python fractals.py fractals/Anthony_Hanmer_ADH231a.txt 960 1000 50 0.52
# Anthony-Hanmer-ADH256a python fractals.py fractals/Anthony_Hanmer_ADH256a.txt 650 850 50 0.55
# Anthony-Hanmer-ADH258a python fractals.py fractals/Anthony_Hanmer_ADH258a.txt 700 950 80 0.4
# Board python fractals.py fractals/board.txt 500 1000 100 0.52
# Box-fractal python fractals.py fractals/box-fractal.txt 1400 1000 100 0.52
# Classic-Sierpinski-curve python fractals.py fractals/classic-sierpinski-curve.txt 1150 750 30 0.5
# Cross python fractals.py fractals/cross.txt 950 250 250 0.5
# Crystal: python fractals.py fractals/crystal.txt 580 920 100 0.5
# Dragon-curve: python fractals.py fractals/dragon-curve.txt 960 540 200 0.75
# Hilbert-curve python fractals.py fractals/hilbert-curve.txt 1920 1080 250 0.67
# Hilbert-curve-II python fractals.py fractals/hilbert-curve-II.txt 0 1080 50 0.7
# Koch-snowflake: python fractals.py fractals/koch-snowflake.txt 1200 900 100 0.5
# Krishna-anklets python fractals.py fractals/krishna-anklets.txt 1400 550 60 0.8
# Levy-curve python fractals.py fractals/levy-curve.txt 1100 750 70 0.8
# Moore-curve python fractals.py fractals/moore-curve.txt 1000 1080 50 0.8
# no_name python fractals.py fractals/no_name.txt 960 1020 120 0.51
# Peano-curve python fractals.py fractals/peano-curve.txt 0 1080 70 0.7
# Peano-Gosper-curve: python fractals.py fractals/peano-gosper-curve.txt 600 280 200 0.5
# Pentaplexity python fractals.py fractals/pentaplexity.txt 550 850 150 0.5
# Plant: python fractals.py fractals/plant.txt 960 1000 100 0.6
# Quadratic-Gosper python fractals.py fractals/quadratic-gosper.txt 1920 1080 70 0.61
# Quadratic-Koch-island python fractals.py fractals/quadratic-koch-island.txt 950 850 50 0.5
# Quadratic-snowflake python fractals.py fractals/quadratic-snowflake.txt 500 1000 50 0.52
# Rings: python fractals.py fractals/rings.txt 700 250 60 0.5
# Sierpinski-arrowhead python fractals.py fractals/sierpinski-arrowhead.txt 1300 1000 90 0.7
# Sierpinski-carpet python fractals.py fractals/sierpinski-carpet.txt 500 1020 50 0.6
# Sierpinski-curve: python fractals.py fractals/sierpinski-curve.txt 500 550 200 0.52
# Sierpinski-sieve: python fractals.py fractals/sierpinski-sieve.txt 1200 950 400 0.5
# Terdragon-curve python fractals.py fractals/terdragon-curve.txt 400 500 200 0.7
# Three-dragon-curve python fractals.py fractals/three-dragon-curve.txt 600 550 40 0.88
# Tiles python fractals.py fractals/tiles.txt 900 800 30 0.75
# Tree: python fractals.py fractals/tree.txt 960 950 250 0.5
# Triangle python fractals.py fractals/triangle.txt 1000 250 60 0.8
# Twin-dragon-curve python fractals.py fractals/twin-dragon-curve.txt 1000 250 90 0.8
# William-McWorter-Maze01 python fractals.py fractals/William_McWorter_Maze01.txt 1100 750 50 0.8
# William-McWorter-Moore python fractals.py fractals/William_McWorter_Moore.txt 900 350 100 0.5
# William-McWorter-Pentant python fractals.py fractals/William_McWorter_Pentant.txt 1000 120 90 0.39
# William-McWorter-Pentl python fractals.py fractals/William_McWorter_Pentl.txt 1400 400 90 0.5
authors: ["noreply@github.com"] | author_id: simrit1.noreply@github.com

blob_id: fb4ea243547d2893b0fc90f79afa28cdd4b3a796 | directory_id: 961580252a30afb63cfec05debdab039741a4573 | content_id: bfe44c6bdd034755d3c21a886b876f5e6a3354a0
path: /src/truck.py
detected_licenses: [] | license_type: no_license
repo_name: gokceozden/capraz_sevkiyat | branch_name: refs/heads/master
snapshot_id: f7fdfa5e344c5db1f094de50878c3e3099de2fd0 | revision_id: 78c374b55c4abf087f4a89e051a361f1182b8db0
visit_date: 2021-01-21T05:02:46.887208 | revision_date: 2016-06-20T14:16:12 | committer_date: 2016-06-20T14:16:12
github_id: 34,739,541 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,253 | extension: py
content:
from PyQt5.QtCore import *
class Truck(QObject):
"""
General truck class with common types and functions
"""
def __init__(self):
QObject.__init__(self)
self.truck_name = None
self.current_time = 0
self.function_list = []
self.times = {'arrival_time': 0}
self.current_state = 0
self.state_signal = False
self.behaviour_list = []
self.relevant_data = None
self.changeover_time = 0
self.next_state_time = 0
self.current_door = None
self.finish_time = 0
def run(self, current_time):
self.current_time = current_time
self.function_list[self.current_state]()
if self.state_signal:
self.state_signal = False
return 1
return 0
def coming(self):
if self.times['arrival_time'] == self.current_time:
self.times['arrived'] = self.current_time
self.next_state()
def next_state(self, name=None):
self.state_signal = True
if name:
print('name')
print(self.behaviour_list.index('loading'))
self.current_state = self.behaviour_list.index(name)
else:
self.current_state += 1
authors: ["mparlaktuna@gmail.com"] | author_id: mparlaktuna@gmail.com

blob_id: d2b852072eab9b442cdc09671d692a18ba683652 | directory_id: a3d6556180e74af7b555f8d47d3fea55b94bcbda | content_id: f983ef952272a75a6706d3cdfabb08aced7efc7b
path: /third_party/blink/web_tests/external/wpt/fetch/api/resources/preflight.py
detected_licenses: ["BSD-3-Clause", "GPL-1.0-or-later", "MIT", "LGPL-2.0-or-later", "Apache-2.0", "LicenseRef-scancode-warranty-disclaimer", "LGPL-2.1-only", "GPL-2.0-only", "LGPL-2.0-only", "BSD-2-Clause", "LicenseRef-scancode-other-copyleft"] | license_type: permissive
repo_name: chromium/chromium | branch_name: refs/heads/main
snapshot_id: aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | revision_id: a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
visit_date: 2023-08-24T00:35:12.585945 | revision_date: 2023-08-23T22:01:11 | committer_date: 2023-08-23T22:01:11
github_id: 120,360,765 | star_events_count: 17,408 | fork_events_count: 7,102
gha_license_id: BSD-3-Clause | gha_event_created_at: 2023-09-10T23:44:27 | gha_created_at: 2018-02-05T20:55:32 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,394 | extension: py
content:
def main(request, response):
headers = [(b"Content-Type", b"text/plain")]
stashed_data = {b'control_request_headers': b"", b'preflight': b"0", b'preflight_referrer': b""}
token = None
if b"token" in request.GET:
token = request.GET.first(b"token")
if b"origin" in request.GET:
for origin in request.GET[b'origin'].split(b", "):
headers.append((b"Access-Control-Allow-Origin", origin))
else:
headers.append((b"Access-Control-Allow-Origin", b"*"))
if b"clear-stash" in request.GET:
if request.server.stash.take(token) is not None:
return headers, b"1"
else:
return headers, b"0"
if b"credentials" in request.GET:
headers.append((b"Access-Control-Allow-Credentials", b"true"))
if request.method == u"OPTIONS":
if not b"Access-Control-Request-Method" in request.headers:
response.set_error(400, u"No Access-Control-Request-Method header")
return b"ERROR: No access-control-request-method in preflight!"
if request.headers.get(b"Accept", b"") != b"*/*":
response.set_error(400, u"Request does not have 'Accept: */*' header")
return b"ERROR: Invalid access in preflight!"
if b"control_request_headers" in request.GET:
stashed_data[b'control_request_headers'] = request.headers.get(b"Access-Control-Request-Headers", None)
if b"max_age" in request.GET:
headers.append((b"Access-Control-Max-Age", request.GET[b'max_age']))
if b"allow_headers" in request.GET:
headers.append((b"Access-Control-Allow-Headers", request.GET[b'allow_headers']))
if b"allow_methods" in request.GET:
headers.append((b"Access-Control-Allow-Methods", request.GET[b'allow_methods']))
preflight_status = 200
if b"preflight_status" in request.GET:
preflight_status = int(request.GET.first(b"preflight_status"))
stashed_data[b'preflight'] = b"1"
stashed_data[b'preflight_referrer'] = request.headers.get(b"Referer", b"")
stashed_data[b'preflight_user_agent'] = request.headers.get(b"User-Agent", b"")
if token:
request.server.stash.put(token, stashed_data)
return preflight_status, headers, b""
if token:
data = request.server.stash.take(token)
if data:
stashed_data = data
if b"checkUserAgentHeaderInPreflight" in request.GET and request.headers.get(b"User-Agent") != stashed_data[b'preflight_user_agent']:
return 400, headers, b"ERROR: No user-agent header in preflight"
#use x-* headers for returning value to bodyless responses
headers.append((b"Access-Control-Expose-Headers", b"x-did-preflight, x-control-request-headers, x-referrer, x-preflight-referrer, x-origin"))
headers.append((b"x-did-preflight", stashed_data[b'preflight']))
if stashed_data[b'control_request_headers'] != None:
headers.append((b"x-control-request-headers", stashed_data[b'control_request_headers']))
headers.append((b"x-preflight-referrer", stashed_data[b'preflight_referrer']))
headers.append((b"x-referrer", request.headers.get(b"Referer", b"")))
headers.append((b"x-origin", request.headers.get(b"Origin", b"")))
if token:
request.server.stash.put(token, stashed_data)
return headers, b""
authors: ["commit-bot@chromium.org"] | author_id: commit-bot@chromium.org

blob_id: 29b46029d177011615a88307afbb80e6f4f05d35 | directory_id: bd3a58fac4d2547f87ca00d1faf73f6bcf781cef | content_id: 89c31d7c8a777517b674b6fe1b9cbc9b86d14af2
path: /tests/test_matrix.py
detected_licenses: ["MIT"] | license_type: permissive
repo_name: Nachtfeuer/pipeline | branch_name: refs/heads/master
snapshot_id: 0a41f5fef2672678dbbbe33c7ee9cbd5e21bc9d5 | revision_id: ee15d98f4d8f343d57dd5b84339ea41b4e2dc673
visit_date: 2023-01-23T10:18:47.171697 | revision_date: 2021-07-11T09:08:38 | committer_date: 2021-07-11T09:08:38
github_id: 106,919,631 | star_events_count: 30 | fork_events_count: 6
gha_license_id: MIT | gha_event_created_at: 2022-12-26T20:28:07 | gha_created_at: 2017-10-14T10:41:32 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,090 | extension: py
content:
"""Testing of class Stage."""
# pylint: disable=no-self-use, invalid-name
import unittest
from hamcrest import assert_that, equal_to
from spline.components.config import ApplicationOptions
from spline.matrix import Matrix, MatrixProcessData, matrix_worker
class TestMatrix(unittest.TestCase):
"""Testing of class Matrix."""
def test_simple_with_one_entry(self):
"""Testing simple matrix with one entry."""
matrix_definition = [{'name': 'one', 'env': {'message': 'hello'}}]
pipeline_definition = [{'stage(test)': [{
'tasks': [{'shell': {'script': '''echo tasks1:hello1''', 'when': ''}},
{'shell': {'script': '''echo tasks1:hello2''', 'when': ''}}]}]}]
process_data = MatrixProcessData()
process_data.pipeline = pipeline_definition
process_data.options = ApplicationOptions(definition='fake.yaml')
matrix = Matrix(matrix_definition, parallel=False)
result = matrix.process(process_data)
output = [line for line in result['output'] if line.find("hello") >= 0]
assert_that(result['success'], equal_to(True))
assert_that(len(output), equal_to(2))
assert_that(output[0], equal_to('tasks1:hello1'))
assert_that(output[1], equal_to('tasks1:hello2'))
def test_with_tags_and_filter_ordered(self):
"""Testing simple matrix with tags and filtering."""
matrix_definition = [
{'name': 'one', 'env': {'message': 'hello1'}, 'tags': ['group-a']},
{'name': 'two', 'env': {'message': 'hello2'}, 'tags': ['group-b']},
{'name': 'three', 'env': {'message': 'hello3'}, 'tags': ['group-a']}
]
pipeline_definition = [{'stage(test)': [{
'tasks': [{'shell': {'script': '''echo $message''', 'when': ''}}]}]}]
process_data = MatrixProcessData()
process_data.pipeline = pipeline_definition
process_data.options = ApplicationOptions(definition='fake.yaml', matrix_tags='group-a')
matrix = Matrix(matrix_definition, parallel=False)
result = matrix.process(process_data)
output = [line for line in result['output'] if line.find("hello") >= 0]
assert_that(result['success'], equal_to(True))
assert_that(len(output), equal_to(2))
assert_that(output[0], equal_to('hello1'))
assert_that(output[1], equal_to('hello3'))
def test_with_tags_and_filter_parallel(self):
"""Testing simple matrix with tags and filtering."""
matrix_definition = [
{'name': 'one', 'env': {'message': 'hello1'}, 'tags': ['group-a']},
{'name': 'two', 'env': {'message': 'hello2'}, 'tags': ['group-b']},
{'name': 'three', 'env': {'message': 'hello3'}, 'tags': ['group-a']}
]
pipeline_definition = [{'stage(test)': [{
'tasks': [{'shell': {'script': '''echo $message''', 'when': ''}}]}]}]
process_data = MatrixProcessData()
process_data.pipeline = pipeline_definition
process_data.options = ApplicationOptions(definition='fake.yaml', matrix_tags='group-a')
matrix = Matrix(matrix_definition, parallel=True)
result = matrix.process(process_data)
output = sorted([line for line in result['output'] if line.find("hello") >= 0])
assert_that(result['success'], equal_to(True))
assert_that(len(output), equal_to(2))
assert_that(output[0], equal_to('hello1'))
assert_that(output[1], equal_to('hello3'))
def test_failed_ordered(self):
"""Testing failed ordered."""
matrix_definition = [
{'name': 'one', 'env': {'message': 'hello1'}},
{'name': 'two', 'env': {'message': 'hello2'}}
]
pipeline_definition = [{'stage(test)': [{
'tasks': [{'shell': {'script': '''exit 123''', 'when': ''}}]}]}]
process_data = MatrixProcessData()
process_data.pipeline = pipeline_definition
process_data.options = ApplicationOptions(definition='fake.yaml')
matrix = Matrix(matrix_definition, parallel=False)
result = matrix.process(process_data)
output = [line for line in result['output'] if line.find("hello") >= 0]
assert_that(result['success'], equal_to(False))
assert_that(len(output), equal_to(0))
def test_failed_parallel(self):
"""Testing failed parallel."""
matrix_definition = [
{'name': 'one', 'env': {'message': 'hello1'}},
{'name': 'two', 'env': {'message': 'hello2'}}
]
pipeline_definition = [{'stage(test)': [{
'tasks': [{'shell': {'script': '''exit 123''', 'when': ''}}]}]}]
process_data = MatrixProcessData()
process_data.pipeline = pipeline_definition
process_data.options = ApplicationOptions(definition='fake.yaml')
matrix = Matrix(matrix_definition, parallel=True)
result = matrix.process(process_data)
output = [line for line in result['output'] if line.find("hello") >= 0]
assert_that(result['success'], equal_to(False))
assert_that(len(output), equal_to(0))
def test_matrix_worker(self):
"""Testing worker for matrix used in multiprocessing."""
pipeline_definition = [{'stage(test)': [{
'tasks': [{'shell': {'script': '''echo $message''', 'when': ''}}]}]}]
result = matrix_worker({
'matrix': {'name': 'one', 'env': {'message': 'hello1'}},
'pipeline': pipeline_definition,
'model': {},
'options': ApplicationOptions(definition='fake.yaml'),
'hooks': None
})
output = [line for line in result['output'] if line.find("hello") >= 0]
assert_that(result['success'], equal_to(True))
assert_that(len(output), equal_to(1))
def test_dry_run(self):
"""Testing simple matrix with tags and filtering."""
matrix_definition = [
{'name': 'one', 'env': {'message': 'hello1'}},
{'name': 'two', 'env': {'message': 'hello2'}},
{'name': 'three', 'env': {'message': 'hello3'}}
]
pipeline_definition = [{'stage(test)': [{
'tasks': [{'shell': {'script': '''echo {{ env.message }}''', 'when': ''}}]}]}]
process_data = MatrixProcessData()
process_data.pipeline = pipeline_definition
process_data.options = ApplicationOptions(definition='fake.yaml', dry_run=True)
matrix = Matrix(matrix_definition, parallel=True)
result = matrix.process(process_data)
output = [line for line in result['output'] if len(line) > 0]
assert_that(result['success'], equal_to(True))
assert_that(len(output), equal_to(6))
assert_that(output[0], equal_to('#!/bin/bash'))
assert_that(output[1], equal_to('echo hello1'))
assert_that(output[2], equal_to('#!/bin/bash'))
assert_that(output[3], equal_to('echo hello2'))
assert_that(output[4], equal_to('#!/bin/bash'))
assert_that(output[5], equal_to('echo hello3'))
|
[
"thomas.lehmann.private@gmail.com"
] |
thomas.lehmann.private@gmail.com
|
c03fe4e820efd8100e1a25426d3e4f808af557d2
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/cEzT2e8tLpwYnrstP_16.py
|
968877026dfd242524f4110a45bf84a10221c0d0
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 836
|
py
|
"""
Create a function that takes:
1. A list of keys.
2. A list of values (same size).
3. `True`, if key and value should be swapped, else `False`.
The function returns the constructed dict. Empty lists return an empty dict.
### Examples
swap_d([1, 2, 3], ["one", "two", "three"], False)
➞ { 1: "one", 2: "two", 3: "three" }
swap_d([1, 2, 3], ["one", "two", "three"], True)
➞ { "one": 1, "two": 2, "three": 3 }
swap_d(["Paris", 3, 4.5], ["France", "is odd", "is half of 9"], True)
➞ { "France": "Paris", "is odd": 3, "is half of 9": 4.5 }
### Notes
* To make it simple, use only hashable (= immutable) keys.
* To make it simple, use only unique keys.
"""
def swap_d(k, v, swapped):
if swapped:
k, v = v, k
output = {k[i]:v[i] for i in range(len(k))}
return output
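# A minimal self-check sketch (not part of the original exercise), assuming the
# semantics described in the docstring above.
if __name__ == "__main__":
  assert swap_d([1, 2, 3], ["one", "two", "three"], False) == {1: "one", 2: "two", 3: "three"}
  assert swap_d([1, 2, 3], ["one", "two", "three"], True) == {"one": 1, "two": 2, "three": 3}
  assert swap_d([], [], False) == {}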
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
42e1f9b04e2ca7c82ba069ee31d79bfc45840003
|
d15bdaddab59d1cfea76790004cbad3e5f0c2c55
|
/batkin/build_isolated/joy/catkin_generated/pkg.develspace.context.pc.py
|
049bfc6580751b52bcf56b4d0bc9f765466f9dcc
|
[] |
no_license
|
gychen-n/robot
|
4265a1ff469d22550b6b537d1c81aa846ee7641a
|
0663a33aea2c2de9e3ac5863307619091e5b5959
|
refs/heads/main
| 2023-04-10T13:32:06.623682
| 2021-04-16T00:41:04
| 2021-04-16T00:41:04
| 358,431,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;diagnostic_updater;sensor_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "joy"
PROJECT_SPACE_DIR = "/home/robot/batkin/devel_isolated/joy"
PROJECT_VERSION = "1.11.0"
|
[
"gyc@autolabor-host.autolabor-domain"
] |
gyc@autolabor-host.autolabor-domain
|
fb5bf4f07951d84bf342ddda620e4f8ab7c0a109
|
f65c78a69fbf9acb39f5b55a77565c491accccd1
|
/libs/common/bitcoin/script.py
|
5e4daafb10d6b6d794552c76082a923f8d133d76
|
[
"MIT"
] |
permissive
|
Kevingislason/abacus_wallet
|
8ac5b9f5edc33cba3812f98e0040729e8be6bd98
|
3c0f2d5134a6fa59fc1fd15fcad65690352a46bf
|
refs/heads/main
| 2023-03-21T23:24:16.012416
| 2021-03-15T02:56:11
| 2021-03-15T02:56:11
| 334,851,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,916
|
py
|
from .networks import NETWORKS
from . import base58
from . import bech32
from . import hashes
from . import compact
import io
SIGHASH_ALL = 1
class Script:
def __init__(self, data: bytes):
self.data = data[:]
def address(self, network=NETWORKS["main"]) -> str:
script_type = self.script_type()
data = self.data
if script_type is None:
raise ValueError("This type of script doesn't have address representation")
if script_type == "p2pkh":
d = network["p2pkh"] + data[3:23]
return base58.encode_check(d)
if script_type == "p2sh":
d = network["p2sh"] + data[2:22]
return base58.encode_check(d)
if script_type == "p2wpkh" or script_type == "p2wsh":
return bech32.encode(network["bech32"], data[0], data[2:])
# we should never get here
raise ValueError("Unsupported script type")
def script_type(self):
data = self.data
# OP_DUP OP_HASH160 <20:hash160(pubkey)> OP_EQUALVERIFY OP_CHECKSIG
if len(data) == 25 and data[:3] == b"\x76\xa9\x14" and data[-2:] == b"\x88\xac":
return "p2pkh"
# OP_HASH160 <20:hash160(script)> OP_EQUAL
if len(data) == 23 and data[:2] == b"\xa9\x14" and data[-1] == 0x87:
return "p2sh"
# 0 <20:hash160(pubkey)>
if len(data) == 22 and data[:2] == b"\x00\x14":
return "p2wpkh"
# 0 <32:sha256(script)>
if len(data) == 34 and data[:2] == b"\x00\x20":
return "p2wsh"
# unknown type
return None
def serialize(self) -> bytes:
return compact.to_bytes(len(self.data)) + self.data
@classmethod
    def parse(cls, b: bytes) -> "Script":
stream = io.BytesIO(b)
script = cls.read_from(stream)
if len(stream.read(1)) > 0:
raise ValueError("Too many bytes")
return script
@classmethod
    def read_from(cls, stream) -> "Script":
l = compact.read_from(stream)
data = stream.read(l)
if len(data) != l:
raise ValueError("Cant read %d bytes" % l)
return cls(data)
def __eq__(self, other):
return self.data == other.data
def __ne__(self, other):
return self.data != other.data
class Witness:
def __init__(self, items):
self.items = items[:]
def serialize(self) -> bytes:
res = compact.to_bytes(len(self.items))
for item in self.items:
res += compact.to_bytes(len(item)) + item
return res
@classmethod
    def parse(cls, b: bytes) -> "Witness":
stream = io.BytesIO(b)
r = cls.read_from(stream)
if len(stream.read(1)) > 0:
raise ValueError("Byte array is too long")
return r
@classmethod
    def read_from(cls, stream) -> "Witness":
num = compact.read_from(stream)
items = []
for i in range(num):
l = compact.read_from(stream)
data = stream.read(l)
items.append(data)
return cls(items)
def p2pkh(pubkey) -> Script:
"""Return Pay-To-Pubkey-Hash ScriptPubkey"""
return Script(b"\x76\xa9\x14" + hashes.hash160(pubkey.sec()) + b"\x88\xac")
def p2sh(script) -> Script:
"""Return Pay-To-Script-Hash ScriptPubkey"""
return Script(b"\xa9\x14" + hashes.hash160(script.data) + b"\x87")
def p2wpkh(pubkey) -> Script:
"""Return Pay-To-Witness-Pubkey-Hash ScriptPubkey"""
return Script(b"\x00\x14" + hashes.hash160(pubkey.sec()))
def p2wsh(script) -> Script:
"""Return Pay-To-Witness-Pubkey-Hash ScriptPubkey"""
return Script(b"\x00\x20" + hashes.sha256(script.data))
def p2pkh_from_p2wpkh(script) -> Script:
"""Convert p2wpkh to p2pkh script"""
return Script(b"\x76\xa9" + script.serialize()[2:] + b"\x88\xac")
def multisig(m: int, pubkeys) -> Script:
if m <= 0 or m > 16:
raise ValueError("m must be between 1 and 16")
n = len(pubkeys)
if n < m or n > 16:
raise ValueError("Number of pubkeys must be between %d and 16" % m)
data = bytes([80 + m])
for pubkey in pubkeys:
sec = pubkey.sec()
data += bytes([len(sec)]) + sec
# OP_m <len:pubkey> ... <len:pubkey> OP_n OP_CHECKMULTISIG
data += bytes([80 + n, 0xAE])
return Script(data)
def address_to_scriptpubkey(addr):
pass
def script_sig_p2pkh(signature, pubkey) -> Script:
sec = pubkey.sec()
der = signature.serialize() + bytes([SIGHASH_ALL])
data = compact.to_bytes(len(der)) + der + compact.to_bytes(len(sec)) + sec
return Script(data)
def script_sig_p2sh(redeem_script) -> Script:
"""Creates scriptsig for p2sh"""
# FIXME: implement for legacy p2sh as well
return Script(redeem_script.serialize())
def witness_p2wpkh(signature, pubkey) -> Witness:
return Witness([signature.serialize() + bytes([SIGHASH_ALL]), pubkey.sec()])
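# Hedged usage sketch (an assumption, not part of the original module) showing how a
# hand-built p2wpkh scriptPubKey (OP_0 + 20-byte witness program) is classified:
#   spk = Script(b"\x00\x14" + bytes(20))
#   spk.script_type()                      # -> "p2wpkh"
#   Script.parse(spk.serialize()) == spk   # round-trips through the compact-size prefix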
|
[
"snigirev.stepan@gmail.com"
] |
snigirev.stepan@gmail.com
|
1ce44c79b815d8df6680a3212721e78e49837ae6
|
138c1b7a726386b6e9bafb9fcd42c7e62d9fe49e
|
/ScrapyProjects/DynamicSpider/DynamicSpider/spiders/guazi.py
|
b662668c881943737dbbd1a9bd8a5ed621eb1462
|
[] |
no_license
|
zhujixiang1997/1805_spider
|
951bf6e5d411c0b24b33adf788c9d8a5770f244b
|
5cd617e5e3263d13854e19c16a1659017a8ed409
|
refs/heads/master
| 2022-12-11T01:10:54.232536
| 2019-07-09T10:28:30
| 2019-07-09T10:28:30
| 163,835,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,575
|
py
|
# -*- coding: utf-8 -*-
import scrapy
from ScrapyProjects.DynamicSpider.DynamicSpider.items import GuaziCarItem
from ScrapyProjects.DynamicSpider.DynamicSpider.utils.bshead import create_bs_driver
'''
Crawl Wuhan used-car listings from the Guazi direct-sale used-car site.
Analysis: the pages were fetched with scrapy shell, but after inspecting them no data could be extracted, so selenium is introduced.
Approach: scrapy + selenium
'''
class GuaziSpider(scrapy.Spider):
name = 'guazi'
allowed_domains = ['www.guazi.com']
start_urls = ['http://www.guazi.com/wh/buy/']
query_key = input("请输入关键字:")
def __init__(self):
scrapy.Spider.__init__(self, self.name)
self.driver = create_bs_driver()
self.driver.set_page_load_timeout(20)
def __del__(self):
self.driver.quit()
def start_requests(self):
        # Override the initial url requests so they carry meta info the downloader middleware can recognize
for url in self.start_urls:
yield scrapy.Request(url=url, meta={'type':'home','query_key':self.query_key}, callback=self.parse, dont_filter=True)
def parse(self, response):
print(f"{response.url}")
cal_li_list = response.xpath("//ul[@class='carlist clearfix js-top']/li")
for cal_li in cal_li_list:
car_name = cal_li.xpath("./a/h2/text()").extract_first()
car_image = cal_li.xpath("./a/img/@src").extract_first()
car_detail_url = cal_li.xpath("./a/@href").extract_first()
meta=dict(car_name=car_name,car_image=car_image,type="detail")
yield scrapy.Request(url=f"https://www.guazi.com{car_detail_url}", meta=meta, callback=self.parse_detail, dont_filter=True)
        # Fetch the next page
next_url = response.url
meta = dict(type="next_page")
yield scrapy.Request(url=next_url, meta=meta, callback=self.parse, dont_filter=True)
def parse_detail(self,response):
car_name=response.meta.get("car_name")
car_image=response.meta.get("car_image")
registration_time = response.xpath("//ul[@class='assort clearfix']/li[1]/span/text()").extract_first()
mileage = response.xpath("//ul[@class='assort clearfix']/li[2]/span/text()").extract_first()
license_plate = response.xpath("//ul[@class='assort clearfix']/li[3]/span/text()").extract_first()
displacement = response.xpath("//ul[@class='assort clearfix']/li[4]/span/text()").extract_first()
transmission = response.xpath("//ul[@class='assort clearfix']/li[5]/span/text()").extract_first()
price = response.xpath("//div[@class='pricebox js-disprice']/span[1]/text()").extract_first()
result = {
'car_name':car_name if car_name else None,
'car_image':car_image if car_image else None,
'registration_time':registration_time if registration_time else None,
'mileage':mileage if mileage else None,
'license_plate':license_plate if license_plate else None,
'displacement':displacement if displacement else None,
'transmission':transmission if transmission else None,
'price':price+'万' if price else None,
}
item = GuaziCarItem(
car_name=result['car_name'],
car_image=result['car_image'],
registration_time=result['registration_time'],
mileage=result['mileage'],
license_plate=result['license_plate'],
displacement=result['displacement'],
transmission=result['transmission'],
price=result['price'],
)
yield item
|
[
"1164355115@qq.com"
] |
1164355115@qq.com
|
32d15c69d2a035c6bbc6bbe67bbb271dd540c1f0
|
731230c336bf27af8ca91f15c33435920a5c3af4
|
/virtual/bin/wheel
|
599ecec06807b1ead412bf660847330f6744e209
|
[
"MIT"
] |
permissive
|
Brayonski/personal-blog
|
c627adbddf66271594f07e7bd3e3b2069c9aff08
|
ab0cb6590b570ed75a475a52eae9bafadc647665
|
refs/heads/master
| 2020-03-28T17:34:10.867299
| 2018-09-17T13:10:12
| 2018-09-17T13:10:12
| 148,802,537
| 0
| 0
| null | 2018-10-11T13:39:58
| 2018-09-14T14:48:33
|
Python
|
UTF-8
|
Python
| false
| false
| 281
|
#!/media/root/Alpha/projects/MS-Python-Pre-work/flask/personal-blog/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
|
3210b53b1f11a7512ae651e9a24340fa8190d8c2
|
cfd9fa1af735ac3572954704a47e35543850b244
|
/lantern.py
|
6811ad214abe5ef2f1a2dfe9f75e48022f4f9798
|
[] |
no_license
|
xingyueGK/hjsg
|
c1844ea8161d254f6d6cf70f42d1ac849e117438
|
be0c4c457bdfaa9178f25f9f722dc78d88f24540
|
refs/heads/master
| 2022-12-12T08:28:55.823357
| 2020-12-05T12:02:06
| 2020-12-05T12:02:06
| 147,184,573
| 0
| 1
| null | 2022-01-06T22:26:48
| 2018-09-03T09:47:04
|
HTML
|
UTF-8
|
Python
| false
| false
| 6,224
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/2/19 15:08
# @Author : xingyue
# @File : lantern.py
from task.base import SaoDangFb
import threading
import os, time
import redis
pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
_redis = redis.StrictRedis(connection_pool=pool)
lock = threading.RLock()
class task(SaoDangFb):
def lanternIndex(self):
stats = self.action(c='guess_lantern', m='index')
if stats['status']== 1:
print '开始答题'
else:
self.p(stats)
exit(2)
def lantern_festival(self):
try:
answer = {
"1": "a",
"2": "b",
"3": "c",
"4": "d",
"5": "b",
"6": "d",
"7": "d",
"8": "d",
"9": "d",
"10": "a",
"11": "a",
"12": "a",
"13": "b",
"14": "b",
"15": "a",
"16": "c",
"17": "b",
"18": "d",
"19": "a",
"20": "c",
"21": "c",
"22": "a",
"23": "d",
"24": "a",
"25": "a",
"26": "c",
"27": "a",
"28": "b",
"29": "a",
"30": "a",
"31": "a",
"32": "b",
"33": "b",
"34": "b",
"35": "c",
"36": "c",
"37": "d",
"38": "d",
"39": "c",
"40": "b",
"41": "a",
"42": "a",
"44": "a",
"45": "b",
"46": "c",
"48": "a",
"49": "b",
"50": "d",
"51": "c",
"52": "a",
"54": "a",
"55": "d",
"56": "d",
"58": "b",
"59": "b",
"61": "d",
"62": "d",
"63": "d",
"67": "b",
"68": "a",
"69": "b",
"71": "d",
"73": "b",
"74": "a",
"75": "d",
"76": "a",
"77": "b",
"78": "b",
"43": "d",
"47": "d",
"53": "c",
"57": "d",
"60": "c",
"64": "d",
"65": "d",
"66": "b",
"70": "c",
"72": "a",
"79": "c",
"80": "a",
"81": "a",
"82": "d",
"83": "b",
"84": "a",
"85": "c",
"86": "b",
"87": "b",
"88": "b",
"89": "d",
"90": "d",
"91": "b",
"92": "c",
"93": "c",
"94": "b",
"95": "c",
"96": "a",
"97": "d",
"98": "d",
"99": "a",
"100": "c",
"101": "c",
"102": "a",
"103": "b",
"104": "a",
"105": "c",
"106": "a",
"107": "a",
"108": "b",
"109": "c",
"110": "b",
"111": "d",
"112": "b",
"113": "d",
"114": "b",
"115": "a",
"116": "a",
"117": "b",
"118": "b",
"119": "c",
"120": "d",
}
resutl = self.action(c='guess_lantern', m='answer_index')
time.sleep(0.5)
total_num = int(resutl['total_num'])
for i in range(total_num):
questiont = resutl['question']
id = questiont['id']
try:
formdata = {
'right': answer[id]
}
except KeyError as e:
print 'id error ,chaoguo xianzhi '
self.p(questiont, 'iderror')
formdata = {
'right': 'a'
}
resutl = self.action(c='guess_lantern', m='check', body=formdata)
self.p(resutl,'resieeeeeeee')
while True:
if resutl['status'] == 1:
if resutl['right'] == 1:
time.sleep(2)
break
else:
self.p(resutl, 'check result')
print formdata
break
elif resutl['status'] == -10:
time.sleep(5)
resutl = self.action(c='guess_lantern', m='check', body=formdata)
except KeyError as e:
self.p(resutl, 'error')
print 'eeeee',e
def get_reward(self):
self.action(c='guess_lantern', m='get_reward', id=1)
if __name__ == '__main__':
def act(user, apass, addr):
action = task(user, apass, addr)
action.lanternIndex()#开始答题
action.lantern_festival()
action.get_reward()
filepath = os.path.dirname(os.path.abspath(__file__))
# cont = ['21user.txt', 'autouser.txt','gmnewyear.txt', 'user.txt', 'alluser.txt']
cont = ['user.txt']
for t in cont:
with open('%s/users/%s' % (filepath, t), 'r') as f:
for i in f:
if i.strip() and not i.startswith('#'):
name = i.split()[0]
passwd = i.split()[1]
addr = i.split()[2]
# addr = 21
t1 = threading.Thread(target=act, args=(name, passwd, addr))
t1.start()
time.sleep(0.2)
|
[
"a413728161@vip.qq.com"
] |
a413728161@vip.qq.com
|
068b41a94bea56acb225e23b723347f3b9e3b552
|
8fc9520d7224e6179f63f19e668b4b3b6a7d76c5
|
/apps/networks/libraries/flickr/tools.py
|
983d95c4458171972fa5c25ad14ae1aa7f71d267
|
[] |
no_license
|
quantm/custom_django_oscar
|
352ef2fd95e7da932958d4aa80d77dff5b6c1e70
|
9205807030ab360884283810e94177440c228a23
|
refs/heads/master
| 2016-08-09T12:23:39.413677
| 2016-02-08T22:16:53
| 2016-02-08T22:16:53
| 51,326,524
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,084
|
py
|
from method_call import call_api
import sys
import os
def load_methods():
"""
Loads the list of all methods
"""
r = call_api(method="flickr.reflection.getMethods")
return r["methods"]["method"]
__perms__ = {0: 'none', '1': 'read', '2': 'write', '3': 'delete'}
def methods_info():
methods = {}
for m in load_methods():
info = call_api(method="flickr.reflection.getMethodInfo",
method_name=m)
info.pop("stat")
method = info.pop("method")
method["requiredperms"] = __perms__[method["requiredperms"]]
method["needslogin"] = bool(method.pop("needslogin"))
method["needssigning"] = bool(method.pop("needssigning"))
info.update(method)
info["arguments"] = info["arguments"]["argument"]
info["errors"] = info["errors"]["error"]
methods[m] = info
return methods
def write_reflection(path, template, methods=None):
if methods is None:
methods = methods_info()
with open(template, "r") as t:
templ = t.read()
prefix = ""
new_templ = ""
tab = " "
templ = templ % str(methods)
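    # Walk the rendered template character by character: every '{' starts a new,
    # deeper-indented line and every '}' closes one, so the methods dict is written
    # out pretty-printed instead of on a single line.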
for c in templ:
if c == '{':
new_templ += '{\n' + prefix
prefix += tab
elif c == '}':
new_templ += '\n' + prefix + '}\n' + prefix
prefix = prefix[:-len(tab)]
else:
new_templ += c
with open(path, "w") as f:
f.write(new_templ)
def write_doc(output_path, exclude=["flickr_keys", "methods"]):
import flickr_api
exclude.append("__init__")
modules = ['flickr_api']
dir = os.path.dirname(flickr_api.__file__)
modules += [
"flickr_api." + f[:-3]
for f in os.listdir(dir)
if f.endswith(".py") and f[:-3] not in exclude]
sys.path.insert(0, dir + "../")
if not os.path.exists(output_path):
os.makedirs(output_path)
os.chdir(output_path)
for m in modules:
os.system("pydoc -w " + m)
|
[
"012kinglight@gmail.com"
] |
012kinglight@gmail.com
|
af55ae8008e31b19d12765e34394ed945e13896b
|
f672f2c2b35d388526217278f3c301d4372abb4b
|
/cartoview/app_manager/migrations/0001_initial.py
|
31b23a5245af1452dea592580640ac9fb684c4cb
|
[
"BSD-2-Clause"
] |
permissive
|
Msalah593/cartoview_2
|
cce51db13f3e69e99a4915770627942d21a998a8
|
dc57cc22cdc4563ff76448b939c09c370590114f
|
refs/heads/master
| 2020-05-05T12:29:26.333491
| 2019-04-22T08:52:52
| 2019-04-22T08:52:52
| 180,030,874
| 0
| 0
| null | 2019-04-07T22:22:08
| 2019-04-07T22:22:08
| null |
UTF-8
|
Python
| false
| false
| 1,764
|
py
|
# Generated by Django 2.2 on 2019-04-18 14:17
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='App',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('title', models.CharField(max_length=200, unique=True)),
('description', models.TextField(blank=True, null=True)),
('license', models.CharField(blank=True, max_length=200, null=True)),
('date_installed', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Date Installed')),
('single_instance', models.BooleanField(default=False)),
('status', models.CharField(default='Alpha', max_length=100)),
('app_img_url', models.TextField(blank=True, max_length=1000, null=True)),
('version', models.CharField(max_length=10)),
('order', models.IntegerField(default=0, unique=True)),
('default_config', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ['order'],
'permissions': (('install_app', 'Install App'), ('uninstall_app', 'Uninstall App'), ('change_state', 'Change App State (active, suspend)')),
},
),
]
|
[
"hisham.karam@cartologic.com"
] |
hisham.karam@cartologic.com
|
b51f457a805186eaa93adc2cc94cb037a560f42a
|
9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612
|
/exercises/1901040058/1001S02E05_array.py.py
|
9b79494c7bd80613fe0fbe7ad3d6749416df796a
|
[] |
no_license
|
shen-huang/selfteaching-python-camp
|
e8410bfc06eca24ee2866c5d890fd063e9d4be89
|
459f90c9f09bd3a3df9e776fc64dfd64ac65f976
|
refs/heads/master
| 2022-05-02T05:39:08.932008
| 2022-03-17T07:56:30
| 2022-03-17T07:56:30
| 201,287,222
| 9
| 6
| null | 2019-08-08T15:34:26
| 2019-08-08T15:34:25
| null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
array=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
array.reverse()
print(array)
str1=''.join('%s'%id for id in array)
print(str1)
str2=str1[2:8]
print(str2)
str3=str2[::-1]
print(str3)
int1=int(str3)
print(int1)
int2="{0:b}".format(int1)
print(int2)
int3="{0:o}".format(int1)
print(int3)
int4="{0:x}".format(int1)
print(int4)
|
[
"40155646+seven-tears@users.noreply.github.com"
] |
40155646+seven-tears@users.noreply.github.com
|
1fe2794cabf63d91afdf446afb6eda5ac5eac6e6
|
e305ea0e2b84b1cbb138e4443d13c915d7f467cd
|
/面试题58 - I翻转单词顺序.py
|
063790615dc5a9a89d3f8b2617ae65f2da976cfb
|
[] |
no_license
|
JoanWu5/jianzhi-offer
|
770d7fd903779e3e530386705a9a513224a05539
|
580287d9bcf288d374b64b5a87c9921733a7a4f9
|
refs/heads/master
| 2022-10-11T10:36:51.837879
| 2020-06-06T08:59:16
| 2020-06-06T08:59:16
| 268,311,599
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
# s = s.strip().split()
# result = []
# for i in range(len(s)-1,-1,-1):
# if s[i] != ' ':
# result.append(s[i])
# return ' '.join(result)
result = []
s = s.strip()
i = j = len(s)-1
while i>=0:
while i>=0 and s[i]!=' ':
i-=1
result.append(s[i+1:j+1])
while s[i]== ' ':
i-=1
j=i
return ' '.join(result)
s = Solution()
print(s.reverseWords("a good example"))
|
[
"394104840@qq.com"
] |
394104840@qq.com
|
6c7782966748458a5b72ad96b769db9cf5d70920
|
c9fcf2ff1acd16a423c47617145cde00cc1936aa
|
/tests/unit/api/test_task.py
|
eb114f77afdfcf11338986fe6f0b9774b96cf0bd
|
[
"MIT"
] |
permissive
|
JonathanAlcantara/fastlane
|
766dd6701fcf172b6d7bb38983e19bd596cbf0d7
|
dd923f0769281e94da98c4de39c57e3d447aeea3
|
refs/heads/master
| 2020-04-28T19:33:45.310622
| 2019-03-12T20:46:42
| 2019-03-12T20:46:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,027
|
py
|
# Standard Library
from json import loads
from uuid import uuid4
# 3rd Party
import pytest
from preggy import expect
# Fastlane
from fastlane.models.task import Task
def test_get_tasks(client):
"""Test getting tasks"""
Task.create_task("my-task-1")
Task.create_task("my-task-2")
Task.create_task("my-task-3")
resp = client.get("/tasks/")
expect(resp.status_code).to_equal(200)
data = loads(resp.data)
expect(data["items"]).to_length(3)
expect(data["total"]).to_equal(3)
expect(data["page"]).to_equal(1)
expect(data["pages"]).to_equal(1)
expect(data["perPage"]).to_equal(3)
expect(data["hasNext"]).to_be_false()
expect(data["hasPrev"]).to_be_false()
def test_get_tasks2(client):
"""Test getting tasks returns CORS headers"""
resp = client.get("/tasks/")
expect(resp.status_code).to_equal(200)
headers = dict(resp.headers)
expect(headers).to_include("Access-Control-Allow-Origin")
expect(headers["Access-Control-Allow-Origin"]).to_equal("*")
def test_get_tasks3(client):
"""Test getting tasks returns CORS headers with custom origin"""
client.application.config["CORS_ORIGINS"] = "domain.com"
resp = client.get("/tasks/")
expect(resp.status_code).to_equal(200)
headers = dict(resp.headers)
expect(headers).to_include("Access-Control-Allow-Origin")
expect(headers["Access-Control-Allow-Origin"]).to_equal("*")
def test_get_tasks_data(client):
"""Test getting tasks resource data"""
task = Task.create_task("my-task")
resp = client.get("/tasks/")
data = loads(resp.data)
task_data = data["items"][0]
with client.application.app_context():
expect(task_data.keys()).to_equal(task.to_dict().keys())
def test_get_tasks_pagination(client):
"""Test getting tasks pagination"""
Task.create_task("my-task-1")
Task.create_task("my-task-2")
Task.create_task("my-task-3")
Task.create_task("my-task-4")
app = client.application
server_name = app.config["SERVER_NAME"]
resp = client.get("/tasks/?page=2")
data = loads(resp.data)
expect(data["total"]).to_equal(4)
expect(data["page"]).to_equal(2)
expect(data["hasNext"]).to_be_false()
expect(data["hasPrev"]).to_be_true()
expect(data["prevUrl"]).to_equal(f"http://{server_name}/tasks/?page=1")
expect(data["nextUrl"]).to_be_null()
def test_get_tasks_pagination2(client):
"""
Test getting tasks pagination should respond 400 when page is invalid
"""
resp1 = client.get("/tasks/?page=asdasdas")
expect(resp1.status_code).to_equal(400)
resp2 = client.get("/tasks/?page=1019021")
expect(resp2.status_code).to_equal(404)
resp3 = client.get("/tasks/?page=0")
expect(resp3.status_code).to_equal(400)
resp4 = client.get("/tasks/?page=-1")
expect(resp4.status_code).to_equal(400)
def test_get_task_details(client):
"""Test getting tasks"""
task_id = str(uuid4())
job_id = str(uuid4())
task = Task.create_task(task_id)
task.create_or_update_job(job_id, "ubuntu", "command")
resp = client.get(f"/tasks/{task_id}/")
expect(resp.status_code).to_equal(200)
data = loads(resp.data)
expect(data).to_include("jobs")
expect(data["jobs"]).to_length(1)
job_data = data["jobs"][0]
expect(job_data).to_include("id")
expect(job_data["id"]).to_equal(job_id)
expect(job_data["url"]).to_equal(
f"http://localhost:10000/tasks/{task_id}/jobs/{job_id}/"
)
def test_search_tasks1(client):
"""Tests search task by task_id."""
task_id = f"task-search-{str(uuid4())}"
Task.create_task(task_id)
Task.create_task(str(uuid4()))
Task.create_task(str(uuid4()))
resp = client.get("/search/?query=search")
expect(resp.status_code).to_equal(200)
data = loads(resp.data)
expect(data["items"]).to_length(1)
def test_search_tasks2(client):
"""
Test search tasks pagination should respond error when page is invalid
"""
resp1 = client.get("/search/?query=qwe&page=asdasdas")
expect(resp1.status_code).to_equal(400)
resp2 = client.get("/search/?query=qwe&page=1019021")
expect(resp2.status_code).to_equal(404)
resp3 = client.get("/search/?query=qwe&page=0")
expect(resp3.status_code).to_equal(400)
resp4 = client.get("/search/?query=qwe&page=-1")
expect(resp4.status_code).to_equal(400)
def test_job_details1(client):
"""Tests get job details returns proper details and last 20 execs."""
pytest.skip("Not implemented")
def test_job_stdout1(client):
"""Tests get job stdout returns log for last execution."""
pytest.skip("Not implemented")
def test_job_stdout2(client):
"""Tests get job stdout fails if invalid input."""
pytest.skip("Not implemented")
def test_job_stderr1(client):
"""Tests get job stderr returns log for last execution."""
pytest.skip("Not implemented")
def test_job_stderr2(client):
"""Tests get job stderr fails if invalid input."""
pytest.skip("Not implemented")
def test_job_logs1(client):
"""Tests get job logs returns log for last execution."""
pytest.skip("Not implemented")
def test_job_logs2(client):
"""Tests get job logs fails if invalid input."""
pytest.skip("Not implemented")
def test_stop_container1(client):
"""Tests that stopping a running container actually stops the container."""
pytest.skip("Not implemented")
def test_stop_container2(client):
"""Tests that stopping a scheduled job kills the scheduling."""
pytest.skip("Not implemented")
def test_stop_container3(client):
"""Tests that stopping a CRON job kills the scheduling."""
pytest.skip("Not implemented")
def test_stop_container4(client):
"""Tests that stopping without an end slash fails with 404."""
pytest.skip("Not implemented")
def test_stop_container5(client):
"""Tests that stopping a scheduled job with no executions actually kills the scheduled job."""
pytest.skip("Not implemented")
|
[
"heynemann@gmail.com"
] |
heynemann@gmail.com
|
e2bcdd2563f558acd6fe945a5fb664ab81c64eea
|
127e99fbdc4e04f90c0afc6f4d076cc3d7fdce06
|
/2021_하반기 코테연습/boj22858.py
|
655bd7eab90f926e09c88916a4e2769f02d0f280
|
[] |
no_license
|
holim0/Algo_Study
|
54a6f10239368c6cf230b9f1273fe42caa97401c
|
ce734dcde091fa7f29b66dd3fb86d7a6109e8d9c
|
refs/heads/master
| 2023-08-25T14:07:56.420288
| 2021-10-25T12:28:23
| 2021-10-25T12:28:23
| 276,076,057
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
from collections import defaultdict
n, k = map(int, input().split())
answer = []
after_k = list(map(int, input().split()))
d = list(map(int, input().split()))
for _ in range(k):
tmp = [0] *n
for i in range(n):
tmp[d[i]-1] = after_k[i]
after_k = tmp
for i in range(n):
print(after_k[i],end=" ")
|
[
"holim1226@gmail.com"
] |
holim1226@gmail.com
|
251820f0a584d8815135b3db62e5b44d48b87e58
|
4b5ee91dabf402522685cea452ba51c10dbb834e
|
/server/scrolls/migrations/0019_auto_20180608_1241.py
|
0e839793ab7e8a2ddbc96b05321e94c0dee0d579
|
[] |
no_license
|
unscrollinc/unscroll
|
8fb175d6cf9c2f91bdfc7a97a8da71beca7e702d
|
88168af51abf8a0bfa06dcc22bd0ec11b671d989
|
refs/heads/master
| 2023-01-10T03:15:17.737493
| 2021-01-28T20:57:57
| 2021-01-28T20:57:57
| 196,251,483
| 7
| 0
| null | 2023-01-09T22:06:57
| 2019-07-10T17:52:37
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 381
|
py
|
# Generated by Django 2.0.4 on 2018-06-08 12:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('scrolls', '0018_auto_20180608_0404'),
]
operations = [
migrations.AlterUniqueTogether(
name='event',
unique_together={('by_user', 'in_scroll', 'title', 'source_url')},
),
]
|
[
"ford@ftrain.com"
] |
ford@ftrain.com
|
e13b598b3ab59e0ff74d9435e43e41c82855e572
|
5a4ab9ea5e3060bf7744853c0fa261af527876d6
|
/day03/orm_demo1/boo/migrations/0002_article.py
|
1d15706a8dcc361abb33ea0a575a577eafed267c
|
[] |
no_license
|
gaohj/jxlg_0304
|
3cabe3bc56d6a3e0a97f25bc1b684da27e0a0b96
|
9e34dba2abcb752fff692b2c25adb3defd098a87
|
refs/heads/master
| 2020-06-02T23:34:02.489012
| 2019-06-28T08:58:56
| 2019-06-28T08:58:56
| 191,345,040
| 3
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 700
|
py
|
# Generated by Django 2.0 on 2019-06-14 07:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('boo', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=100)),
('content', models.TextField()),
('pub_time', models.DateTimeField(auto_now_add=True)),
],
options={
'db_table': 'articles',
'ordering': ['pub_time'],
},
),
]
|
[
"gaohj@126.com"
] |
gaohj@126.com
|
606d8cfb72f406daa19f033514b6c0387ef8ccc6
|
4542db1d4955aaf7c53c9ff7282d064a066ff393
|
/2020/December/20-Dec/command line argument,exception handling.py
|
763d53a7e21443113b29d545b869e123aa5501e4
|
[] |
no_license
|
mohanbabu2706/100
|
7227527b0e0af1e4f69d194b7537c7aef27a810d
|
3c5a8b769fd4205afb3e3fd7e9cbf2ebf053b7b9
|
refs/heads/master
| 2023-02-20T09:56:45.970290
| 2021-01-20T10:09:09
| 2021-01-20T10:09:09
| 297,233,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
# This program adds up integers that have been passed as arguments on the command line
import sys
try:
    total = sum(int(arg) for arg in sys.argv[1:])
    print('sum = ', total)
except ValueError:
    print('Please supply integer arguments')
|
[
"noreply@github.com"
] |
mohanbabu2706.noreply@github.com
|
4152263ec153a74dbe4b9a6e39b6c9bc8a66e341
|
9307c025d1611a9fd21b34543643a46a03a3d3a4
|
/orcamentos/core/management/commands/create_admin.py
|
220674e01324f0d30c05556b6c13f8580aa167ad
|
[
"MIT"
] |
permissive
|
projetosparalelos/orcamentos
|
74892ae46bfd86a4e8196fa3feb15cb55692ef03
|
af88f5a2b5d73f7ea6cf416d871714a0ebcdf252
|
refs/heads/master
| 2020-04-27T12:41:59.811244
| 2019-01-17T04:31:28
| 2019-01-17T04:31:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 886
|
py
|
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from orcamentos.crm.models import Employee
class Command(BaseCommand):
help = ''' Cria um usuário admin. '''
def handle(self, *args, **kwargs):
'''
        Creates an Employee.
        We need an Employee in order to perform every transaction in the system.
'''
username = 'admin'
first_name = 'Admin'
last_name = 'Admin'
email = 'admin@email.com'
user = Employee.objects.create(
username=username,
first_name=first_name,
last_name=last_name,
email=email,
gender='I'
)
user.set_password('admin')
user.is_staff = True
user.is_superuser = True
user.is_active = True
user.save()
print('Usuário criado com sucesso.')
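# Hedged usage note (an assumption about the project layout): living under
# core/management/commands/, this command runs as `python manage.py create_admin`.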
|
[
"rg3915@yahoo.com.br"
] |
rg3915@yahoo.com.br
|
7d366690a2e18aece41ce666bb4da73a35298049
|
93713f46f16f1e29b725f263da164fed24ebf8a8
|
/Library/lib/python3.7/site-packages/sympy/stats/stochastic_process.py
|
ad68ef5ac3b84750463e3b0a29e5a5de45eaf751
|
[
"BSD-3-Clause"
] |
permissive
|
holzschu/Carnets
|
b83d15136d25db640cea023abb5c280b26a9620e
|
1ad7ec05fb1e3676ac879585296c513c3ee50ef9
|
refs/heads/master
| 2023-02-20T12:05:14.980685
| 2023-02-13T15:59:23
| 2023-02-13T15:59:23
| 167,671,526
| 541
| 36
|
BSD-3-Clause
| 2022-11-29T03:08:22
| 2019-01-26T09:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,228
|
py
|
from __future__ import print_function, division
from sympy import Basic
from sympy.stats.joint_rv import ProductPSpace
from sympy.stats.rv import ProductDomain, _symbol_converter
class StochasticPSpace(ProductPSpace):
"""
Represents probability space of stochastic processes
and their random variables. Contains mechanics to do
computations for queries of stochastic processes.
Initialized by symbol, the specific process and
distribution(optional) if the random indexed symbols
of the process follows any specific distribution, like,
in Bernoulli Process, each random indexed symbol follows
Bernoulli distribution. For processes with memory, this
parameter should not be passed.
"""
def __new__(cls, sym, process, distribution=None):
sym = _symbol_converter(sym)
from sympy.stats.stochastic_process_types import StochasticProcess
if not isinstance(process, StochasticProcess):
raise TypeError("`process` must be an instance of StochasticProcess.")
return Basic.__new__(cls, sym, process, distribution)
@property
def process(self):
"""
The associated stochastic process.
"""
return self.args[1]
@property
def domain(self):
return ProductDomain(self.process.index_set,
self.process.state_space)
@property
def symbol(self):
return self.args[0]
@property
def distribution(self):
return self.args[2]
def probability(self, condition, given_condition=None, evaluate=True, **kwargs):
"""
Transfers the task of handling queries to the specific stochastic
process because every process has their own logic of handling such
queries.
"""
return self.process.probability(condition, given_condition, evaluate, **kwargs)
def compute_expectation(self, expr, condition=None, evaluate=True, **kwargs):
"""
Transfers the task of handling queries to the specific stochastic
process because every process has their own logic of handling such
queries.
"""
return self.process.expectation(expr, condition, evaluate, **kwargs)
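# Hedged usage sketch (assumes the public sympy.stats API; not part of this module):
# a StochasticPSpace is created behind the scenes when a process is declared, and
# probability queries are delegated to it, e.g.
#   from sympy import Matrix, Eq
#   from sympy.stats import DiscreteMarkovChain, P
#   Y = DiscreteMarkovChain("Y", [0, 1], Matrix([[0.5, 0.5], [0.9, 0.1]]))
#   P(Eq(Y[1], 0), Eq(Y[0], 1))   # routed through StochasticPSpace.probability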
|
[
"nicolas.holzschuch@inria.fr"
] |
nicolas.holzschuch@inria.fr
|
054d37c07f6a302bf45d9cc67212e2cd1df7291a
|
60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24
|
/IronPythonStubs/release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/DistributionType.py
|
d38e3a744760d77f5c4751c442f58658e92cb577
|
[
"MIT"
] |
permissive
|
shnlmn/Rhino-Grasshopper-Scripts
|
a9411098c5d1bbc55feb782def565d535b27b709
|
0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823
|
refs/heads/master
| 2020-04-10T18:59:43.518140
| 2020-04-08T02:49:07
| 2020-04-08T02:49:07
| 161,219,695
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 912
|
py
|
class DistributionType(Enum,IComparable,IFormattable,IConvertible):
"""
The type of the distribution
enum DistributionType,values: Uniform (0),VaryingLength (1)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Uniform=None
value__=None
VaryingLength=None
|
[
"magnetscoil@gmail.com"
] |
magnetscoil@gmail.com
|
5c43f2e8a875e9f1939b98e2c16527d7a369d9b7
|
7e40c8bb28c2cee8e023751557b90ef7ef518326
|
/level2/level2.py
|
6b6d4cb94b858331308d54c42e681b7377f549a3
|
[] |
no_license
|
1337536723/buuctf_pwn
|
b6e5d65372ed0638a722faef1775026a89321fa3
|
cca3c4151a50c7d7c3237dab2c5a283dbcf6fccf
|
refs/heads/master
| 2023-08-29T19:35:04.352530
| 2021-11-16T14:06:20
| 2021-11-16T14:06:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
from pwn import *
bin_addr = 0x0804a024
#p = process('./level2')
p = remote('node3.buuoj.cn', 26359)
elf = ELF('level2')
sys_addr = elf.plt['system']
p.recvuntil('Input:')
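# Payload layout sketch (offsets taken from the script itself): 0x88 bytes fill the
# buffer, 4 more overwrite the saved ebp, then system@plt, a dummy return address
# (0x123), and bin_addr as system()'s argument (assumed to point at a "/bin/sh" string).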
payload = b'a' * ( 0x88 + 4 ) + p32(sys_addr) + p32(0x123) + p32(bin_addr)
p.sendline(payload)
p.interactive()
|
[
"admin@srmxy.cn"
] |
admin@srmxy.cn
|
c85b43f04745e322592dcd18a3f52120461d5379
|
95a534d8e0a3d29ae5224e7135f1961a14f2674d
|
/app/one_to_one/models.py
|
45135b5ad8e389f9ac61ad03a44a9a63a3899105
|
[] |
no_license
|
mongkyo/prac-document
|
7a96bfed1d62411dcf231376898f73b94bdb969f
|
65a5331130feb3a0e135255c82ea8d2ba23d4ecc
|
refs/heads/master
| 2020-03-31T08:06:43.134379
| 2018-10-11T17:40:42
| 2018-10-11T17:40:42
| 152,046,166
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 878
|
py
|
from django.db import models
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
def __str__(self):
return f'{self.name} the place'
class Restaurant(models.Model):
place = models.OneToOneField(
Place,
on_delete=models.CASCADE,
primary_key=True,
)
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
def __str__(self):
return f'{self.place.name} the restaurant'
class Waiter(models.Model):
restaurant = models.ForeignKey(
Restaurant,
on_delete=models.CASCADE,
)
name = models.CharField(max_length=50)
def __str__(self):
        return f'{self.name} the waiter at {self.restaurant}'
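# Hedged usage sketch (assumes migrations are applied; intended for the Django shell,
# not for import time):
#   p = Place.objects.create(name="Ace Diner", address="1 Main St")
#   r = Restaurant.objects.create(place=p, serves_hot_dogs=True)
#   p.restaurant           # reverse one-to-one accessor, returns r
#   Waiter.objects.create(restaurant=r, name="Joe")   # attached via the ForeignKey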
|
[
"dreamong91@gmail.com"
] |
dreamong91@gmail.com
|
64e08a3ae92f72b778581622471ad547fefae6dd
|
b284097fb7eda14307defe2dd19fe290a366a8b3
|
/addons-vauxoo/invoice_cancel_iva/model/invoice.py
|
b868901d2963f3be0082d600541caff1c9bfe1db
|
[] |
no_license
|
OpenBusinessSolutions/odoo-fondeur-server
|
41420069e77b2faaf12c396e5d3d2a2c165a8ae2
|
9c588e45011a87ec8d9af73535c4c56485be92f7
|
refs/heads/master
| 2021-01-01T05:45:29.736682
| 2016-04-19T15:21:58
| 2016-04-19T15:21:58
| 56,607,743
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,060
|
py
|
# coding: utf-8
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
# Credits######################################################
# Coded by: Vauxoo C.A.
# Planified by: Nhomar Hernandez
# Audited by: Vauxoo C.A.
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
from openerp.osv import osv
from openerp.tools.translate import _
import openerp.workflow as workflow
class AccountInvoice(osv.Model):
_inherit = 'account.invoice'
#~ def action_cancel_draft(self, cr, uid, ids, *args):
#~
#~ wf_service = workflow
#~ res = super(account_invoice, self).action_cancel_draft(cr, uid, ids, ())
#~ for i in self.browse(cr,uid,ids,context={}):
#~ if i.wh_iva_id:
#~ wf_service.trg_validate(uid, 'account.wh.iva',i.wh_iva_id.id, 'set_to_draft', cr)
#~ return res
def action_number(self, cr, uid, ids, context=None):
'''
Modified to witholding vat validate
'''
wf_service = workflow
res = super(AccountInvoice, self).action_number(cr, uid, ids)
iva_line_obj = self.pool.get('account.wh.iva.line')
invo_brw = self.browse(cr, uid, ids, context=context)[0]
state = [('draft', 'set_to_draft'), (
'confirmed', 'wh_iva_confirmed'), ('done', 'wh_iva_done')]
if invo_brw.cancel_true:
if invo_brw.wh_iva_id:
iva_line_obj.load_taxes(cr, uid, [
i.id for i in invo_brw.wh_iva_id.wh_lines],
context=context)
for d in state:
if invo_brw.wh_iva_id.prev_state == 'cancel':
break
if not all([False for line in invo_brw.wh_iva_id.wh_lines
if not line.invoice_id.move_id]):
raise osv.except_osv(_('Error'), _(
'One of the bills involved in the vat retention\
has not been validated, because it does not\
have an associated retention'))
wf_service.trg_validate(
uid, 'account.wh.iva', invo_brw.wh_iva_id.id, d[1], cr)
if d[0] == invo_brw.wh_iva_id.prev_state:
break
return res
def invoice_cancel(self, cr, uid, ids, context=None):
if context is None:
context = {}
context.update({'iva': True})
iva_obj = self.pool.get('account.wh.iva')
invo_brw = self.browse(cr, uid, ids, context=context)[0]
if invo_brw.wh_iva_id:
iva_obj.write(cr, uid, [invo_brw.wh_iva_id.id], {
'prev_state': invo_brw.wh_iva_id.state},
context=context)
res = super(AccountInvoice, self).invoice_cancel(
cr, uid, ids, context=context)
return res
def check_iva(self, cr, uid, ids, context=None):
if context is None:
context = {}
invo_brw = self.browse(cr, uid, ids[0], context=context)
if invo_brw.wh_iva_id:
return False
return True
|
[
"tecnologia@obsdr.com"
] |
tecnologia@obsdr.com
|
27dcc1afb9eef48f0b54eae6b5613fe0829275e1
|
c3e10c7174f78a8ac2dc0823a1fcfa4c80afc67b
|
/1elinearsearch.py
|
2cca16f64fbbb233b2f111ac68456068d9554835
|
[] |
no_license
|
PreritBhandari/python-programs-III
|
3460c63e56ce6383d71ec594274c4b3edf984117
|
eea3fbecae59b410971e11ff3a50504752cb60da
|
refs/heads/master
| 2022-11-19T09:20:11.332556
| 2020-07-19T03:56:44
| 2020-07-19T03:56:44
| 280,787,593
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
# e) linear search
def LinearSearch(lys, element):
for i in range(len(lys)):
if lys[i] == element:
return i
return False
if __name__ == "__main__":
print(LinearSearch([1, 2, 3, 4, 5, 2, 1], 2))
|
[
"patrioticprerit@gmail.com"
] |
patrioticprerit@gmail.com
|
42394a236222b49e0c60dc927584f9b29b99139a
|
f68d246ea82f980706bfa574da91d99797c29b38
|
/activeCode/heap.py
|
4d0a8944d04ab21537ff135bbd68862108086e79
|
[] |
no_license
|
nicolas4d/Problem-Solving-with-Algorithms-and-Data-Structures-using-Python
|
40684370ab0c8a22894aa58c0479da6697ea0a13
|
5c7595cab3c5501e4b4177b700708a2609c74e30
|
refs/heads/master
| 2020-12-02T13:43:49.547926
| 2020-02-01T14:19:08
| 2020-02-01T14:19:08
| 231,025,645
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
from pythonds.trees import BinHeap
bh = BinHeap()
bh.insert(5)
bh.insert(7)
bh.insert(3)
bh.insert(11)
print(bh.delMin())
print(bh.delMin())
print(bh.delMin())
print(bh.delMin())
|
[
"nicolas4d@foxmail.com"
] |
nicolas4d@foxmail.com
|
066227fc4417ba0fff2fa3345443eb2815dab4c4
|
b23d294fdffabe72c336644f119860f5ce704eef
|
/python_1000phone/预科/day2-PIL/04-文字和颜色块.py
|
fa76439697eb6e145f5254e5ba57f3ea695b190c
|
[] |
no_license
|
ikaros274556330/my_code
|
65232758fd20820e9f4fa8cb5a6c91a1969862a2
|
92db21c4abcbd88b7bd77e78d9f660b4534b5071
|
refs/heads/master
| 2020-11-26T09:43:58.200990
| 2019-12-23T02:08:39
| 2019-12-23T02:08:39
| 229,032,315
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
"""__author:吴佩隆"""
from PIL import Image,ImageFont,ImageDraw
# 1. Text watermark - render text onto an image
# Prepare the image
image1 = Image.open('./files/chiling.jpg')
# Prepare the text
# 1) Create a font object
# ImageFont.truetype(path to the font file, font size)
font1 = ImageFont.truetype('files/bb.ttf',80)
# 2) Create a draw object
# draw = ImageDraw.Draw(image1)
draw = ImageDraw.Draw(image1)
# 3) Write
# draw.text(text position, content, (color), font object)
draw.text((0,0),'Hello Word!',(0,0,0),font1)
image1.show()
# 2. Color block
image2 = Image.new('RGB',(200,50),(255,255,255))
# 1) Create a draw object
draw2 = ImageDraw.Draw(image2)
# 2) Set the pixel at the given position of the image to the given color
# draw2.point(position, color)
draw2.point((0,0),(255,0,0))
image2.show()
|
[
"274556330@qq.com"
] |
274556330@qq.com
|
524ed7e94503183a799d610122964444c9b38b8e
|
304033f60097c489cbc60aab639be45ccdbef1a5
|
/algorithms/inflearn/section2/7.py
|
6e47b9bfce07d3c94cfb462166a5a879c41312ce
|
[] |
no_license
|
pgw928/TIL
|
3d0c47c07bd1f5c73826daf8579a2b0e3f93cb95
|
765906f1e6eecad4ad8ec9bf704041433d7eb304
|
refs/heads/master
| 2023-06-29T05:46:30.039815
| 2021-08-10T17:38:11
| 2021-08-10T17:38:11
| 288,923,095
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
import sys
sys.stdin = open('section2/input.txt', 'rt')
n = int(input())
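# Sieve of Eratosthenes: nums[i] stays True only for primes, so sum(nums) counts the
# primes <= n; marking starts at 2*i, and i only needs to run up to n//2.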
nums = [True]*(n+1)
nums[0], nums[1] = False, False
for i in range(2, n//2+1):
for j in range(2*i, n+1, i):
nums[j] = False
print(sum(nums))
|
[
"pku928@naver.com"
] |
pku928@naver.com
|
99971c0b16dacf336e6bb8cfec3810444af31ef1
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2882/39190/304719.py
|
48b5449910760181ac3be5702cc6fd925bd7e81f
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 690
|
py
|
def func3(arr):
    # rra must be a reversed copy of arr; a plain alias (rra = arr) would defeat the purpose.
    # Elements are strings read from input, so find the peak by numeric value.
    rra = arr[::-1]
    peak = max(arr, key=int)
if arr.index(peak)>0:
for i in range(arr.index(peak)-1):
if int(arr[i])>=int(arr[i+1]):
return False
if arr.index(peak)<len(arr)-rra.index(peak)-1:
for i in range(arr.index(peak),len(arr)-rra.index(peak)-2):
if int(arr[i])!=int(arr[i+1]):
return False
if rra.index(peak)>0:
for i in range(len(arr)-rra.index(peak)-1,len(arr)-1):
if int(arr[i])<=int(arr[i+1]):
return False
return True
ip=input()
arr=input().split(" ")
op=func3(arr)
if op==True:
print("YES")
else:
print("NO")
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
9683287354e0075e12fd77dad8cf739231fb23f6
|
0ac4831465d0273effd087c75484474985fd1106
|
/link_rec/forms.py
|
ae67ff1282aa8f0d1c546ad426aa3d76d792747d
|
[] |
no_license
|
duggalr2/linkedin_recommend
|
af2a040b69cca4f190b8fe064f8048c0b412483c
|
d535df1643f2a37b8473962f496d83464aa839f3
|
refs/heads/master
| 2020-12-02T22:39:51.501151
| 2017-12-05T16:19:58
| 2017-12-05T16:19:58
| 96,162,172
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,627
|
py
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.forms import formset_factory, ModelForm
INDUSTRY_CHOICES = (
('software', 'Software'),
('engineering', 'Engineering, excluding Software'),
('research', 'Research'),
('design', 'Design'),
('data_science', 'Data Science'),
('product_manager', 'Product Manager'),
('business_finance', 'Business and Finance'),
('startup_founder', 'Startup Founders/Executives'),
    ('admin_coordination', 'Admin/Coordination/IT/HR'),
('crypto_blockchain', 'Cryptography/Blockchain')
)
SCHOOL_NAMES = (
('university_of_toronto', 'University of Toronto'),
('harvard', 'Harvard University'),
('massachusetts_institute_of_technology', 'Massachusetts Institute of Technology'),
('waterloo', 'University of Waterloo'),
('stanford', 'Stanford University'),
('western', 'Western University'),
('university_of_california_berkeley', 'University of California, Berkeley'),
('caltech', 'Caltech'),
('cornell', 'Cornell University'),
('oxford', 'Oxford University'),
('carnegie_mellon_university', 'Carnegie Mellon University'),
('university_of_pennsylvania', 'University of Pennsylvania'),
('cambridge', 'University of Cambridge'),
('university_of_california_los_angeles', 'University of California, Los Angeles'),
('queens', "Queen's University"),
('columbia', 'Columbia University')
)
PROGRAM_CHOICES = (
('computer_science', 'Computer Science'),
('commerce_business', 'Commerce/Business/Finance'),
('humanities_lifesci', 'Humanities/LifeSci/HealthSci'),
('math_physics_statistics', 'Math/Physics/Statistics'),
('engineering', 'Engineering'),
)
class SignUpForm(UserCreationForm):
# this will add additional fields to the built-in User Creation Form
school = forms.ChoiceField(choices=SCHOOL_NAMES,)
school_program = forms.ChoiceField(choices=PROGRAM_CHOICES, )
industry_of_interest = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple, choices=INDUSTRY_CHOICES, )
school_of_interest = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple, choices=SCHOOL_NAMES, )
name = forms.CharField(max_length=250)
class Meta:
model = User
fields = ('name', 'username', 'password1', 'password2', 'school', 'school_program', 'industry_of_interest', 'school_of_interest')
MISCLASSIFY_SELECTION = (
('education_program', 'Education Program'),
('job_industry', 'Job Industry'),
)
class MisClassify(forms.Form):
first_selection = forms.ChoiceField(choices=MISCLASSIFY_SELECTION, )
class InitialEduClassify(forms.Form):
pass
class JobMisClassify(forms.Form):
# edu_correct = forms.ChoiceField(choices=MISCLASSIFY_SELECTION,)
def __init__(self, *args, **kwargs):
extra = kwargs.pop('extra')
super(JobMisClassify, self).__init__(*args, **kwargs)
for i, job in enumerate(extra):
self.fields['custom_%s' % i] = forms.ChoiceField(label=job, choices=INDUSTRY_CHOICES, required=False)
# self.fields['custom_%s' % i] = forms.CharField(label=job, max_length=250, required=False)
def extra_answers(self):
for name, value in self.cleaned_data.items():
if name.startswith('custom_'):
yield (self.fields[name].label, value)
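    # Hedged usage sketch (an assumption about the calling view, not part of this form):
    #   form = JobMisClassify(request.POST or None, extra=["Software Engineer at Acme"])
    #   if form.is_valid():
    #       corrections = dict(form.extra_answers())  # {job label: chosen industry key}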
# super(EducationMisClassify, self).__init__(*args, **kwargs)
# for i in range(0, n):
# self.fields["edu_correct %d" % i] = forms.ChoiceField(choices=MISCLASSIFY_SELECTION,)
# edu_correct = forms.CharField(max_length=250)
class EducationMisClassify(forms.Form):
edu_correct = forms.ChoiceField(choices=MISCLASSIFY_SELECTION,)
# job_selection = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple, choices=(('job1', 'Default Job 1'),))
#class AuthorForm(ModelForm):
# class Meta:
# model = Author
# fields = ['name', 'title', 'birth_date']
#
#
#class BookForm(ModelForm):
# class Meta:
# model = Book
# fields = ['name', 'authors']
#
#
#class MultiWidgetBasic(forms.widgets.MultiWidget):
# def __init__(self, attrs=None):
# widgets = [forms.TextInput(),
# forms.TextInput()]
# super(MultiWidgetBasic, self).__init__(widgets, attrs)
#
# def decompress(self, value):
# if value:
# return pickle.loads(value)
# else:
# return ['', '']
#
#
#class MultiExampleField(forms.fields.MultiValueField):
# widget = MultiWidgetBasic
#
# def __init__(self, *args, **kwargs):
# list_fields = [forms.fields.CharField(max_length=31),
# forms.fields.CharField(max_length=31)]
# super(MultiExampleField, self).__init__(list_fields, *args, **kwargs)
#
# def compress(self, values):
# return pickle.dumps(values)
#
#
#class FormForm(forms.Form):
# a = forms.BooleanField()
# b = forms.CharField(max_length=32)
# c = forms.CharField(max_length=32, widget=forms.widgets.Textarea())
# d = forms.CharField(max_length=32, widget=forms.widgets.SplitDateTimeWidget())
# e = forms.CharField(max_length=32, widget=MultiWidgetBasic())
# f = MultiExampleField()
#
# class UserForm(forms.ModelForm):
# class Meta:
# model = User
# fields = ('first_name', 'last_name', 'email')
#
#
# class ProfileForm(forms.ModelForm):
# class Meta:
# model = Profile
# fields = ('bio', 'location', 'birth_date')
|
[
"ibrahul24@gmail.com"
] |
ibrahul24@gmail.com
|
b40e44faa5e369870bc288871dc9c304d99d2c3e
|
34474048ec5c4850623cf0fea993b43de76fada4
|
/Tests/unittest/code_gen/tac_o1/mix_global_local_nested_int_char_array.tac
|
3af956749d4f00960c094f9db45c8c0e838c9ba5
|
[] |
no_license
|
imsure/C--
|
69a80e152936e31b14319ab16c2317d2cacc9165
|
9991e7135d6ebc8f6f08f46f37b82bfe353ec17f
|
refs/heads/master
| 2021-01-13T02:04:07.295401
| 2015-05-01T01:26:07
| 2015-05-01T01:26:07
| 30,732,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,027
|
tac
|
main:
Enter main 108
_taddr0 = x + 0
*_taddr0(int) = 0
_taddr1 = y + 0
*_taddr1(char) = 1
_taddr2 = x + 4
*_taddr2(int) = 1
_taddr3 = y + 1
*_taddr3(char) = 2
_taddr4 = x + 8
*_taddr4(int) = 2
_taddr5 = y + 2
*_taddr5(char) = 3
_taddr6 = x + 12
*_taddr6(int) = 3
_taddr7 = y + 3
*_taddr7(char) = 4
_taddr8 = x + 16
*_taddr8(int) = 4
_taddr9 = y + 4
*_taddr9(char) = 55
_taddr10 = x + 0
_tvar0 = *_taddr10(int) * 1
_taddr11 = y + _tvar0
_tvar0 = *_taddr11(char) * 4
_taddr12 = x + _tvar0
_tvar0 = *_taddr12(int) * 1
_taddr13 = y + _tvar0
_tvar0 = *_taddr13(char) * 4
_taddr14 = x + _tvar0
_tvar0 = *_taddr14(int) * 1
_taddr15 = y + _tvar0
_tvar0 = *_taddr15(char) * 4
_taddr16 = x + _tvar0
_tvar0 = *_taddr16(int) * 1
_taddr17 = y + _tvar0
_tvar0 = *_taddr17(char) * 4
_taddr18 = x + _tvar0
_tvar0 = *_taddr18(int) * 1
_taddr19 = y + _tvar0
u = *_taddr19(char)
Param u
Call print_int 1
_tstr0 = "\n"
Param _tstr0
Call print_string 1
Return
|
[
"imsure95@gmail.com"
] |
imsure95@gmail.com
|
d5848a9e5ad00752733438c6be9f15f855ff05c2
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_pointer.py
|
5ac88a0ce708e9c89cc7b196e8ef77770aec8498
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
#class header
class _POINTER():
def __init__(self,):
self.name = "POINTER"
self.definitions = [u'something that is used for pointing at things, such as a long, thin stick that you hold to direct attention to a place on a map or words on a board, or a cursor', u'a helpful piece of advice or information: ', u'something that shows you an existing or developing situation: ', u'a hunting dog that has been trained to stand very still with its nose pointing towards the animals and birds that are being hunted']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
f5aa2f0a35d71460c6b936f9fe19313a0a13913b
|
1a3234c1deeb8987fb4c5b424e6485ddd10c8ace
|
/estagios/core/form.py
|
021338a4415b61c90edc91e57e34b3d1b2660f03
|
[] |
no_license
|
orlandosaraivajr/estagio
|
0c46b16fccf52861f68431a88032ba0fdc46bf66
|
439b797406c82673e9972eee373d60f844679a9c
|
refs/heads/master
| 2022-05-14T14:15:53.109355
| 2020-04-03T05:58:13
| 2020-04-03T05:58:13
| 189,227,824
| 0
| 0
| null | 2022-04-22T21:20:07
| 2019-05-29T13:09:14
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,632
|
py
|
from django import forms
from django.forms import ModelForm
from estagios.core.models import User
class LoginForm(ModelForm):
class Meta:
model = User
fields = ['email', 'password']
labels = {
'email': 'E-mail',
'password': 'Senha'
}
widgets = {
'email': forms.EmailInput(attrs={'class': 'form-control'}),
'password': forms.PasswordInput(attrs={'class': 'form-control'})
}
help_texts = {
'email': ('E-mail cadastrado.'),
'password': ('Senha para acesso.'),
}
error_messages = {
'email': {
'required': ("Digite um e-mail válido."),
},
'password': {
'required': ("Senha não pode ser em branco."),
}
}
class NomeCompletoForm(ModelForm):
error_css_class = "error"
class Meta:
model = User
fields = ('first_name',)
labels = {
'first_name': 'Nome Completo',
}
widgets = {
'first_name': forms.TextInput(
attrs={
'class': 'form-control',
'placeholder': 'Preencha seu nome completo.'
}
),
}
error_messages = {
'first_name': {
'required': ("Não deixe este campo em branco. Informe seu nome completo."),
},
}
def clean_first_name(self):
if self.cleaned_data['first_name'] != '':
return self.cleaned_data['first_name']
return 'Nome em Branco'
|
[
"orlandosaraivajr@gmail.com"
] |
orlandosaraivajr@gmail.com
|
2599ba25172f8c4d5bf9dfd7c2d42ef2a622b096
|
48a522b031d45193985ba71e313e8560d9b191f1
|
/baekjoon/python/10406.py
|
14779fe2b517e02188eedca6e18f8062aa9d26ff
|
[] |
no_license
|
dydwnsekd/coding_test
|
beabda0d0aeec3256e513e9e0d23b43debff7fb3
|
4b2b4878408558239bae7146bb4f37888cd5b556
|
refs/heads/master
| 2023-09-04T12:37:03.540461
| 2023-09-03T15:58:33
| 2023-09-03T15:58:33
| 162,253,096
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
import sys
count = 0
w, n, p = map(int, sys.stdin.readline().split())
punch_list = list(map(int, sys.stdin.readline().split()))
for punch in punch_list:
if w <= punch <= n:
count += 1
print(count)
|
[
"dydwnsekd123@gmail.com"
] |
dydwnsekd123@gmail.com
|
13ff38fd624a28f8e31a89d15df14a35ccd208fa
|
9ecf55bf2601e0d4f74e71f4903d2fd9e0871fd6
|
/my_seg_tf/v4_128_128/model/unet.py
|
ba7e761f74f601823dd64cf81e8c08124d5f3053
|
[] |
no_license
|
qq191513/mySeg
|
02bc9803cde43907fc5d96dc6a6a6371f2bef6fe
|
4337e6a0ca50b8ccbf6ed9b6254f2aec814b24db
|
refs/heads/master
| 2020-04-10T09:57:37.811133
| 2019-06-26T08:21:23
| 2019-06-26T08:21:23
| 160,951,962
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,033
|
py
|
import os
import tensorflow as tf
import sys
sys.path.append('../')
import config as cfg
lr_init = cfg.lr_init
class Unet(object):
def __init__(self, sess, config, is_train):
self.sess = sess
self.name = 'Unet'
self.mask = config.mask
self.ckpt_dir = config.ckpt_dir
self.is_train = is_train
self.images = tf.placeholder(tf.float32, [config.batch_size, config.input_shape[0], config.input_shape[1], config.input_shape[2]]) #initially 512,512,3 for Gray Images
self.labels = tf.placeholder(tf.float32, [config.batch_size, config.labels_shape[0], config.labels_shape[1], config.labels_shape[2]]) #initially 512,512, 256 for Binary Segmentation
self.pred = self.build(self.images)
# self.accuracy = self.compute_acc(self.recons, self.labels)
self.loss = self.compute_loss( self.labels, self.pred)
self.t_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
self.sess.run(tf.variables_initializer(self.t_vars))
self.saver = tf.train.Saver()
if not tf.gfile.Exists(self.ckpt_dir):
tf.gfile.MakeDirs(self.ckpt_dir)
self.summary_writer = tf.summary.FileWriter(self.ckpt_dir)
self.summary_op = tf.summary.merge(self.loss_summaries)
# self.summary_op = tf.summary.merge(self.acc_summaries)
        self.optim = tf.train.AdamOptimizer(lr_init) #use NadamOptimizer
self.train = self.optim.minimize(self.loss)
def fit(self, images, labels, summary_step=-1):
if summary_step >= 0:
# _, loss_val,acc_val, summary_str = self.sess.run(
# [self.train, self.loss, self.acc,self.summary_op],
# {self.images:images, self.labels:labels})
# self.summary_writer.add_summary(summary_str, summary_step)
_,loss_val, summary_str = self.sess.run(
[self.train, self.loss, self.summary_op],
{self.images: images, self.labels: labels})
self.summary_writer.add_summary(summary_str, summary_step)
else :
# _, loss_val,acc_val = self.sess.run(
# [self.train, self.loss,self.acc],
# {self.images:images, self.labels:labels})
_, loss_val = self.sess.run(
[self.train, self.loss],
{self.images: images, self.labels: labels})
return loss_val
def predict(self, images):
result = self.sess.run([self.pred], {self.images:images})
return result
def compute_loss(self, labels,pred):
dice_loss = self.dice_coef_loss(labels, pred)
self.loss_summaries = [
tf.summary.scalar("dice_loss", dice_loss)]
total_loss = dice_loss
return total_loss
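    # NOTE: compute_loss above calls `dice_coef_loss`, but no such method exists in
    # this file as dumped. The soft-Dice sketch below is a common formulation added
    # as a plausible stand-in (it assumes `pred` holds logits), not the author's code.
    def dice_coef_loss(self, labels, pred, smooth=1e-5):
        labels_f = tf.reshape(labels, [-1])
        pred_f = tf.reshape(tf.nn.sigmoid(pred), [-1])
        intersection = tf.reduce_sum(labels_f * pred_f)
        dice = (2. * intersection + smooth) / (tf.reduce_sum(labels_f) + tf.reduce_sum(pred_f) + smooth)
        return 1. - dice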
def build(self, images):
# with tf.variable_scope(self.name):
conv1 = self.conv2d(images, 64, 3)
conv1 = self.conv2d(conv1, 64, 3)
pool1 = self.maxpooling2d(conv1,[2,2])
conv2 = self.conv2d(pool1, 128, 3)
conv2 = self.conv2d(conv2, 128, 3)
pool2 = self.maxpooling2d(conv2,[2,2])
conv3 = self.conv2d(pool2, 256, 3)
conv3 = self.conv2d(conv3, 256, 3)
pool3 = self.maxpooling2d(conv3,[2,2])
conv4 = self.conv2d(pool3, 512, 3)
conv4 = self.conv2d(conv4, 512, 3)
        up5 = tf.concat([self.conv2d_transpose(conv4, 256, 3, stride=2), conv3], axis=3)
conv5 = self.conv2d(up5, 256, 3)
conv5 = self.conv2d(conv5, 256, 3)
        up6 = tf.concat([self.conv2d_transpose(conv5, 256, 3, stride=2), conv2], axis=3)
conv6 = self.conv2d(up6, 128, 3)
conv6 = self.conv2d(conv6, 128, 3)
        up7 = tf.concat([self.conv2d_transpose(conv6, 256, 3, stride=2), conv1], axis=3)
conv7 = self.conv2d(up7, 64, 3)
conv7 = self.conv2d(conv7, 64, 3)
conv8 = self.conv2d(conv7, 16, 1)
out = tf.squeeze(conv8, axis=3) # tf.squeeze remove the dimensions of value 1
print("shape of squeezed vector:", out.get_shape())
return out
    def conv2d(self, x, channel, kernel, stride=1, padding="SAME", activation=tf.nn.relu):
        # activation must be passed by keyword, otherwise it lands in the data_format argument
        return tf.layers.conv2d(x, channel, kernel, stride, padding, activation=activation, kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    def maxpooling2d(self, inputs, pool_size, strides=None, padding='valid', data_format='channels_last', name=None):
        # the callers above only pass pool_size, so default the stride to the pool size
        if strides is None:
            strides = pool_size
        return tf.layers.max_pooling2d(inputs, pool_size, strides, padding=padding, data_format=data_format, name=name)
def conv2d_transpose(self, x, channel, kernel, stride=1, padding="SAME"):
return tf.layers.conv2d_transpose(x, channel, kernel, stride, padding,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
def save(self,epoch):
print('saving model.......')
self.saver.save(self.sess, os.path.join(self.ckpt_dir, "model_{}.ckpt".format(epoch)))
def restore(self,name):
print('restoring model: {}.......'.format(name))
self.saver.restore(self.sess, os.path.join(self.ckpt_dir, name))
|
[
"1915138054@qq.com"
] |
1915138054@qq.com
|
d369fd4de101726338d35665b676a132ab6c4567
|
32cb0be487895629ad1184ea25e0076a43abba0a
|
/LifePictorial/top/api/rest/CrmShopvipCancelRequest.py
|
c82fb8fafe43c8edd71af72a729843b38b0af2af
|
[] |
no_license
|
poorevil/LifePictorial
|
6814e447ec93ee6c4d5b0f1737335601899a6a56
|
b3cac4aa7bb5166608f4c56e5564b33249f5abef
|
refs/heads/master
| 2021-01-25T08:48:21.918663
| 2014-03-19T08:55:47
| 2014-03-19T08:55:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
'''
Created by auto_sdk on 2014-02-10 16:59:30
'''
from top.api.base import RestApi
class CrmShopvipCancelRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
def getapiname(self):
return 'taobao.crm.shopvip.cancel'
|
[
"poorevil@gmail.com"
] |
poorevil@gmail.com
|
f4139ba7b59e752ce0180da1c48a07365de98486
|
c839961aeab22795200d9edef9ba043fe42eeb9c
|
/data/script1014.py
|
dabede7bc768ce46066e92849eee030bf819e85c
|
[] |
no_license
|
StevenLOL/kaggleScape
|
ad2bb1e2ed31794f1ae3c4310713ead1482ffd52
|
18bede8420ab8d2e4e7c1eaf6f63280e20cccb97
|
refs/heads/master
| 2020-03-17T05:12:13.459603
| 2018-05-02T19:35:55
| 2018-05-02T19:35:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,147
|
py
|
# coding: utf-8
# In[ ]:
# Inspiration 1: https://www.kaggle.com/tunguz/logistic-regression-with-words-and-char-n-grams/code
# Inspiration 2: https://www.kaggle.com/jhoward/nb-svm-strong-linear-baseline
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import re, string
import time
from scipy.sparse import hstack
from scipy.special import logit, expit
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
# In[ ]:
# Functions
def tokenize(s): return re_tok.sub(r' \1 ', s).split()
def pr(y_i, y, x):
p = x[y==y_i].sum(0)
return (p+1) / ((y==y_i).sum()+1)
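# get_mdl below follows the NB-SVM style log-count-ratio trick (Wang & Manning, 2012):
# pr() gives smoothed per-class feature sums, r is their log ratio, the features are
# scaled by r, and a plain logistic regression is fit on the rescaled matrix.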
def get_mdl(y,x, c0 = 4):
y = y.values
r = np.log(pr(1,y,x) / pr(0,y,x))
m = LogisticRegression(C= c0, dual=True)
x_nb = x.multiply(r)
return m.fit(x_nb, y), r
def multi_roc_auc_score(y_true, y_pred):
assert y_true.shape == y_pred.shape
columns = y_true.shape[1]
column_losses = []
for i in range(0, columns):
column_losses.append(roc_auc_score(y_true[:, i], y_pred[:, i]))
return np.array(column_losses).mean()
# In[ ]:
model_type = 'lrchar'
todate = time.strftime("%d%m")
# # Data
# In[ ]:
# read data
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
subm = pd.read_csv('../input/sample_submission.csv')
id_train = train['id'].copy()
id_test = test['id'].copy()
# add empty label for None
label_cols = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
train['none'] = 1-train[label_cols].max(axis=1)
# fill missing values
COMMENT = 'comment_text'
train[COMMENT].fillna("unknown", inplace=True)
test[COMMENT].fillna("unknown", inplace=True)
# In[ ]:
# Tf-idf
# prepare tokenizer
re_tok = re.compile(f'([{string.punctuation}“”¨«»®´·º½¾¿¡§£₤‘’])')
# create sparse matrices
n = train.shape[0]
#vec = TfidfVectorizer(ngram_range=(1,2), tokenizer=tokenize, min_df=3, max_df=0.9, strip_accents='unicode',
# use_idf=1, smooth_idf=1, sublinear_tf=1 )
word_vectorizer = TfidfVectorizer(
tokenizer=tokenize,
sublinear_tf=True,
strip_accents='unicode',
analyzer='word',
min_df = 5,
token_pattern=r'\w{1,}',
ngram_range=(1, 3))
# ,
# max_features=250000)
all1 = pd.concat([train[COMMENT], test[COMMENT]])
word_vectorizer.fit(all1)
xtrain1 = word_vectorizer.transform(train[COMMENT])
xtest1 = word_vectorizer.transform(test[COMMENT])
char_vectorizer = TfidfVectorizer(
sublinear_tf=True,
strip_accents='unicode',
analyzer='char',
min_df = 3,
ngram_range=(1, 6))
# ,
# max_features=250000)
all1 = pd.concat([train[COMMENT], test[COMMENT]])
char_vectorizer.fit(all1)
xtrain2 = char_vectorizer.transform(train[COMMENT])
xtest2 = char_vectorizer.transform(test[COMMENT])
# # Model
# In[ ]:
nfolds = 5
xseed = 29
cval = 4
# data setup
xtrain = hstack([xtrain1, xtrain2], format='csr')
xtest = hstack([xtest1,xtest2], format='csr')
ytrain = np.array(train[label_cols].copy())
# stratified split
skf = StratifiedKFold(n_splits= nfolds, random_state= xseed)
# storage structures for prval / prfull
predval = np.zeros((xtrain.shape[0], len(label_cols)))
predfull = np.zeros((xtest.shape[0], len(label_cols)))
scoremat = np.zeros((nfolds,len(label_cols) ))
score_vec = np.zeros((len(label_cols),1))
# In[ ]:
for (lab_ind,lab) in enumerate(label_cols):
y = train[lab].copy()
print('label:' + str(lab_ind))
for (f, (train_index, test_index)) in enumerate(skf.split(xtrain, y)):
# split
x0, x1 = xtrain[train_index], xtrain[test_index]
y0, y1 = y[train_index], y[test_index]
# fit model for prval
m,r = get_mdl(y0,x0, c0 = cval)
predval[test_index,lab_ind] = m.predict_proba(x1.multiply(r))[:,1]
scoremat[f,lab_ind] = roc_auc_score(y1,predval[test_index,lab_ind])
# fit model full
m,r = get_mdl(y,xtrain, c0 = cval)
predfull[:,lab_ind] += m.predict_proba(xtest.multiply(r))[:,1]
print('fit:'+ str(lab) + ' fold:' + str(f) + ' score:%.6f' %(scoremat[f,lab_ind]))
# break
predfull /= nfolds
# In[ ]:
score_vec = np.zeros((len(label_cols),1))
for ii in range(len(label_cols)):
    score_vec[ii] = roc_auc_score(ytrain[:, ii], predval[:, ii])
print(score_vec.mean())
print(multi_roc_auc_score(ytrain, predval))
# # Store results
# In[ ]:
# store prval
prval = pd.DataFrame(predval)
prval.columns = label_cols
prval['id'] = id_train
prval.to_csv('prval_'+model_type+'x'+str(cval)+'f'+str(nfolds)+'_'+todate+'.csv', index= False)
# store prfull
prfull = pd.DataFrame(predfull)
prfull.columns = label_cols
prfull['id'] = id_test
prfull.to_csv('prfull_'+model_type+'x'+str(cval)+'f'+str(nfolds)+'_'+todate+'.csv', index= False)
# store submission
submid = pd.DataFrame({'id': subm["id"]})
submission = pd.concat([submid, pd.DataFrame(prfull, columns = label_cols)], axis=1)
submission.to_csv('sub_'+model_type+'x'+str(cval)+'f'+str(nfolds)+'_'+todate+'.csv', index= False)
|
[
"adithyagirish@berkeley.edu"
] |
adithyagirish@berkeley.edu
|
1aa72e7f053db9d44e6084691888488c1d1da4e8
|
b1baabe0f34a5595af3f9587b357155590f76569
|
/switcher
|
44806892454dc8ccc7239c8c9d4a227f32075524
|
[] |
no_license
|
anson-tang/3dkserver
|
cb41269801ec97d747bb7b853841c7ad4921ad94
|
4fec66a0e1c8454252f53bc9ba41ce220357f7e4
|
refs/heads/master
| 2021-01-19T05:27:11.555032
| 2016-06-22T01:13:04
| 2016-06-22T01:13:04
| 60,994,700
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,350
|
#!/usr/bin/env python
#-*-coding: utf-8-*-
import sys, os
from os.path import abspath, dirname, join, normpath
PREFIX = normpath( dirname( abspath( __file__ ) ) )
lib_path = normpath( join( PREFIX, 'lib' ) )
if lib_path not in sys.path: sys.path = [ lib_path ] + sys.path
from twisted.internet import reactor, defer
from rpc import ConnectorCreator
from setting import GATEWAYSERVER
from utils import print_e
cmd = None
seconds = 0
uids_admin_want_add = []
USAGE = '''{0}
[ USAGE ]:
./switcher on 打开游戏区。
./switcher status 查看游戏区当前状态。
./switcher off 关闭游戏区,但不需要停止服务器。
./switcher off 0 关闭游戏区,但不需要停止服务器。
./switcher off N 关闭游戏区,广播所有线上客户端,N + 3秒后游戏区所有进程停止。
./switcher add accountname accountname accountname 增加Admin账号,在游戏区关闭的情况下,仍然可以正常进入游戏。
'''
def switch( p ):
switch_on = ( cmd == 'on' )
return p.call( 'gm_server_status_switch', ( switch_on, seconds ) )
def add_admin( p ):
return p.call( 'gm_add_admin_user', uids_admin_want_add )
def status( p ):
return p.call( 'gm_server_status', None)
def usage( err ):
print USAGE.format( '[ E ]: ' + str( err ) if err else '' )
return False
def parse_argv():
global cmd, switch_on, seconds, uids_admin_want_add
_argv = sys.argv
_l = len( _argv )
if _l < 2:
return usage( '命令不正确。' )
else:
cmd = _argv[1].strip()
if cmd in ( 'on', 'off', 'status' ) and _l == 2:
return True
else:
if cmd == 'off' and _l == 3:
try:
seconds = int( _argv[2] )
except:
return usage( '倒计时格式不正确。' )
elif cmd == 'add' and _l >= 3:
try:
uids_admin_want_add = map( lambda s:s.strip(), _argv[2:] )
except:
return usage( '用户账号格式不正确。' )
else:
return usage( '未知错误。' )
return True
@defer.inlineCallbacks
def connected( p ):
res = None
if parse_argv():
if p:
try:
if cmd == 'add':
res = yield add_admin( p )
elif cmd == 'status':
res = yield status( p )
elif cmd in ( 'on', 'off' ):
res = yield switch( p )
else:
usage( '{0}: {1}'.format( '未知命令', cmd ) )
except:
print_e()
print '[ connected ]OK. cmd', cmd, 'and res([1, 1] means executed successfully)', res
else:
print '[ failed ]connect to {0} : {1} failed'.format(GATEWAYSERVER['localhost'], GATEWAYSERVER['port'])
reactor.stop()
def failed(error):
print '[ failed ]connect failed. error', error.getErrorMessage()
reactor.stop()
def main():
ConnectorCreator( None ).connect(GATEWAYSERVER['localhost'], GATEWAYSERVER['port'], timeout = 1).addCallbacks( connected, failed )
reactor.run()
if __name__ == '__main__': main()
|
[
"123@qq.com"
] |
123@qq.com
|
|
5aec16750f6f86a0cdcfca7f3b20d9375929d277
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/TsRjbMRoNCM3GHuDk_9.py
|
7ca02c3b7d778338936732065ba5053888817fc2
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,568
|
py
|
"""
The syllabic structure of Persian language is CV(C)(C). C stands for
consonants and V stands for Vowels. The CV(C)(C) means that there are three
types of syllables in Persian:
* CV
* CVC
* CVCC
Write a function that takes the phonetic transcription of a Persian word as an
argument and returns the syllabified word based on the syllabic structure. In
other word, put a period between syllables.
### Examples
syllabification("kAr") ➞ "kAr"
syllabification("bArAn") ➞ "bA.rAn"
syllabification("tA") ➞ "tA"
syllabification("deraxt") ➞ "de.raxt"
syllabification("pust") ➞ "pust"
syllabification("lAjevard") ➞ "lA.je.vard"
### Notes
* Mono-syllabic words don't need syllabification.
* Persian has six vowels: `a, A, e, i, o, u`
* Persian has 23 consonants: `p, b, t, d, k, g, G, ?, f, v, s, z, S, Z, x, h, c, j, m, n, r, l, y`
* Try to solve the problem by using RegEx.
### Hint
Since each syllable has only one vowel, it's not necessary to know the
consonants. Just knowing that there are only one consonant before the vowel
and 0 to 2 consonants after the vowel is enough to solve the challenge.
"""
def syllabification(word):
v = 'aAeiou'
lst_idx_v = [i for i, l in enumerate(word) if l in v]
if len(lst_idx_v) == 1:
return word
begin = 0
syllables = []
for i in range(1, len(lst_idx_v)):
syllables.append(word[begin: lst_idx_v[i] - 1])
begin = lst_idx_v[i] - 1
syllables.append(word[begin:])
return '.'.join(syllables)
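# The problem statement above hints at a RegEx approach. The sketch below is an
# alternative illustration, not the submitted solution: it assumes every input is
# a valid CV(C)(C) word, as the challenge guarantees.
import re

def syllabification_regex(word):
    # one consonant, one vowel, then lazily absorb consonants until the next
    # syllable onset (consonant + vowel) or the end of the word
    pattern = r'[^aAeiou][aAeiou][^aAeiou]*?(?=[^aAeiou][aAeiou]|$)'
    return '.'.join(re.findall(pattern, word))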
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
0c1509e1816728cd8c2678be1b3f957b1ad9cc38
|
75e951dcf749f62f2a292774968fe95fc4a353c8
|
/boa3/model/operation/unaryop.py
|
8d001b89427dfa1ef705bb7c35ec6be1e5ab8fb7
|
[
"Apache-2.0"
] |
permissive
|
jplippi/neo3-boa
|
e0a199d1ed2fa39abe09ebd3c013c360ca87f544
|
052be4adebb665113715bb80067d954f7ad85ad5
|
refs/heads/development
| 2022-08-19T10:17:43.610854
| 2020-05-25T20:30:42
| 2020-05-25T20:30:42
| 265,959,419
| 0
| 0
|
Apache-2.0
| 2020-05-25T20:39:59
| 2020-05-21T21:54:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,046
|
py
|
from typing import Optional
from boa3.model.operation.operator import Operator
from boa3.model.operation.unary.booleannot import BooleanNot
from boa3.model.operation.unary.negative import Negative
from boa3.model.operation.unary.positive import Positive
from boa3.model.operation.unary.unaryoperation import UnaryOperation
from boa3.model.type.type import IType
class UnaryOp:
# Arithmetic operations
Positive = Positive()
Negative = Negative()
# Logical operations
Not = BooleanNot()
@classmethod
def validate_type(cls, operator: Operator, operand: IType) -> Optional[UnaryOperation]:
"""
Gets a unary operation given the operator and the operand type.
:param operator: unary operator
:param operand: type of the operand
        :return: The operation if it exists. None otherwise;
:rtype: UnaryOperation or None
"""
for id, op in vars(cls).items():
if isinstance(op, UnaryOperation) and op.is_valid(operator, operand):
return op.build(operand)
@classmethod
def get_operation_by_operator(cls, operator: Operator) -> Optional[UnaryOperation]:
"""
Gets a unary operation given the operator.
:param operator: unary operator
        :return: The operation if it exists. If more than one operation has the same operator, returns the first
        one found. None otherwise.
:rtype: UnaryOperation or None
"""
for id, op in vars(cls).items():
if isinstance(op, UnaryOperation) and op.operator is operator:
return op
@classmethod
def get_operation(cls, operation: UnaryOperation) -> Optional[UnaryOperation]:
"""
        Gets a unary operation given another operation.
        :param operation: unary operation
        :return: The operation if it exists. None otherwise;
:rtype: UnaryOperation or None
"""
for id, op in vars(cls).items():
if type(operation) == type(op):
return op
|
[
"mirellamedeiros.09@hotmail.com"
] |
mirellamedeiros.09@hotmail.com
|
030b4c362a080ed3fefaefec7c4a04c2570a0144
|
8f7b7a910520ba49a2e614da72f7b6297f617409
|
/Problemset/isomorphic-strings/isomorphic-strings.py
|
bfd41976cf490a5bac150110e345392d060325ff
|
[] |
no_license
|
fank-cd/python_leetcode
|
69c4466e9e202e48502252439b4cc318712043a2
|
61f07d7c7e76a1eada21eb3e6a1a177af3d56948
|
refs/heads/master
| 2021-06-16T23:41:55.591095
| 2021-03-04T08:31:47
| 2021-03-04T08:31:47
| 173,226,640
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
# @Title: 同构字符串 (Isomorphic Strings)
# @Author: 2464512446@qq.com
# @Date: 2020-12-28 16:12:46
# @Runtime: 48 ms
# @Memory: 17.1 MB
from collections import defaultdict


class Solution:
def isIsomorphic(self, s: str, t: str) -> bool:
d1,d2 = defaultdict(list), defaultdict(list)
for index,i in enumerate(s):
d1[i].append(index)
for index,i in enumerate(t):
d2[i].append(index)
# print(list(d1.values()),list(d2.values()))
return list(d1.values()) == list(d2.values())
|
[
"2464512446@qq.com"
] |
2464512446@qq.com
|
0f61974c5e834f3cba8ffe47ed15b4b1a1f3aba8
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_127/118.py
|
d4733f2f4831c105eb5335bfcd752027eea7d78a
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
import math
def get_number_of_test_case():
return int(raw_input().strip())
def ans(x, y, n):
if n == 1:
if abs(x) + abs(y) != 1:
return False
elif x == 1:
return 'E'
elif x == -1:
return 'W'
elif y == 1:
return 'N'
elif y == -1:
return 'S'
else:
threshold = (n * (n - 1) / 2)
for item in [[x + n, y, 'W',], [x - n, y, 'E',], [x, y + n, 'S',], [x, y - n, 'N',]]:
if abs(item[0]) + abs(item[1]) <= threshold:
result = ans(item[0], item[1], n - 1)
if result:
return result + item[2]
return False
def solve_case(t):
x, y = [int(i) for i in raw_input().strip().split()]
z = abs(x) + abs(y)
n = int(math.ceil((math.sqrt(z * 8 + 1) - 1) / 2))
found = False
result = ''
while not found:
result = ans(x, y, n)
if result:
found = True
n += 1
print 'Case #%d: %s' % (t, result,)
T = get_number_of_test_case()
t = 1
while t <= T:
solve_case(t)
t += 1
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
d6d823f39170c014d0a11739f5a3ab7b90f9178c
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4347/codes/1793_1595.py
|
05495cd7b509876291eb94882aeab29cc3d2410f
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
from numpy import*
n=array(eval(input("nota dos alunos")))
h=0
t=0
while(size(n)>h):
t=t+n[h]
h=h+1
t=t-min(n)
y=size(n)
y=y-1
t=t/y
print(round(t,2))
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
ba8ea0976052895c62f71ec036fb295afc85a666
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_LinearTrend_Seasonal_Minute_AR.py
|
8af17b88b486c656cac6b7a11b9ebc140c4f4ea5
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 161
|
py
|
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['LinearTrend'] , ['Seasonal_Minute'] , ['AR'] );
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
557c6f588642ff8207591f649b68e0f4d5928909
|
dfc991b4163bca9192985bc6daa12168283ffac8
|
/test/aqua/operators/__init__.py
|
7909fc6dac6c123e255fc08303846d478c8de9e3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Unathi-Skosana/qiskit-aqua
|
feb8231a1719a0e73aaae0f29df0246b3dc9419c
|
e13f66eda6d8b819a6f132319a2bac819941f6b1
|
refs/heads/master
| 2020-11-24T15:39:29.645914
| 2020-08-07T22:36:23
| 2020-08-07T22:36:23
| 255,790,533
| 2
| 0
|
Apache-2.0
| 2020-04-15T03:06:06
| 2020-04-15T03:06:06
| null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
|
[
"manoel@us.ibm.com"
] |
manoel@us.ibm.com
|
72391153bf1b5a4b9500125fb9c2eab3123cfda6
|
bd7e89c8e55a106af3dab4cf036309ec8a3e05e2
|
/Onsite/Week1/stick_of_truth_1.py
|
c50e9491405a6de5ff35dd5cf1ee6b39d36a44b4
|
[] |
no_license
|
wiput1999/PrePro60-Python
|
758ec60addaa61ff27ea9bc46474eaf244f5ab58
|
d26dcadcd71896589f992a69cbff711ec0576f59
|
refs/heads/master
| 2021-09-15T19:52:10.899257
| 2018-06-09T12:52:42
| 2018-06-09T12:52:42
| 89,356,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
""" [Stick of Truth - 1] Time Converter """
def main():
""" Convert time """
# Parallel Hour
para_hr = int(input())
# Parallel Minute
para_min = int(input())
# Parallel Second
para_sec = int(input())
pre_result = (para_hr * 50 * 29) + (para_min * 29) + para_sec
pre_result *= 14
# Real World Second
real_sec = pre_result % 60
pre_result //= 60
# Real World Minute
real_min = pre_result % 60
pre_result //= 60
# Real World Hour
real_hr = pre_result % 24
pre_result //= 24
# Real World Day
real_day = pre_result
print("%02d:%02d:%02d" %(real_hr, real_min, real_sec))
print("Day : %d" %real_day)
main()
|
[
"wiput.pootong@gmail.com"
] |
wiput.pootong@gmail.com
|
a4bb39a9334acf1bf77a42f83b0699981a29f9c7
|
781e2692049e87a4256320c76e82a19be257a05d
|
/assignments/python/anagram/src/374.py
|
c539bcd239aa7680c066a136c90fe90dc92c4ca3
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
def detect_anagrams(reference, word_list):
reference = reference.casefold()
ref_list = sorted(reference)
    detect_anagram = lambda w1 : w1 != reference and sorted(w1) == ref_list
return [word
for word
in word_list
if detect_anagram(word.casefold())]
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
bc3fdf3b58a0e51964c0ff34acad6251cbc52f5f
|
7a604a685f9729cd691a7c81f12f2f8a297744de
|
/feedly/default_settings.py
|
2cacfbb53bcb1181a10700376b241fc1d96dd34f
|
[
"BSD-3-Clause"
] |
permissive
|
vmaliwal/Feedly
|
16b3e6ba90646dcbce863f6a2b5613b832a21c0e
|
ec9c8655b4b831cda22d12afa7e39dc382a86b4e
|
refs/heads/master
| 2021-01-20T22:55:09.042922
| 2013-08-08T13:39:34
| 2013-08-08T13:39:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
FEEDLY_NYDUS_CONFIG = {
'CONNECTIONS': {
'redis': {
'engine': 'nydus.db.backends.redis.Redis',
'router': 'nydus.db.routers.redis.PrefixPartitionRouter',
'hosts': {
0: {'prefix': 'default', 'db': 2, 'host': 'localhost', 'port': 6379},
12: {'prefix': 'feedly:', 'db': 0, 'host': 'localhost', 'port': 6379},
13: {'prefix': 'feedly:', 'db': 1, 'host': 'localhost', 'port': 6379},
14: {'prefix': 'notification:', 'db': 3, 'host': 'localhost', 'port': 6379},
}
},
}
}
FEEDLY_CASSANDRA_HOSTS = ['localhost']
|
[
"thierryschellenbach@gmail.com"
] |
thierryschellenbach@gmail.com
|
b035543cf5b0996c159636f236d14a00d492ff0f
|
fcc33e6a8b8af0ac1d69bd9815b786318c4b2d4b
|
/tests/testapp/migrations/0002_config_template.py
|
11ad23505a0fcdf0eab9bd30b3843fd68d91b43c
|
[
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
PabloCastellano/openwisp-users
|
47a0c7b286422effaa57c192ee7a3e687afeb90f
|
d95cb7a1e30ef69b948216c54931ddf7a4a215fc
|
refs/heads/master
| 2022-07-08T03:02:35.622736
| 2020-04-15T22:47:39
| 2020-04-15T22:47:39
| 256,520,208
| 0
| 0
|
BSD-3-Clause
| 2020-04-17T14:07:29
| 2020-04-17T14:07:28
| null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-14 15:14
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('testapp', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='config',
name='template',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='testapp.Template'),
),
]
|
[
"nemesis@ninux.org"
] |
nemesis@ninux.org
|
47c521124fd2c1605e9cacaaffd327c383c76a12
|
8dfe4b53fae92795405d789d52148d1291836afa
|
/.metadata/.plugins/org.eclipse.core.resources/.history/b0/40309ba33381001515feb230dc0120b2
|
fe95f4e3a0d5f62b004a6c2ef67cbc23ae59821a
|
[] |
no_license
|
ymyjohnny/python
|
e07c54a88954e090cf3d30a4c6f6ac46353063fb
|
b483fd55e577d4dcceb5762bddf833df23874f3a
|
refs/heads/master
| 2021-01-10T01:10:19.038424
| 2019-07-02T02:40:23
| 2019-07-02T02:40:23
| 45,223,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
#!/usr/bin/python
#coding=utf-8
'''
Created on 2015-11-2
@author: ymy
'''
import os
dirname = '/tmp'
def allfile(dirname):
    filenames = []
    for base,dirs,files in os.walk(dirname):
        for file in files:
            filename = os.path.join(base,file)
            filenames.append(filename)
    #print filenames
    return filenames
def grep_a(file,str):
pass
allfile(dirname)
|
[
"ymyjohnny@adsame.com"
] |
ymyjohnny@adsame.com
|
|
aa237d039e97b9f01880f9bee5d1a2994a3a66ea
|
94318d8fa492445adb79547da369f141d8a80133
|
/scripts/plot_detection_rate.py
|
80df4af4d2dc49e02ca82f87f4f91f7f365b7f49
|
[] |
no_license
|
dragontt/geneoscopy_dev
|
630fbaca230dfd009667694ed8bb4a222e597eed
|
a5cf26ed0dc949c3d7af48d765864aff95edbe9d
|
refs/heads/master
| 2021-01-12T12:53:16.696478
| 2017-05-26T18:44:45
| 2017-05-26T18:44:45
| 69,477,315
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,061
|
py
|
#!/usr/bin/python
import numpy as np
import sys
import matplotlib.pyplot as plt
##Color choice:
#507cb2 <- blue
#73aa53 <- green
#7f4d91 <- purple
filename = sys.argv[1]
color = sys.argv[2]
f = open(filename, 'r')
lines = f.readlines()
f.close()
labels = []
values = []
for i in range(1, (len(lines)-1)):
line = lines[i].strip().split('\t')
labels.append(line[0])
values.append(float(line[2].strip('%')))
indx = np.arange(len(labels)) + .1
fig, ax = plt.subplots(figsize=(4, 2.5), dpi=150)
plt.bar(indx, values, .5, color='#'+color)
plt.ylabel('Sensitivity (%)')
plt.xticks(indx+.25, labels, rotation=40)
plt.tick_params(axis=u'x', which=u'both',length=0)
plt.ylim([0, 100])
plt.gcf().subplots_adjust(bottom=0.35, left=.2)
# plt.show()
rects = ax.patches
for rect, value in zip(rects, values):
height = rect.get_height()
annot_text = ax.text(rect.get_x() + rect.get_width()/2, height - 12, ('%d%%' % value),
ha='center', va='bottom', color='white')
annot_text.set_fontsize(9)
plt.savefig(filename.strip('.txt')+'.pdf', fmt='pdf')
|
[
"you@example.com"
] |
you@example.com
|
965f0f0575173c3e422ccf531b6ee00c9c26153a
|
18f0ad99e21e2e35126f8c3c28079d358fa2129a
|
/Adafruit_STEMMA_Relay/code.py
|
90a3ebc494b98b85606ab958b6d7eed82af95a86
|
[
"MIT"
] |
permissive
|
ladyada/Adafruit_Learning_System_Guides
|
9bf18dfa35941e0cbecbb3c2d02b4fa3cb79744f
|
6d76801878cbf65132ccea950dc47ae842c73dcd
|
refs/heads/master
| 2023-08-20T20:30:42.910576
| 2022-01-10T20:28:11
| 2022-01-10T20:28:11
| 115,837,894
| 13
| 2
|
MIT
| 2020-03-31T23:23:45
| 2017-12-31T02:34:47
|
C
|
UTF-8
|
Python
| false
| false
| 225
|
py
|
import time
import board
import digitalio
relay = digitalio.DigitalInOut(board.A1)
relay.direction = digitalio.Direction.OUTPUT
while True:
relay.value = True
time.sleep(1)
relay.value = False
time.sleep(1)
|
[
"kattni@adafruit.com"
] |
kattni@adafruit.com
|
74f58ecbee8a351e9afa5d6b12189026de789cce
|
03e3138f99f275d15d41a5c5bfb212f85d64d02e
|
/source/res/scripts/client/gui/Scaleform/daapi/view/meta/BCMessageWindowMeta.py
|
0012af4c374027efc9feaad96ad7d03c1dfb6253
|
[] |
no_license
|
TrenSeP/WorldOfTanks-Decompiled
|
e428728e7901146d0b599d02c930d70532232a97
|
1faa748acec1b7e435b657fd054ecba23dd72778
|
refs/heads/1.4.1
| 2020-04-27T08:07:49.813023
| 2019-03-05T17:37:06
| 2019-03-05T17:37:06
| 174,159,837
| 1
| 0
| null | 2019-03-06T14:33:33
| 2019-03-06T14:24:36
|
Python
|
UTF-8
|
Python
| false
| false
| 757
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/BCMessageWindowMeta.py
from tutorial.gui.Scaleform.pop_ups import TutorialDialog
class BCMessageWindowMeta(TutorialDialog):
def onMessageRemoved(self):
self._printOverrideError('onMessageRemoved')
def onMessageAppear(self, rendrerer):
self._printOverrideError('onMessageAppear')
def onMessageDisappear(self, rendrerer):
self._printOverrideError('onMessageDisappear')
def onMessageButtonClicked(self):
self._printOverrideError('onMessageButtonClicked')
def as_setMessageDataS(self, value):
return self.flashObject.as_setMessageData(value) if self._isDAAPIInited() else None
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
cbf6d2082e39aa257ea9fbe33b054caa5d9f3c3b
|
7c74ceb9f8addcc0816d012e0b84b174b96e0def
|
/src/azure-cli/azure/cli/command_modules/aro/commands.py
|
d260980c3351e0d1f012b5f449a5127298355e87
|
[
"MIT",
"LGPL-2.1-only",
"LGPL-2.1-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
microsoft/azure-cli
|
4c826290e7a6f6bd27da3829b05e4f02ff6dc8d9
|
9ba64b33f6f78e2c3e42f8a147f59484300e8779
|
refs/heads/dev
| 2023-08-31T08:51:39.526556
| 2022-11-28T19:08:23
| 2022-11-28T19:08:23
| 370,900,439
| 7
| 7
|
MIT
| 2023-08-01T23:34:50
| 2021-05-26T03:59:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,505
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands import CliCommandType
from azure.cli.command_modules.aro._client_factory import cf_aro
from azure.cli.command_modules.aro._format import aro_show_table_format
from azure.cli.command_modules.aro._format import aro_list_table_format
from azure.cli.command_modules.aro._help import helps # pylint: disable=unused-import
def load_command_table(self, _):
aro_sdk = CliCommandType(
operations_tmpl='azure.mgmt.redhatopenshift.operations#OpenShiftClustersOperations.{}', # pylint: disable=line-too-long
client_factory=cf_aro)
with self.command_group('aro', aro_sdk, client_factory=cf_aro) as g:
g.custom_command('create', 'aro_create', supports_no_wait=True)
g.custom_command('delete', 'aro_delete', supports_no_wait=True, confirmation=True)
g.custom_command('list', 'aro_list', table_transformer=aro_list_table_format)
g.custom_show_command('show', 'aro_show', table_transformer=aro_show_table_format)
g.custom_command('update', 'aro_update', supports_no_wait=True)
g.wait_command('wait')
g.custom_command('list-credentials', 'aro_list_credentials')
|
[
"noreply@github.com"
] |
microsoft.noreply@github.com
|
abb7a8d52d3b436acb78715b3bb73aea337c4351
|
cc4d8bfef5395c0e3076e9a37a40864554e9099a
|
/cli.py
|
6d64d4987ace1d007791275ed73818dfd6440490
|
[
"MIT"
] |
permissive
|
NiklasRosenstein/yassg
|
99ce9cd327d7e4a72b94066a6e1d1c693b581b33
|
511ca5a1eb76b8fb314c45de6ac85a845b98243c
|
refs/heads/master
| 2021-01-01T04:49:03.758342
| 2017-11-15T13:02:52
| 2017-11-15T13:02:52
| 97,255,419
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,682
|
py
|
# Copyright (c) 2017 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import click
import toml
import os
import subprocess
import yassg from './yassg.py'
@click.command()
@click.argument('build_dir', default='build')
@click.option('-C', '--config', default=None,
help='Configuration file. Defaults to yassg.toml or .config/yassg.toml')
@click.option('--commit', is_flag=True,
help='Create a new commit after the build. Use only when the build '
'directory is set-up as a git worktree.')
@click.option('--push', is_flag=True,
help='Commit and push after the build. Use only when the build '
'directory is set-up as a git worktree.')
def main(build_dir, config, commit, push):
"""
Yet another static site generator.
"""
if not config:
config = 'yassg.toml'
if not os.path.isfile(config):
config = '.config/yassg.toml'
config_filename = config
with open(config) as fp:
config = toml.load(fp)
if 'content-directory' in config:
content_dir = os.path.join(os.path.dirname(config_filename), config['content-directory'])
else:
content_dir = 'content'
root = yassg.RootPage(yassg.pages_from_directory(content_dir, recursive=True))
root.sort()
renderer = yassg.Renderer(root, config)
renderer.render(build_dir)
if commit or push:
print('Creating new commit in "{}" ...'.format(build_dir))
subprocess.call(['git', 'add', '.'], cwd=build_dir)
subprocess.call(['git', 'commit', '-m', 'Update'], cwd=build_dir)
if push:
print('Pushing to "{}" ...'.format(build_dir))
subprocess.call(['git', 'push', 'origin', 'gh-pages'], cwd=build_dir)
if require.main == module:
main()
|
[
"rosensteinniklas@gmail.com"
] |
rosensteinniklas@gmail.com
|
708ac5d15ba5bd4ff5de1105d484cf04d937744f
|
70922de165319283d640821fd42ea1806da402c0
|
/math/0x05-advanced_linear_algebra/4-inverse.py
|
072f6b067b57f8f8f4c3b6c36868f390e22e15db
|
[] |
no_license
|
ikki2530/holbertonschool-machine_learning
|
bdd8152d575a99281e2cce105cf87442ec07f2fb
|
0b56aa0e92d65d4a5832cc994769834fbcfbe0ac
|
refs/heads/main
| 2023-07-07T00:49:03.675328
| 2021-08-11T10:27:56
| 2021-08-11T10:27:56
| 317,352,924
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,506
|
py
|
#!/usr/bin/env python3
"""
calculates the inverse of a matrix.
"""
def determinant(matrix):
"""
Calculates the determinant of a matrix.
- matrix is a list of lists whose determinant should be calculated.
Returns: the determinant of matrix
"""
n = len(matrix)
if n == 1 and len(matrix[0]) == 0 and type(
matrix) == list and type(matrix[0]) == list:
return 1
if n == 0:
raise TypeError("matrix must be a list of lists")
if type(matrix) != list:
raise TypeError("matrix must be a list of lists")
for row in matrix:
if type(row) != list:
raise TypeError("matrix must be a list of lists")
if len(row) != n:
raise ValueError("matrix must be a square matrix")
if len(matrix) == 1 and len(matrix[0]) == 1:
return matrix[0][0]
if n == 2:
a = matrix[0][0]
b = matrix[0][1]
c = matrix[1][0]
d = matrix[1][1]
det = a * d - (b * c)
return det
all_minors = []
mult = matrix[0]
signo = 1
signos = []
newm = []
temp = []
cofactorv = 0
# take the minors
for k in range(n):
for i in range(n):
for j in range(n):
if i != cofactorv and j != k:
temp.append(matrix[i][j])
if temp:
newm.append(temp.copy())
temp = []
if newm:
all_minors.append(newm)
signos.append(signo)
signo = signo * -1
newm = []
# add determinant
suma = 0
for i in range(n):
suma = suma + (signos[i] * mult[i] * determinant(all_minors[i]))
return suma
def minor(matrix):
"""
Calculates the minor matrix of a matrix.
- matrix is a list of lists whose minor matrix should be calculated.
Returns: the minor matrix of matrix
"""
if type(matrix) is not list:
raise TypeError("matrix must be a list of lists")
n = len(matrix)
if n == 0:
raise TypeError("matrix must be a list of lists")
for row in matrix:
if type(row) is not list:
raise TypeError("matrix must be a list of lists")
if len(row) != n:
raise ValueError("matrix must be a non-empty square matrix")
if n == 1:
return [[1]]
newm = []
temp = []
minors = [[0 for j in range(n)] for i in range(n)]
# find the minor matrices
for h in range(n):
for w in range(n):
for i in range(n):
for j in range(n):
if i != h and j != w:
temp.append(matrix[i][j])
if temp:
newm.append(temp.copy())
temp = []
if newm:
# Add a new minor
minors[h][w] = determinant(newm)
newm = []
return minors
def cofactor(matrix):
"""
Calculates the cofactor matrix of a matrix.
- matrix is a list of lists whose cofactor matrix should be calculated.
Returns: the cofactor matrix of matrix.
"""
if type(matrix) is not list:
raise TypeError("matrix must be a list of lists")
n = len(matrix)
if n == 0:
raise TypeError("matrix must be a list of lists")
for row in matrix:
if type(row) is not list:
raise TypeError("matrix must be a list of lists")
if len(row) != n:
raise ValueError("matrix must be a non-empty square matrix")
if n == 1:
return [[1]]
cofactor = minor(matrix)
sign = -1
for i in range(n):
for j in range(n):
cofactor[i][j] = cofactor[i][j] * (sign**(i+j))
return cofactor
def adjugate(matrix):
"""
Calculates the adjugate matrix of a matrix.
- matrix is a list of lists whose adjugate matrix should be calculated.
Returns: the adjugate matrix of matrix.
"""
if type(matrix) is not list:
raise TypeError("matrix must be a list of lists")
n = len(matrix)
if n == 0:
raise TypeError("matrix must be a list of lists")
for row in matrix:
if type(row) is not list:
raise TypeError("matrix must be a list of lists")
if len(row) != n:
raise ValueError("matrix must be a non-empty square matrix")
if n == 1:
return [[1]]
cf = cofactor(matrix)
adj = [[0 for j in range(n)] for i in range(n)]
# transpose of cofactors matrix
for i in range(n):
for j in range(n):
adj[j][i] = cf[i][j]
return adj
def inverse(matrix):
"""
Calculates the inverse of a matrix.
- matrix is a list of lists whose inverse should be calculated.
Returns: the inverse of matrix, or None if matrix is singular.
"""
if type(matrix) is not list:
raise TypeError("matrix must be a list of lists")
n = len(matrix)
if n == 0:
raise TypeError("matrix must be a list of lists")
for row in matrix:
if type(row) is not list:
raise TypeError("matrix must be a list of lists")
if len(row) != n:
raise ValueError("matrix must be a non-empty square matrix")
if n == 1:
return [[1 / matrix[0][0]]]
adj = adjugate(matrix)
det = determinant(matrix)
if det == 0:
return None
inverse = [[0 for j in range(n)] for i in range(n)]
for i in range(n):
for j in range(n):
inverse[i][j] = adj[i][j] / det
return inverse
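# Hypothetical usage sketch (not part of the original task file): for the 2x2
# matrix [[1, 2], [3, 4]], the determinant is -2 and the adjugate is
# [[4, -2], [-3, 1]], so the inverse is [[-2.0, 1.0], [1.5, -0.5]].
if __name__ == '__main__':
    print(inverse([[1, 2], [3, 4]]))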
|
[
"dagomez2530@gmail.com"
] |
dagomez2530@gmail.com
|
72f7059f397a28a6fc5d98863a2b760954f5192a
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_136/701.py
|
e899850bc93a963d05e10c0c43026ddae3581bf4
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
N = int(raw_input())
for p in range(N):
c, f, x = [float(x) for x in raw_input().split()]
ps = 2
sc = 0
mn = 1e18
while True:
if x/ps+sc > mn: break
mn = x/ps+sc
sc = sc + c/ps
ps = ps + f
print "Case #%d: %.7f" % (p+1, mn)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
ebb12756eedd2b1951d254e6c61ebf9cc3fccc37
|
24a377bcf06aac43eb099f5ce2383e5da07ddadc
|
/analysis/set_num_runs.py
|
7a02d3330ac65acc6e51f1470227da9a0bc78537
|
[] |
no_license
|
AlJohri/nulaundry
|
223f0cf4b5c4a46e083512b35f4cddc5879d39ab
|
be61f72dd69cc633458d3e147a1593b2e6bf01c4
|
refs/heads/master
| 2020-05-19T14:15:44.561527
| 2015-04-15T04:21:29
| 2015-04-15T04:21:29
| 33,854,316
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
from firebase import firebase
firebase = firebase.FirebaseApplication('https://aljohri-nulaundry.firebaseio.com', None)
machines = firebase.get("/machines", None)
for machine_id, machine in machines.iteritems():
num_runs = len(machine['runs'].values()) if machine.get('runs') else 0
print "Machine %s has %d runs" % (machine_id, num_runs)
firebase.put(url='/machines/%s' % machine_id, name="num_runs", data=num_runs)
|
[
"al.johri@gmail.com"
] |
al.johri@gmail.com
|
2c1729733b5515b33837a25e1c54ba55a64c4d70
|
744c3b66611b08782fcdd9d66261c4d55b00d426
|
/examples/pybullet/gym/pybullet_envs/minitaur/agents/baseline_controller/gait_generator.py
|
61bd849acf0af82aee1c3cb503e4381a69ee2973
|
[
"Zlib"
] |
permissive
|
erwincoumans/bullet3
|
4ff9e0aa64b641c65b57b26f415dd69dbfb12256
|
6d181d78a5c7be8714c74055cddcf63d5ccef70a
|
refs/heads/master
| 2023-03-10T14:58:18.072562
| 2023-02-24T18:32:53
| 2023-02-24T18:32:53
| 31,621,748
| 103
| 29
|
NOASSERTION
| 2019-02-25T17:31:00
| 2015-03-03T21:15:54
|
C++
|
UTF-8
|
Python
| false
| false
| 685
|
py
|
"""Gait pattern planning module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import enum
class LegState(enum.Enum):
"""The state of a leg during locomotion."""
SWING = 0
STANCE = 1
# A swing leg that collides with the ground.
EARLY_CONTACT = 2
# A stance leg that loses contact.
LOSE_CONTACT = 3
class GaitGenerator(object): # pytype: disable=ignored-metaclass
"""Generates the leg swing/stance pattern for the robot."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def reset(self, current_time):
pass
@abc.abstractmethod
def update(self, current_time):
pass
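# A minimal, hypothetical subclass sketch (not part of the original module) showing
# how the abstract interface is intended to be filled in: it keeps every leg in
# STANCE and only remembers the reset time.
class ConstantStanceGaitGenerator(GaitGenerator):
  """Toy gait generator that never swings any leg."""

  def __init__(self, num_legs=4):
    self._num_legs = num_legs
    self._reset_time = 0.0

  def reset(self, current_time):
    self._reset_time = current_time

  def update(self, current_time):
    return [LegState.STANCE] * self._num_legs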
|
[
"erwin.coumans@gmail.com"
] |
erwin.coumans@gmail.com
|
64608d15d268eb8a172554ecfb7235b36b175d0b
|
677a3a76807d8585f65ec0e0839bb3a8b833e2fb
|
/10.Design Patterns/Lab/1.Abstract_factory.py
|
cd4cff1cf3cb94f6f195c13a8447e4bec28e90a5
|
[] |
no_license
|
negative0101/Python-OOP
|
0d531a1b72beb3e58f9486df88d457ecd59be10e
|
b5825e66a909c947a46458712d683e8a38035912
|
refs/heads/main
| 2023-07-14T11:27:34.841594
| 2021-08-20T08:49:04
| 2021-08-20T08:49:04
| 381,475,313
| 0
| 0
| null | 2021-07-25T19:52:38
| 2021-06-29T19:26:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,724
|
py
|
from abc import ABC, abstractmethod
class Chair:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class Sofa:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class Table:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class AbstractFactory(ABC):
@abstractmethod
def create_chair(self):
pass
@abstractmethod
def create_table(self):
pass
@abstractmethod
def create_sofa(self):
pass
class VictorianFactory(AbstractFactory):
def create_chair(self):
return Chair('Victorian chair')
def create_sofa(self):
return Sofa('Victorian sofa')
def create_table(self):
return Table('Victorian table')
class ArtFactory(AbstractFactory):
def create_chair(self):
return Chair('Art chair')
def create_sofa(self):
return Sofa('Art sofa')
def create_table(self):
return Table('Art table')
class ModernFactory(AbstractFactory):
def create_chair(self):
return Chair('Modern chair')
def create_sofa(self):
return Sofa('Modern sofa')
def create_table(self):
return Table('Modern table')
def get_factory(style):
if style == 'Victorian':
return VictorianFactory()
elif style == 'Art':
return ArtFactory()
elif style == 'Modern':
return ModernFactory()
if __name__ == '__main__':
client_style = input()
factory = get_factory(client_style)
print(factory.create_chair())
|
[
"noreply@github.com"
] |
negative0101.noreply@github.com
|
3d7ce7f23f60f696a1c6fc3dad73799d24bb83a9
|
2bcc421ee345b00cf805c543b37d18b5d019dc04
|
/adafruit-circuitpython-bundle-6.x-mpy-20201126/examples/rfm69_transmit.py
|
8f290230a0177f4531c020ba3597b90cb31b0bc0
|
[] |
no_license
|
saewoonam/sc-current-source-titano
|
5a1ad46889c1b09c168424901fd71cb4eab5c61b
|
1c136aa8b61268d9ac0b5a682b30ece70ab87663
|
refs/heads/main
| 2023-03-02T22:12:26.685537
| 2021-02-09T03:28:01
| 2021-02-09T03:28:01
| 317,299,900
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,895
|
py
|
# Example to send a packet periodically
# Author: Jerry Needell
#
import time
import board
import busio
import digitalio
import adafruit_rfm69
# set the time interval (seconds) for sending packets
transmit_interval = 10
# Define radio parameters.
RADIO_FREQ_MHZ = 915.0 # Frequency of the radio in Mhz. Must match your
# module! Can be a value like 915.0, 433.0, etc.
# Define pins connected to the chip.
CS = digitalio.DigitalInOut(board.CE1)
RESET = digitalio.DigitalInOut(board.D25)
# Initialize SPI bus.
spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
# Initialze RFM radio
rfm69 = adafruit_rfm69.RFM69(spi, CS, RESET, RADIO_FREQ_MHZ)
# Optionally set an encryption key (16 byte AES key). MUST match both
# on the transmitter and receiver (or be set to None to disable/the default).
rfm69.encryption_key = (
b"\x01\x02\x03\x04\x05\x06\x07\x08\x01\x02\x03\x04\x05\x06\x07\x08"
)
# initialize counter
counter = 0
# send a broadcast message
rfm69.send(bytes("message number {}".format(counter), "UTF-8"))
# Wait to receive packets.
print("Waiting for packets...")
# initialize flag and timer
send_reading = False
time_now = time.monotonic()
while True:
# Look for a new packet - wait up to 5 seconds:
packet = rfm69.receive(timeout=5.0)
# If no packet was received during the timeout then None is returned.
if packet is not None:
# Received a packet!
# Print out the raw bytes of the packet:
print("Received (raw bytes): {0}".format(packet))
# send reading after any packet received
if time.monotonic() - time_now > transmit_interval:
        # reset timer
time_now = time.monotonic()
# clear flag to send data
send_reading = False
counter = counter + 1
rfm69.send(bytes("message number {}".format(counter), "UTF-8"))
|
[
"nams@nist.gov"
] |
nams@nist.gov
|
8a3659cc339b77f4682c2220d784c46a647f5a6a
|
5b76a92ec02529f97bcf72ba2487f11b73684439
|
/pyxel_lander/__init__.py
|
40a9993bd4c8b06058baaef719dbb92ea720489a
|
[
"MIT"
] |
permissive
|
humrochagf/pyxel-lander
|
d9533598a56a1adba4c335167620950868bcec6b
|
05b76c45de69f7fa1ecf78cf1ba555e8771d3bfc
|
refs/heads/main
| 2023-03-08T06:18:39.555621
| 2023-03-06T02:18:09
| 2023-03-06T02:18:09
| 163,335,210
| 28
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
from pyxel_lander.constants import AUTHOR, EMAIL, VERSION
from pyxel_lander.game import Game
__author__ = AUTHOR
__email__ = EMAIL
__version__ = VERSION
__all__ = [
"__author__",
"__email__",
"__version__",
"Game",
]
|
[
"humrochagf@gmail.com"
] |
humrochagf@gmail.com
|
3082243e987f916fa7c31952331a62d65983a72c
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/4jNjHdZ2hmMh23pRg_8.py
|
def823a82703336877c1698b26cb5997d5eaa82f
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
def cutting_grass(lst, *cuts):
lsts = [[e - sum(cuts[:i+1]) for e in lst] for i in range(len(cuts))]
return [i if all(e > 0 for e in i) else 'Done' for i in lsts]
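# Worked examples (values computed from the implementation above, not taken
# from the original task statement):
#   cutting_grass([5, 6, 7, 5], 1, 2) -> [[4, 5, 6, 4], [2, 3, 4, 2]]
#   cutting_grass([5, 6, 7, 5], 3, 3) -> [[2, 3, 4, 2], 'Done']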
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
d2d5e2724e5868e3de00fc519848b82369742bab
|
e62c8ee151671b999c6720ab8c2aa2f96c0d7f55
|
/examples/miniapps/boto3-session/boto3_session_example.py
|
33ed85f6dd4ffb0cf0fd7d12844a69a572b02a71
|
[] |
permissive
|
ets-labs/python-dependency-injector
|
45645973456bb6494386ad12103d06e1f1be2cd8
|
cc2304e46e054ae08dc12995428759fbfb51af10
|
refs/heads/master
| 2023-08-23T03:59:53.509743
| 2022-12-19T03:14:24
| 2022-12-19T03:14:24
| 28,774,758
| 3,217
| 273
|
BSD-3-Clause
| 2023-09-08T21:46:18
| 2015-01-04T13:23:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,053
|
py
|
"""Boto3 session example."""
import boto3.session
from dependency_injector import containers, providers
class Service:
def __init__(self, s3_client, sqs_client):
self.s3_client = s3_client
self.sqs_client = sqs_client
class Container(containers.DeclarativeContainer):
config = providers.Configuration()
session = providers.Resource(
boto3.session.Session,
aws_access_key_id=config.aws_access_key_id,
aws_secret_access_key=config.aws_secret_access_key,
aws_session_token=config.aws_session_token,
)
s3_client = providers.Resource(
session.provided.client.call(),
service_name="s3",
)
sqs_client = providers.Resource(
providers.MethodCaller(session.provided.client), # Alternative syntax
service_name="sqs",
)
service1 = providers.Factory(
Service,
s3_client=s3_client,
sqs_client=sqs_client,
)
service2 = providers.Factory(
Service,
s3_client=session.provided.client.call(service_name="s3"), # Alternative inline syntax
sqs_client=session.provided.client.call(service_name="sqs"), # Alternative inline syntax
)
def main():
container = Container()
container.config.aws_access_key_id.from_env("AWS_ACCESS_KEY_ID")
container.config.aws_secret_access_key.from_env("AWS_SECRET_ACCESS_KEY")
container.config.aws_session_token.from_env("AWS_SESSION_TOKEN")
container.init_resources()
s3_client = container.s3_client()
print(s3_client)
sqs_client = container.sqs_client()
print(sqs_client)
service1 = container.service1()
print(service1, service1.s3_client, service1.sqs_client)
assert service1.s3_client is s3_client
assert service1.sqs_client is sqs_client
service2 = container.service2()
print(service2, service2.s3_client, service2.sqs_client)
assert service2.s3_client.__class__.__name__ == "S3"
assert service2.sqs_client.__class__.__name__ == "SQS"
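    # Resources created via init_resources() can be released with the counterpart
    # container.shutdown_resources() if needed; boto3 sessions and clients
    # generally do not require explicit cleanup, so this example omits it.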
if __name__ == "__main__":
main()
|
[
"rmogilatov@gmail.com"
] |
rmogilatov@gmail.com
|
570a9766aa5228126ece666542ba521ded51bb84
|
37d10412479a81c652e3ebf0c21c101b68fe1b4d
|
/rebecca/bootstrapui/helpers.py
|
7c850d56c33719949e736ca874ba579b29d7a7d6
|
[
"MIT"
] |
permissive
|
rebeccaframework/rebecca.bootstrapui
|
97bfde14861d9b318fd1b0087a30c10cfbc18da3
|
e247aead62e75009256d8341a893b173ccfe3b10
|
refs/heads/master
| 2020-05-18T15:50:27.364424
| 2014-08-23T10:18:21
| 2014-08-23T10:18:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 964
|
py
|
import functools
from webhelpers2.html import HTML, escape, literal
from babel.dates import format_date, format_datetime, format_time
from babel.numbers import format_number, format_decimal, format_percent
def bind_locale(func, localename):
return functools.partial(func, locale=localename)
class WebHelper(object):
def __init__(self, request):
self.request = request
self.locale_name = request.locale_name
self.HTML = HTML
self.escape = escape
self.literal = literal
self.format_date = bind_locale(format_date, self.locale_name)
self.format_datetime = bind_locale(format_datetime, self.locale_name)
self.format_time = bind_locale(format_time, self.locale_name)
self.format_number = bind_locale(format_number, self.locale_name)
self.format_decimal = bind_locale(format_decimal, self.locale_name)
self.format_percent = bind_locale(format_percent, self.locale_name)
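# Usage sketch (illustrative; assumes a Pyramid-style request object exposing a
# `locale_name` attribute, as this class expects):
#
#   from datetime import date
#   helper = WebHelper(request)
#   helper.format_date(date.today())   # formatted for request.locale_name
#   helper.format_decimal(1234.5)
#   helper.escape("<b>")               # -> '&lt;b&gt;'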
|
[
"aodagx@gmail.com"
] |
aodagx@gmail.com
|
5dc668e60985444fd9aa28246684e0b119ddea80
|
88ea6ae5a8f97e3771490583d8acecdbe2877fd8
|
/zips/plugin.video.vistatv/resources/lib/sources/en/watch32.py
|
e4449b2214e5d64dd66cc38e81ebeb999833a20b
|
[] |
no_license
|
staycanuca/PersonalDataVistaTV
|
26497a29e6f8b86592609e7e950d6156aadf881c
|
4844edbfd4ecfc1d48e31432c39b9ab1b3b1a222
|
refs/heads/master
| 2021-01-25T14:46:25.763952
| 2018-03-03T10:48:06
| 2018-03-03T10:48:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,798
|
py
|
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Cerebro ShowBox Scraper
# Addon Provider: MuadDib
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['watch32hd.co']
self.base_link = 'https://watch32hd.co'
self.search_link = '/watch?v=%s_%s'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['title']
year = data['year']
url = urlparse.urljoin(self.base_link, self.search_link)
url = url % (title.replace(':', ' ').replace(' ','_'),year)
search_results = client.request(url)
varid = re.compile('var frame_url = "(.+?)"',re.DOTALL).findall(search_results)[0].replace('/embed/','/streamdrive/info/')
res_chk = re.compile('class="title"><h1>(.+?)</h1>',re.DOTALL).findall(search_results)[0]
varid = 'http:'+varid
holder = client.request(varid)
links = re.compile('"src":"(.+?)"',re.DOTALL).findall(holder)
for link in links:
vid_url = link.replace('\\','')
if '1080' in res_chk:
quality = '1080p'
elif '720' in res_chk:
quality = '720p'
else:
quality = 'DVD'
sources.append({'source': 'Googlelink', 'quality': quality, 'language': 'en', 'url': vid_url, 'direct': False, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
return url
|
[
"biglad@mgawow.co.uk"
] |
biglad@mgawow.co.uk
|
12ec82e69e5b3d50651b488913b9e56d768c6259
|
bd01527a0af06828c56206d1113c372787e0d1d0
|
/backend/media/justrelax/node/media/player.py
|
0fae154d42080e85207185137f7117e755f8f9ac
|
[] |
no_license
|
nosseb/justrelax
|
3810f3cbae507f3da3c7a0ab894e5c3236b8c9d1
|
812bdf7787a761c94afd867cfc4de20f993fc86a
|
refs/heads/master
| 2022-11-26T22:12:33.825056
| 2020-07-21T15:42:27
| 2020-07-21T15:42:27
| 263,049,627
| 0
| 0
| null | 2020-05-11T13:24:52
| 2020-05-11T13:24:51
| null |
UTF-8
|
Python
| false
| false
| 2,432
|
py
|
from justrelax.common.logging_utils import logger
class MediaPlayerMixin:
STATE_NOT_STARTED = 'not_started'
STATE_PLAYING = 'playing'
STATE_PAUSED = 'paused'
def __init__(self):
self.current_state = MediaPlayerMixin.STATE_NOT_STARTED
def play(self):
if self.current_state == MediaPlayerMixin.STATE_NOT_STARTED:
logger.debug('Player has not been started yet')
self._play()
self.current_state = MediaPlayerMixin.STATE_PLAYING
elif self.current_state == MediaPlayerMixin.STATE_PLAYING:
logger.debug('Player is already playing')
logger.debug('Nothing to do')
elif self.current_state == MediaPlayerMixin.STATE_PAUSED:
logger.debug('Player is paused and had already been started')
self._resume()
self.current_state = MediaPlayerMixin.STATE_PLAYING
else:
pass
def pause(self):
if self.current_state == MediaPlayerMixin.STATE_NOT_STARTED:
logger.debug('Player has not been started yet')
logger.debug('Nothing to do')
elif self.current_state == MediaPlayerMixin.STATE_PLAYING:
            logger.debug('Player is playing')
self._pause()
self.current_state = MediaPlayerMixin.STATE_PAUSED
elif self.current_state == MediaPlayerMixin.STATE_PAUSED:
logger.debug('Player is paused and had already been started')
logger.debug('Nothing to do')
else:
pass
def stop(self):
if self.current_state == MediaPlayerMixin.STATE_NOT_STARTED:
logger.debug('Player has not been started yet')
logger.debug('Nothing to do')
elif self.current_state == MediaPlayerMixin.STATE_PLAYING:
            logger.debug('Player is playing')
self._stop()
self.current_state = MediaPlayerMixin.STATE_NOT_STARTED
elif self.current_state == MediaPlayerMixin.STATE_PAUSED:
logger.debug('Player is paused and had already been started')
self._stop()
self.current_state = MediaPlayerMixin.STATE_NOT_STARTED
else:
pass
def _play(self):
logger.debug("Playing")
def _resume(self):
logger.debug("Resuming")
def _pause(self):
logger.debug("Pausing")
def _stop(self):
logger.debug("Stopping")
|
[
"jbaptiste.braun@gmail.com"
] |
jbaptiste.braun@gmail.com
|
485ebf8496bd146a42491a9f4317726e7d3725e0
|
79b93d7c36645735309a55973ec54d126956c612
|
/Round1B/DraupnirBig.py
|
bbd9b31b2927243318da8e69786c1f06597f0bb4
|
[] |
no_license
|
rocket3989/codeJam2019
|
a7523f27c73a8e69c35754ad1737f8587e626c9f
|
2d383ef2eefac43a86b24433bb6371961002adc5
|
refs/heads/master
| 2022-02-28T23:52:56.653242
| 2019-09-25T01:01:43
| 2019-09-25T01:01:43
| 179,910,089
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
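# Sketch of the approach (assuming this targets the Code Jam 2019 Round 1B
# interactive problem "Draupnir": ring type i doubles every i days and the
# judge reports the total ring count modulo 2**63):
# - Query day 200: rings 1-3 contribute multiples of 2**63 and vanish, so the
#   response packs r[4], r[5], r[6] into disjoint bit ranges (2**50, 2**40, 2**33).
# - Query day 56: r[1], r[2], r[3] sit at 2**56, 2**28, 2**18 once the now-known
#   contributions of r[4..6] (at 2**14, 2**11, 2**9) are subtracted.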
T, M = list(map(int, input().split()))
for test in range(0,T):
r = [0,0,0,0,0,0,0]
inp = []
print(200)
inp.append(int(input()))
print(56)
inp.append(int(input()))
r[6] = (inp[0] % 2 ** 40) // 2 ** 33
inp[0] -= r[6] * 2 ** 33
r[5] = (inp[0] % 2 ** 50) // 2 ** 40
inp[0] -= r[5] * 2 ** 40
r[4] = inp[0] // 2 ** 50
inp[1] -= r[4] * 2 ** 14 + r[5] * 2 ** 11 + r[6] * 2 ** 9
r[3] = (inp[1] % 2 ** 28) // 2 ** 18
    inp[1] -= r[3] * 2 ** 18
r[2] = (inp[1] % 2 ** 56) // 2 ** 28
    inp[1] -= r[2] * 2 ** 28
r[1] = inp[1] // 2 ** 56
for out in r[1::]:
print(out, end=" ")
print()
res = int(input())
if res == -1:
exit()
|
[
"rocket3989@gmail.com"
] |
rocket3989@gmail.com
|
b8ce6bca58314b866a7d721d90990ae2cc5492a5
|
e3e8467a3bae0982bd1ae0a27474e59d61eabe95
|
/nukepy
|
efe0df8dd0ca6a266d16c453902c4d02f54a5aa2
|
[] |
no_license
|
LumaPictures/nukecli
|
d47cd5c5a8d15cf5e584ac5b87362ad5333fa8d6
|
7ca3829cf940a3d836eb0104f41fb00321c9c92c
|
refs/heads/master
| 2020-06-01T04:20:58.804388
| 2011-08-05T23:34:46
| 2011-08-05T23:34:46
| 2,163,112
| 15
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,441
|
#!/usr/bin/env python
"""
Wrapper for Nuke -t that behaves more like a normal python binary.
- adds support for -c flag to pass a string of python code to execute
- expands symbolic links
- can be used as the interpreter in executable python scripts (e.g. #!/usr/bin/env nukepy)
"""
from __future__ import with_statement
import sys
import os
import subprocess
import tempfile
newArgsList = []
nextIsPyCmd = False
tempFileName = None
try:
for arg in sys.argv[1:]:
if nextIsPyCmd:
nextIsPyCmd = False
fd, tempFileName = tempfile.mkstemp(suffix='.py',
prefix='nukepyCommand',
text=True)
with os.fdopen(fd, 'w') as tempFileHandle:
tempFileHandle.write(arg)
newArgsList.append(tempFileName)
elif arg == '-c':
if tempFileName is not None:
raise Exception('-c argument may only be given once')
nextIsPyCmd = True
elif os.path.islink(arg):
newArgsList.append(os.path.realpath(arg))
else:
newArgsList.append(arg)
procArgs = ["Nuke", "-c", "4G", "-t", "--"] + newArgsList
p = subprocess.Popen(procArgs)
os.waitpid(p.pid, 0)[1]
finally:
if tempFileName:
os.remove(tempFileName)
# this also works but exits in a slightly different way
#/bin/tcsh
#Nuke -t < $*
|
[
"chadrik@gmail.com"
] |
chadrik@gmail.com
|
|
b46e91baeb582c1b23025c803fe705dab8582a91
|
d2bbf50859beb3447d8e15d5d11f89942f1b21d3
|
/Top down design lab/conversion1.py
|
56c6680b68b9082c71627b440e82dace9a22c5a0
|
[] |
no_license
|
nbenkler/CS110_Intro_CS
|
f5bc8da21bac9bc4d1c01070b5f7cc75fb3ab012
|
372483bce661ef8052c2ebbe832cc6ec1922d113
|
refs/heads/master
| 2020-04-24T07:25:26.630687
| 2019-02-21T04:27:55
| 2019-02-21T04:27:55
| 171,798,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 382
|
py
|
#conversion1.py
# A program to convert Celsius temps to Fahrenheit
def main():
    fileName = input("What is the name of the file you would like to convert? ")
inFile = open(fileName, "r")
for line in inFile:
celsius = int(line)
fahrenheit = 9/5 * celsius + 32
print(celsius, "degrees celsius is", fahrenheit, "degrees in Fahrenheit.")
inFile.close()
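# Hypothetical input file: one Celsius reading per line, e.g. a file containing
# the lines "0" and "100" would print 32.0 and 212.0 degrees Fahrenheit.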
main()
|
[
"you@example.com"
] |
you@example.com
|
8d626cd10ddba11c2cc47aaaae26cca82f16f13d
|
bb6ebff7a7f6140903d37905c350954ff6599091
|
/third_party/WebKit/Source/devtools/scripts/concatenate_js_files.py
|
69d776abae74eff185f6689bcb1c595dffa5d111
|
[
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
PDi-Communication-Systems-Inc/lollipop_external_chromium_org
|
faa6602bd6bfd9b9b6277ce3cd16df0bd26e7f2f
|
ccadf4e63dd34be157281f53fe213d09a8c66d2c
|
refs/heads/master
| 2022-12-23T18:07:04.568931
| 2016-04-11T16:03:36
| 2016-04-11T16:03:36
| 53,677,925
| 0
| 1
|
BSD-3-Clause
| 2022-12-09T23:46:46
| 2016-03-11T15:49:07
|
C++
|
UTF-8
|
Python
| false
| false
| 4,254
|
py
|
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This script concatenates in place JS files in the order specified
# using <script> tags in a given 'order.html' file.
from __future__ import with_statement
from HTMLParser import HTMLParser
from cStringIO import StringIO
import os.path
import sys
rjsmin_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
"..",
"..",
"build",
"scripts"))
sys.path.append(rjsmin_path)
import rjsmin
class OrderedJSFilesExtractor(HTMLParser):
def __init__(self, order_html):
HTMLParser.__init__(self)
self.ordered_js_files = []
self.feed(order_html)
def handle_starttag(self, tag, attrs):
if tag == 'script':
attrs_dict = dict(attrs)
if ('type' in attrs_dict and attrs_dict['type'] == 'text/javascript' and 'src' in attrs_dict):
self.ordered_js_files.append(attrs_dict['src'])
class PathExpander:
def __init__(self, paths):
self.paths = paths
def expand(self, filename):
for path in self.paths:
fname = os.path.join(path, filename)
if (os.access(fname, os.F_OK)):
return fname
return None
def main(argv):
if len(argv) < 3:
print('usage: %s order.html input_source_dir_1 input_source_dir_2 ... '
'output_file' % argv[0])
return 1
output_file_name = argv.pop()
input_order_file_name = argv[1]
with open(input_order_file_name, 'r') as order_html:
extractor = OrderedJSFilesExtractor(order_html.read())
expander = PathExpander(argv[2:])
output = StringIO()
for input_file_name in extractor.ordered_js_files:
full_path = expander.expand(input_file_name)
if (full_path is None):
raise Exception('File %s referenced in %s not found on any source paths, '
'check source tree for consistency' %
(input_file_name, input_order_file_name))
output.write('/* %s */\n\n' % input_file_name)
input_file = open(full_path, 'r')
output.write(input_file.read())
output.write('\n')
input_file.close()
if os.path.exists(output_file_name):
os.remove(output_file_name)
output_file = open(output_file_name, 'w')
output_file.write(rjsmin.jsmin(output.getvalue()))
output_file.close()
output.close()
# Touch output file directory to make sure that Xcode will copy
# modified resource files.
if sys.platform == 'darwin':
output_dir_name = os.path.dirname(output_file_name)
os.utime(output_dir_name, None)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
[
"mrobbeloth@pdiarm.com"
] |
mrobbeloth@pdiarm.com
|
83626a499b71960a0cbd94990cb81f96a5cc2601
|
ff69aab96d76ac3dc5b93605617314c6eb2b257c
|
/gymmeforce/models/__init__.py
|
4cedb96b2487ce53f7fda441d4b1d2cc233798a2
|
[
"MIT"
] |
permissive
|
init27Lab/gymmeforce
|
a9fc7e455b8427ce2c3334e059ee6e532adb3384
|
12731bcf34de9e9a94fae085cdfe10e3f4e0d142
|
refs/heads/master
| 2021-09-01T12:30:16.508592
| 2017-12-27T01:35:21
| 2017-12-27T01:35:21
| 115,546,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
from gymmeforce.models.base_model import BaseModel
from gymmeforce.models.dqn_model import DQNModel
from gymmeforce.models.vanilla_pg_model import VanillaPGModel
from gymmeforce.models.ppo_model import PPOModel
|
[
"lucasgouvaz@gmail.com"
] |
lucasgouvaz@gmail.com
|
c439df6bafaa4167d39bfcd0250e95a1dca7f532
|
79197ddad40d2780a8f291bffa2cb58509c055bb
|
/Menus/menu-1.py
|
d09c2355e97f9093a2030524511a2994180fe946
|
[] |
no_license
|
Adrien-FILIPPI/Hackbox
|
6dd0c66e0fa72a249ee06c6064cbc9bb66eeaa7b
|
c58495503abc2948ae1d6e2ea1705ee48192f1df
|
refs/heads/master
| 2021-01-25T09:37:59.960101
| 2019-01-08T16:04:38
| 2019-01-08T16:04:38
| 93,864,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,712
|
py
|
#!/usr/bin/env python
import kalipi
from kalipi import *
#############################
## Local Functions ##
# Check VNC status
def check_vnc():
if 'vnc :1' in commands.getoutput('/bin/ps -ef'):
return True
else:
return False
# Check Terminal session status
def check_terminal():
if 'SCREEN -R -S term' in commands.getoutput('/bin/ps -ef'):
return True
else:
return False
## Local Functions ##
#############################
#############################
## Buttons ##
# define all of the buttons
titleButton = Button(" " + kalipi.get_hostname() + " " + kalipi.get_ip(), originX, originX, buttonHeight, buttonWidth * 3 + spacing * 2, tron_blu, tron_ora, titleFont)
button1 = Button(labelPadding * " " + " Exit", originX, originY, buttonHeight, buttonWidth, tron_blu, tron_whi, labelFont)
button2 = Button(labelPadding * " " + " X on TFT", originX + buttonWidth + spacing, originY, buttonHeight, buttonWidth, tron_blu, tron_whi, labelFont)
button3 = Button(labelPadding * " " + " X on HDMI", originX + (buttonWidth * 2) + (spacing * 2), originY, buttonHeight, buttonWidth, tron_blu, tron_whi, labelFont)
button4 = Button(labelPadding * " " + " Shutdown", originX, originY + buttonHeight + spacing, buttonHeight, buttonWidth, tron_blu, tron_whi, labelFont)
button5 = Button(labelPadding * " " + " Find IP", originX + buttonWidth + spacing, originY + buttonHeight + spacing, buttonHeight, buttonWidth, tron_blu, tron_whi, labelFont)
button6 = Button(labelPadding * " " + " Terminal", originX + (buttonWidth * 2) + (spacing * 2), originY + buttonHeight + spacing, buttonHeight, buttonWidth, tron_blu,tron_whi, labelFont)
button7 = Button(labelPadding * " " + " Reboot", originX, originY + (buttonHeight * 2) + (spacing * 2), buttonHeight, buttonWidth, tron_blu, tron_whi, labelFont)
button8 = Button(labelPadding * " " + " Screen Off", originX + buttonWidth + spacing, originY + (buttonHeight * 2) + (spacing * 2), buttonHeight, buttonWidth, tron_blu, tron_whi, labelFont)
button9 = Button(labelPadding * " " + " >>>", originX + (buttonWidth * 2) + (spacing * 2), originY + (buttonHeight * 2) + (spacing * 2), buttonHeight, buttonWidth, tron_blu, tron_whi, labelFont)
# Define each button press action
def button(number):
if number == 1:
if button1.disable == 1:
return
# Exit
process = subprocess.call("setterm -term linux -back default -fore white -clear all", shell=True)
pygame.quit()
sys.exit(37)
if number == 2:
if button2.disable == 1:
return
# X TFT
pygame.quit()
## Requires "Anybody" in dpkg-reconfigure x11-common if we have scrolled pages previously
## kalipi.run_cmd("/usr/bin/sudo -u pi FRAMEBUFFER=/dev/fb1 startx")
kalipi.run_cmd("/usr/bin/sudo FRAMEBUFFER=/dev/fb1 startx")
os.execv(__file__, sys.argv)
if number == 3:
if button3.disable == 1:
return
# X HDMI
pygame.quit()
## Requires "Anybody" in dpkg-reconfigure x11-common if we have scrolled pages previously
## kalipi.run_cmd("/usr/bin/sudo -u pi FRAMEBUFFER=/dev/fb0 startx")
kalipi.run_cmd("/usr/bin/sudo FRAMEBUFFER=/dev/fb0 startx")
os.execv(__file__, sys.argv)
if number == 4:
if button4.disable == 1:
return
# Shutdown
pygame.quit()
kalipi.run_cmd("/usr/bin/sudo /sbin/shutdown -h now")
sys.exit()
if number == 5:
if button5.disable == 1:
return
# Find IP
pygame.quit()
kalipi.run_cmd("/opt/hackbox/findip.sh")
os.execv(__file__, sys.argv)
if number == 6:
if button6.disable == 1:
return
# Terminal
process = subprocess.call("setterm -term linux -back default -fore white -clear all", shell=True)
pygame.quit()
kalipi.run_cmd("/usr/bin/sudo -u pi screen -R -S term")
process = subprocess.call("setterm -term linux -back default -fore black -clear all", shell=True)
os.execv(__file__, sys.argv)
if check_terminal():
button6.fntColor = green
button6.draw()
pygame.display.update()
else:
button6.fntColor = tron_whi
button6.draw()
pygame.display.update()
return
if number == 7:
if button7.disable == 1:
return
# Reboot
pygame.quit()
kalipi.run_cmd("/usr/bin/sudo /sbin/shutdown -r now")
sys.exit()
if number == 8:
if button8.disable == 1:
return
# Lock
retPage="menu-1.py"
kalipi.screensaver(retPage)
menu1()
if number == 9:
if button9.disable == 1:
return
# Next page
pygame.quit()
page=os.environ["MENUDIR"] + "menu-2.py"
os.execvp("python", ["python", page])
sys.exit()
## Buttons ##
#############################
def menu1():
# Init Pygame
kalipi.screen()
# Outer Border
kalipi.border(tron_blu)
#############################
## Buttons ##
# Buttons and labels
# See variables at the top of the document to adjust the menu
# Title
titleButton.draw()
# First Row
# Button 1
button1.disable = 0 # "1" disables button
if button1.disable == 1:
button1.draw()
else:
# Add button launch code here
button1.fntColor = yellow
button1.draw()
# Button 2
button2.disable = 0 # "1" disables button
if button2.disable == 1:
button2.draw()
else:
# Add button launch code here
button2.draw()
# Button 3
button3.disable = 0 # "1" disables button
if button3.disable == 1:
button3.draw()
else:
# Add button launch code here
button3.draw()
# Second Row
# Button 4
button4.disable = 0 # "1" disables button
if button4.disable == 1:
button4.draw()
else:
# Add button launch code here
button4.fntColor = yellow
button4.draw()
# Button 5
button5.disable = 0 # "1" disables button
if button5.disable == 1:
button5.draw()
else:
# Add button launch code here
if check_vnc():
button5.fntColor = green
button5.draw()
else:
button5.fntColor = tron_whi
button5.draw()
# Button 6
button6.disable = 0 # "1" disables button
if button6.disable == 1:
button6.draw()
else:
# Add button launch code here
if check_terminal():
button6.fntColor = green
button6.draw()
else:
button6.fntColor = tron_whi
button6.draw()
# Third Row
# Button 7
button7.disable = 0 # "1" disables button
if button7.disable == 1:
button7.draw()
else:
# Add button launch code here
button7.fntColor = yellow
button7.draw()
# Button 8
button8.disable = 0 # "1" disables button
if button8.disable == 1:
button8.draw()
else:
# Add button launch code here
button8.draw()
# Button 9
button9.disable = 0 # "1" disables button
if button9.disable == 1:
button9.draw()
else:
# Add button launch code here
button9.draw()
## Buttons ##
#############################
#############################
## Input loop ##
while 1:
butNo=kalipi.inputLoop("menu-1.py")
button(butNo)
## Input loop ##
#############################
if __name__ == "__main__":
menu1()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
05c69d86a598279fd05f359ed4b55dbf7789ecbb
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/96/usersdata/212/52473/submittedfiles/estatistica.py
|
833c7315b45ff153b15c5cbf6daf19fb0e037238
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,004
|
py
|
# -*- coding: utf-8 -*-
def media(lista):
soma = 0
for i in range(0,len(lista),1):
soma = soma + lista[i]
resultado = soma/len(lista)
return resultado
#Baseado na função acima, escreva a função para calcular o desvio padrão de uma lista
def desvio(lista):
soma=0
dp=0
m=media(lista)
n=len(lista)
for i in range(0,n,1):
soma=soma+(lista[i]-(media(lista)))**2
soma=((soma/n-1))**(1/2)
return soma
#Por último escreva o programa principal, que pede a entrada e chama as funções criadas.
n1=int(input('digite o número de elemetos da primeira lista:'))
l1=[]
i=0
while i<n1:
elemento=float(input('digite um número:'))
l1.append(elemento)
i=i+1
n2=int(input('digite o número de elemetos da segunda lista:'))
l2=[]
i=0
while i<n2:
elemento=float(input('digite um número:'))
l2.append(elemento)
i=i+1
m1=media(l1)
print('%.2f'%m1)
dp1=desvio(l1)
print(dp1)
m2=media(l2)
print('%.2f'%m2)
dp2=desvio(l2)
print(dp2)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
6a4ee598f47454db0493b131fcdf35f2e2f999bb
|
66d04b8c46feef85c1666a4ba467124ee2c18450
|
/lambdas/layer/.chalice/config.json.template.py
|
3d375499fa4760654e91540304e9ac9cb68879d9
|
[
"Apache-2.0"
] |
permissive
|
DataBiosphere/azul
|
767abdefafbaf714fb78e5ee1aa5224b07fa7ec1
|
3722323d4eed3089d25f6d6c9cbfb1672b7de939
|
refs/heads/develop
| 2023-09-01T12:26:21.387100
| 2023-09-01T00:01:46
| 2023-09-01T00:01:46
| 139,095,537
| 23
| 22
|
Apache-2.0
| 2023-09-14T18:00:44
| 2018-06-29T03:18:14
|
Python
|
UTF-8
|
Python
| false
| false
| 263
|
py
|
from azul import config
from azul.template import emit
emit({
"version": "2.0",
"app_name": config.qualified_resource_name("dependencies"),
"api_gateway_stage": config.deployment_stage,
"manage_iam_role": False,
"lambda_memory_size": 128,
})
|
[
"hannes@ucsc.edu"
] |
hannes@ucsc.edu
|
e71c296d26a1fc078ab5b6286026948ef2b23459
|
0beaf9d78d03100b2aebaaac38fb343d425f2b6a
|
/tests/regression/gsheet/test_chrome_gsheet_100r_number_utf8chars.py
|
57e29b0c39b7eec92f89400c4f2f06c356e2ae5d
|
[] |
no_license
|
digitarald/Hasal
|
462fc044bb4a754c8d76c0bfb0df519f1786fdcc
|
c496afae6ec2e3743148f3a6288b78f120100513
|
refs/heads/master
| 2021-01-13T14:29:44.471037
| 2016-11-04T10:49:19
| 2016-11-04T10:49:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
from lib.perfBaseTest import PerfBaseTest
class TestSikuli(PerfBaseTest):
def setUp(self):
super(TestSikuli, self).setUp()
def test_chrome_gsheet_100r_number_utf8chars(self):
self.test_url = self.env.GSHEET_TEST_URL_SPEC % self.env.TEST_TARGET_ID_100R_NUMBER_UTF8CHAR
self.sikuli_status = self.sikuli.run_test(self.env.test_name, self.env.output_name, test_target=self.test_url, script_dp=self.env.test_script_py_dp)
|
[
"sho@mozilla.com"
] |
sho@mozilla.com
|
252a725708758cf720a94811657ecfdfd0b1d90d
|
0206ac23a29673ee52c367b103dfe59e7733cdc1
|
/src/crcm5/analyse_hdf/lake_effect_on_streamflow_quantiles.py
|
f129054251e6b9dd8519fa5d776392060593cf5a
|
[] |
no_license
|
guziy/RPN
|
2304a93f9ced626ae5fc8abfcc079e33159ae56a
|
71b94f4c73d4100345d29a6fbfa9fa108d8027b5
|
refs/heads/master
| 2021-11-27T07:18:22.705921
| 2021-11-27T00:54:03
| 2021-11-27T00:54:03
| 2,078,454
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,676
|
py
|
import os
from datetime import datetime
import brewer2mpl
from matplotlib.axes import Axes
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import MaxNLocator, ScalarFormatter
from crcm5 import infovar
from data import cehq_station
from data.cehq_station import Station
from data.cell_manager import CellManager
from . import do_analysis_using_pytables as analysis
import matplotlib.pyplot as plt
import numpy as np
__author__ = 'huziy'
images_folder = "/home/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/images_for_lake-river_paper"
from . import common_plot_params as cpp
def plot_one_to_one_line(the_ax):
assert isinstance(the_ax, Axes)
x1, x2 = the_ax.get_xlim()
y1, y2 = the_ax.get_ylim()
lims = [x1, x2, y1, y2]
z = min(lims), max(lims)
the_ax.plot(z, z, "-.k")
def main():
start_year = 1980
end_year = 2010
start_date = datetime(start_year, 1, 1)
end_date = datetime(end_year, 12, 31)
ids_with_lakes_upstream = [
"104001", "093806", "093801", "081002", "081007", "080718"
]
selected_station_ids = ["092715", "074903", "080104", "081007", "061905",
"093806", "090613", "081002", "093801", "080718", "104001"]
selected_station_ids = ids_with_lakes_upstream
# Get the list of stations to do the comparison with
stations = cehq_station.read_station_data(
start_date=start_date,
end_date=end_date,
selected_ids=selected_station_ids
)
# add hydat stations
# province = "QC"
# min_drainage_area_km2 = 10000.0
# stations_hd = cehq_station.load_from_hydat_db(start_date=start_date, end_date=end_date,
# province=province, min_drainage_area_km2=min_drainage_area_km2)
# if not len(stations_hd):
# print "No hydat stations satisying the conditions: period {0}-{1}, province {2}".format(
# str(start_date), str(end_date), province
# )
# stations.extend(stations_hd)
# brewer2mpl.get_map args: set name set type number of colors
bmap = brewer2mpl.get_map("Set1", "qualitative", 9)
path1 = "/skynet3_rech1/huziy/hdf_store/quebec_0.1_crcm5-hcd-r.hdf5"
label1 = "CRCM5-L1"
path2 = "/skynet3_rech1/huziy/hdf_store/quebec_0.1_crcm5-hcd-rl.hdf5"
label2 = "CRCM5-L2"
color2, color1 = bmap.mpl_colors[:2]
fldirs = analysis.get_array_from_file(path=path1, var_name=infovar.HDF_FLOW_DIRECTIONS_NAME)
lons2d, lats2d, basemap = analysis.get_basemap_from_hdf(path1)
lake_fractions = analysis.get_array_from_file(path=path1, var_name=infovar.HDF_LAKE_FRACTION_NAME)
# cell_areas = analysis.get_array_from_file(path=path1, var_name=infovar.HDF_CELL_AREA_NAME)
acc_area = analysis.get_array_from_file(path=path1, var_name=infovar.HDF_ACCUMULATION_AREA_NAME)
cell_manager = CellManager(fldirs, lons2d=lons2d, lats2d=lats2d, accumulation_area_km2=acc_area)
station_to_mp = cell_manager.get_model_points_for_stations(station_list=stations,
lake_fraction=lake_fractions,
drainaige_area_reldiff_limit=0.3)
fig, axes = plt.subplots(1, 2, gridspec_kw=dict(top=0.80, wspace=0.4))
q90_obs_list = []
q90_mod1_list = []
q90_mod2_list = []
q10_obs_list = []
q10_mod1_list = []
q10_mod2_list = []
for the_station, the_mp in station_to_mp.items():
assert isinstance(the_station, Station)
compl_years = the_station.get_list_of_complete_years()
if len(compl_years) < 3:
continue
t, stfl1 = analysis.get_daily_climatology_for_a_point(path=path1, years_of_interest=compl_years,
i_index=the_mp.ix, j_index=the_mp.jy, var_name="STFA")
_, stfl2 = analysis.get_daily_climatology_for_a_point(path=path2, years_of_interest=compl_years,
i_index=the_mp.ix, j_index=the_mp.jy, var_name="STFA")
_, stfl_obs = the_station.get_daily_climatology_for_complete_years(stamp_dates=t, years=compl_years)
# Q90
q90_obs = np.percentile(stfl_obs, 90)
q90_mod1 = np.percentile(stfl1, 90)
q90_mod2 = np.percentile(stfl2, 90)
# Q10
q10_obs = np.percentile(stfl_obs, 10)
q10_mod1 = np.percentile(stfl1, 10)
q10_mod2 = np.percentile(stfl2, 10)
# save quantiles to lists for correlation calculation
q90_obs_list.append(q90_obs)
q90_mod1_list.append(q90_mod1)
q90_mod2_list.append(q90_mod2)
q10_mod1_list.append(q10_mod1)
q10_mod2_list.append(q10_mod2)
q10_obs_list.append(q10_obs)
# axes[0].annotate(the_station.id, (q90_obs, np.percentile(stfl1, 90)))
# axes[1].annotate(the_station.id, (q10_obs, np.percentile(stfl1, 10)))
# Plot scatter plot of Q90
the_ax = axes[0]
# the_ax.annotate(the_station.id, (q90_obs, np.percentile(stfl1, 90)))
the_ax.scatter(q90_obs_list, q90_mod1_list, label=label1, c=color1)
the_ax.scatter(q90_obs_list, q90_mod2_list, label=label2, c=color2)
# plot scatter plot of Q10
the_ax = axes[1]
# the_ax.annotate(the_station.id, (q10_obs, np.percentile(stfl1, 10)))
h1 = the_ax.scatter(q10_obs_list, q10_mod1_list, label=label1, c=color1)
h2 = the_ax.scatter(q10_obs_list, q10_mod2_list, label=label2, c=color2)
# Add correlation coefficients to the axes
fp = FontProperties(size=14, weight="bold")
axes[0].annotate(r"$R^2 = {0:.2f}$".format(np.corrcoef(q90_mod1_list, q90_obs_list)[0, 1] ** 2),
(0.1, 0.85), color=color1, xycoords="axes fraction", font_properties=fp)
axes[0].annotate(r"$R^2 = {0:.2f}$".format(np.corrcoef(q90_mod2_list, q90_obs_list)[0, 1] ** 2),
(0.1, 0.70), color=color2, xycoords="axes fraction", font_properties=fp)
axes[1].annotate(r"$R^2 = {0:.2f}$".format(np.corrcoef(q10_mod1_list, q10_obs_list)[0, 1] ** 2),
(0.1, 0.85), color=color1, xycoords="axes fraction", font_properties=fp)
axes[1].annotate(r"$R^2 = {0:.2f}$".format(np.corrcoef(q10_mod2_list, q10_obs_list)[0, 1] ** 2),
(0.1, 0.70), color=color2, xycoords="axes fraction", font_properties=fp)
sf = ScalarFormatter(useMathText=True)
sf.set_powerlimits((-2, 3))
for ind, the_ax in enumerate(axes):
plot_one_to_one_line(the_ax)
if ind == 0:
the_ax.set_xlabel(r"Observed $\left({\rm m^3/s} \right)$")
the_ax.set_ylabel(r"Modelled $\left({\rm m^3/s} \right)$")
the_ax.annotate(r"$Q_{90}$" if ind == 0 else r"$Q_{10}$",
(0.95, 0.95), xycoords="axes fraction",
bbox=dict(facecolor="white"),
va="top", ha="right")
the_ax.xaxis.set_major_formatter(sf)
the_ax.yaxis.set_major_formatter(sf)
locator = MaxNLocator(nbins=5)
the_ax.xaxis.set_major_locator(locator)
the_ax.yaxis.set_major_locator(locator)
x1, x2 = the_ax.get_xlim()
# Since streamflow percentiles can only be positive
the_ax.set_xlim(0, x2)
the_ax.set_ylim(0, x2)
fig.legend([h1, h2], [label1, label2], loc="upper center", ncol=2)
figpath = os.path.join(images_folder, "percentiles_comparison.png")
# plt.tight_layout()
fig.savefig(figpath, dpi=cpp.FIG_SAVE_DPI, bbox_inches="tight")
if __name__ == "__main__":
import application_properties
application_properties.set_current_directory()
main()
|
[
"guziy.sasha@gmail.com"
] |
guziy.sasha@gmail.com
|