| Column | Dtype | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
7ec62fa65d189134f96ad76d02130a15a9598acd
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/web/latest/web_app_swift_virtual_network_connection.py
|
3e8952e7afad52a8498263aaa96cb79bcd78486d
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 6,147
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['WebAppSwiftVirtualNetworkConnection']
class WebAppSwiftVirtualNetworkConnection(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
subnet_resource_id: Optional[pulumi.Input[str]] = None,
swift_supported: Optional[pulumi.Input[bool]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Swift Virtual Network Contract. This is used to enable the new Swift way of doing virtual network integration.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of the app.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] subnet_resource_id: The Virtual Network subnet's resource ID. This is the subnet that this Web App will join. This subnet must have a delegation to Microsoft.Web/serverFarms defined first.
:param pulumi.Input[bool] swift_supported: A flag that specifies if the scale unit this Web App is on supports Swift integration.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['kind'] = kind
if name is None:
raise TypeError("Missing required property 'name'")
__props__['name'] = name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['subnet_resource_id'] = subnet_resource_id
__props__['swift_supported'] = swift_supported
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppSwiftVirtualNetworkConnection"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppSwiftVirtualNetworkConnection"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppSwiftVirtualNetworkConnection"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppSwiftVirtualNetworkConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppSwiftVirtualNetworkConnection, __self__).__init__(
'azure-nextgen:web/latest:WebAppSwiftVirtualNetworkConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppSwiftVirtualNetworkConnection':
"""
Get an existing WebAppSwiftVirtualNetworkConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return WebAppSwiftVirtualNetworkConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="subnetResourceId")
def subnet_resource_id(self) -> pulumi.Output[Optional[str]]:
"""
The Virtual Network subnet's resource ID. This is the subnet that this Web App will join. This subnet must have a delegation to Microsoft.Web/serverFarms defined first.
"""
return pulumi.get(self, "subnet_resource_id")
@property
@pulumi.getter(name="swiftSupported")
def swift_supported(self) -> pulumi.Output[Optional[bool]]:
"""
A flag that specifies if the scale unit this Web App is on supports Swift integration.
"""
return pulumi.get(self, "swift_supported")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
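# --- Added usage sketch (illustrative only; names and IDs below are hypothetical) ---
# Declaring this resource in a Pulumi program, assuming an existing Web App
# "my-app" in resource group "my-rg" and a subnet delegated to Microsoft.Web/serverFarms:
#
#   connection = WebAppSwiftVirtualNetworkConnection(
#       "my-swift-connection",
#       name="my-app",
#       resource_group_name="my-rg",
#       subnet_resource_id="/subscriptions/<sub-id>/resourceGroups/my-rg/"
#                          "providers/Microsoft.Network/virtualNetworks/my-vnet/subnets/my-subnet")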
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
1561dc7a0d4fdcc7d078decb8d7a38e0e0838680
|
8600ea155f279e5a8dfe5a1926038511f6b6a7ea
|
/membership/wizard/__init__.py
|
ab1f292ce8f004d75e6a9cebe2c074fb580a2a84
|
[] |
no_license
|
MarkNorgate/addons-EAD
|
c2fff89ab16fce3ba19fbe433ee5863705a6f4e5
|
840f28642b5d328e4b86839c413e5164622295a5
|
refs/heads/master
| 2020-04-23T22:11:00.164438
| 2015-07-22T12:24:53
| 2015-07-22T12:24:53
| 39,501,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,084
|
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import invoice_membership
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"mark.norgate@affinity-digital.com"
] |
mark.norgate@affinity-digital.com
|
e9e2037b55eee5289c172664d2628b589cfd35ef
|
b7620d0f1a90390224c8ab71774b9c906ab3e8e9
|
/aliyun-python-sdk-live/aliyunsdklive/request/v20161101/DescribeLiveStreamWatermarksRequest.py
|
75fdfa4674d65607a75e94f5a4e7d72a1d5becb4
|
[
"Apache-2.0"
] |
permissive
|
YaoYinYing/aliyun-openapi-python-sdk
|
e9c62940baee1a35b9ec4a9fbd1e4eb0aaf93b2f
|
e9a93cc94bd8290d1b1a391a9cb0fad2e6c64627
|
refs/heads/master
| 2022-10-17T16:39:04.515562
| 2022-10-10T15:18:34
| 2022-10-10T15:18:34
| 117,057,304
| 0
| 0
| null | 2018-01-11T06:03:02
| 2018-01-11T06:03:01
| null |
UTF-8
|
Python
| false
| false
| 1,844
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class DescribeLiveStreamWatermarksRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'DescribeLiveStreamWatermarks','live')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
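# Added usage sketch (hypothetical; assumes an aliyunsdkcore AcsClient has been
# created with valid credentials and a region):
#   request = DescribeLiveStreamWatermarksRequest()
#   request.set_PageNumber(1)
#   request.set_PageSize(20)
#   response = client.do_action_with_exception(request)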
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
ae433691f368227705fc5d0df3b181e7c53763e9
|
25ba4b387bf6bb278af1335b18e99ea0389548be
|
/think_python_solutions/chapter-06/exercise-6.8.py
|
9b6ed0d969a178a8c2633a592518055b1d9abade
|
[] |
no_license
|
adwanAK/adwan_python_core
|
56ffc63b326ec3f6739bc4733c060dc49554f1ae
|
745922331cf36f65376c55115ee9a3df4d5c0450
|
refs/heads/master
| 2022-12-12T21:35:42.387685
| 2018-09-26T08:53:43
| 2018-09-26T08:53:43
| 148,442,004
| 2
| 1
| null | 2022-12-08T02:23:54
| 2018-09-12T07:46:34
|
Python
|
UTF-8
|
Python
| false
| false
| 841
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
exercise-6.8.py
Greatest common divisor with Euclid's algorithm. PITA to realize that we must return the function value, not simply invoke the gcd function via recursion.
Created by Terry Bates on 2012-08-19.
Copyright (c) 2012 http://the-awesome-python-blog.posterous.com.
All rights reserved."""
def gcd(a,b):
# catch base case from the door
if b == 0:
return a
# otherwise, do the other steps
else:
# compute remainder
remainder = a % b
# If we have base case, no need to
# call again
if remainder == 0:
return b
# otherwise, we recurse and grab the return value
else:
return gcd(b, remainder)
if __name__ == '__main__':
print(gcd(9, 3))
print(gcd(15, 12))
print(gcd(24, 8))
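# Added trace of the recursion above (illustrative):
#   gcd(15, 12) -> remainder 3 -> gcd(12, 3) -> remainder 0 -> returns 3
# Each recursive call's result must be returned, or the top-level call yields None.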
|
[
"breuss.martin@gmail.com"
] |
breuss.martin@gmail.com
|
f75a4829b19ba35e524480bfe8810f6d7f6964e9
|
81aaa9ffcbdddf0b6f99a5c4c84adf179fef8f3e
|
/maml_rl/tests/utils/test_torch_utils.py
|
3c9d2a02ccef8f45ec2b222cc40ed8c90c499847
|
[
"MIT"
] |
permissive
|
dkkim93/pytorch-maml-rl
|
5ffd728d356224136244f46bc32b18c72568d007
|
6dd0dd7dbf2e8aed29e5c3bbab9f42cfd780c99a
|
refs/heads/master
| 2021-07-09T07:28:40.184950
| 2020-08-10T03:50:10
| 2020-08-10T03:50:10
| 174,039,301
| 1
| 0
|
MIT
| 2019-03-06T00:01:14
| 2019-03-06T00:01:13
| null |
UTF-8
|
Python
| false
| false
| 3,711
|
py
|
import pytest
import numpy as np
import torch
import torch.nn as nn
from maml_rl.utils.torch_utils import (weighted_mean, weighted_normalize,
vector_to_parameters)
def test_weighted_mean():
lengths = [2, 3, 7, 5, 11]
# Inputs
inputs_np = np.random.rand(13, 5).astype(np.float32)
for i, length in enumerate(lengths):
inputs_np[length:, i] = 0.
# Pytorch
inputs_th = torch.as_tensor(inputs_np)
mean_th = weighted_mean(inputs_th, lengths=lengths)
# Numpy
mean_np = np.zeros((5,), dtype=np.float32)
for i, length in enumerate(lengths):
for j in range(13):
if j < length:
mean_np[i] += inputs_np[j, i]
mean_np[i] /= length
assert mean_th.dim() == 1
assert mean_th.shape == (5,)
np.testing.assert_allclose(mean_th.detach().numpy(), mean_np)
def test_weighted_mean_multi_dimensional():
lengths = [2, 3, 7, 5, 11]
# Inputs
inputs_np = np.random.rand(13, 5, 17, 19).astype(np.float32)
for i, length in enumerate(lengths):
inputs_np[length:, i] = 0.
# Pytorch
inputs_th = torch.as_tensor(inputs_np)
mean_th = weighted_mean(inputs_th, lengths=lengths)
# Numpy
mean_np = np.zeros((5, 17, 19), dtype=np.float32)
for i, length in enumerate(lengths):
for j in range(13):
if j < length:
mean_np[i] += inputs_np[j, i]
mean_np[i] /= length
assert mean_th.dim() == 3
assert mean_th.shape == (5, 17, 19)
np.testing.assert_allclose(mean_th.detach().numpy(), mean_np)
def test_weighted_mean_side_effect():
lengths = [2, 3, 7, 5, 11]
# Inputs
inputs_np = np.random.rand(13, 5).astype(np.float32)
# Pytorch
inputs_th = torch.as_tensor(inputs_np)
mean_th = weighted_mean(inputs_th, lengths=lengths)
for i, length in enumerate(lengths):
assert (inputs_th[length:, i] == 0.).all()
assert (inputs_np[length:, i] == 0.).all()
def test_weighted_normalize():
lengths = [2, 3, 7, 5, 11]
# Inputs
inputs_np = np.random.rand(13, 5).astype(np.float32)
# Pytorch
inputs_th = torch.as_tensor(inputs_np)
normalized_th = weighted_normalize(inputs_th, lengths=lengths)
for i, length in enumerate(lengths):
assert (normalized_th[length:, i] == 0.).all()
def test_vector_to_parameters_no_shared_memory():
model = nn.Sequential(
nn.Linear(2, 3, bias=True),
nn.Linear(3, 5, bias=True))
num_params = (2 * 3) + 3 + (3 * 5) + 5
vector_np = np.random.rand(num_params).astype(np.float32)
vector = torch.as_tensor(vector_np)
vector_to_parameters(vector, model.parameters())
pointer = 0
for param in model.parameters():
num_param = param.numel()
param_np = param.view(-1).detach().numpy()
np.testing.assert_array_equal(param_np, vector_np[pointer:pointer + num_param])
pointer += num_param
def test_vector_to_parameters_shared_memory():
model = nn.Sequential(
nn.Linear(2, 3, bias=True),
nn.Linear(3, 5, bias=True))
model.share_memory()
for param in model.parameters():
assert param.data.is_shared()
num_params = (2 * 3) + 3 + (3 * 5) + 5
vector_np = np.random.rand(num_params).astype(np.float32)
vector = torch.as_tensor(vector_np)
vector_to_parameters(vector, model.parameters())
pointer = 0
for param in model.parameters():
num_param = param.numel()
param_np = param.view(-1).detach().numpy()
np.testing.assert_array_equal(param_np, vector_np[pointer:pointer + num_param])
assert param.data.is_shared()
pointer += num_param
|
[
"tristan.deleu@gmail.com"
] |
tristan.deleu@gmail.com
|
51f33cc29c4bf5e9a4b13ce80679ec1ace1b8c61
|
a298d0b4a3e9e12170651a6bf728093b4badfac7
|
/LeetCode/394 - Decode String/decodeString.py
|
98d734f3743b24e348b307092d67a1c6b79350b9
|
[] |
no_license
|
gavinz0228/AlgoPractice
|
fc8ecd194ea2d26de59df45909838161c802b8cd
|
1cb183a326a0612a5cd941778500a8265e1d7255
|
refs/heads/master
| 2022-07-27T11:42:06.887668
| 2022-07-18T20:38:31
| 2022-07-18T20:38:31
| 172,929,652
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,021
|
py
|
class Solution(object):
def decodeString(self, s):
"""
:type s: str
:rtype: str
"""
return self.getString(s)[0]
def getChars(self, s):
if not s:
return "",""
if ord(s[0]) > 57:
for i in range(len(s)):
if ord(s[i]) <= 57 or s[i]== "]":
return s[:i] , s[i:]
return s, ""
else:
return "", s
def getString(self, s):
result = []
while s and s[0] != "]":
_s, _tail = self.getChars(s)
__s, __tail = self.getNum(_tail)
result.append(_s+__s)
s = __tail
return "".join(result), s
def getNum(self, s):
if not s:
return "",""
if ord(s[0]) <= 57:
lb = s.index("[")
chars, tail = self.getString( s[lb+1:] )
n = int(s[:lb])
return n*chars, tail[1:]
else:
return "", s
|
[
"gavinz0228@gmail.com"
] |
gavinz0228@gmail.com
|
8f9b74e584166ea6b796806111f5fc467dbaebf4
|
f36b733f9c24d4cabd0d3354e0344094fbf3c026
|
/a10_saltstack/helpers/helper_modules/a10_cgnv6_lsn.py
|
cd9e9303c79271c0cf98e7829c74d1a2739c5a8a
|
[
"Apache-2.0"
] |
permissive
|
a10networks/a10-saltstack
|
08e13647e0187b09500ed3d9053ae06e7e808746
|
0d86043b1d09e75ea170e72fac5068254fc4037c
|
refs/heads/master
| 2021-03-19T16:11:14.211706
| 2019-07-24T17:18:04
| 2019-07-24T17:18:04
| 123,501,933
| 2
| 3
| null | 2019-07-24T17:18:05
| 2018-03-01T22:55:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,223
|
py
|
# Copyright 2019 A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Hacky way of having access to object properties for evaluation
AVAILABLE_PROPERTIES = ["alg","endpoint_independent_filtering","endpoint_independent_mapping","global","health_check_gateway_list","inside","performance","port_overloading","port_reservation_list","radius","stun_timeout","tcp",]
REF_PROPERTIES = {
"alg": "/axapi/v3/cgnv6/lsn/alg",
"endpoint_independent_filtering": "/axapi/v3/cgnv6/lsn/endpoint-independent-filtering",
"endpoint_independent_mapping": "/axapi/v3/cgnv6/lsn/endpoint-independent-mapping",
"global": "/axapi/v3/cgnv6/lsn/global",
"health_check_gateway_list": "/axapi/v3/cgnv6/lsn/health-check-gateway/{ipv4-addr}+{ipv6-addr}",
"inside": "/axapi/v3/cgnv6/lsn/inside",
"performance": "/axapi/v3/cgnv6/lsn/performance",
"port_overloading": "/axapi/v3/cgnv6/lsn/port-overloading",
"port_reservation_list": "/axapi/v3/cgnv6/lsn/port-reservation/{inside}+{inside-port-start}+{inside-port-end}+{nat}+{nat-port-start}+{nat-port-end}",
"radius": "/axapi/v3/cgnv6/lsn/radius",
"stun_timeout": "/axapi/v3/cgnv6/lsn/stun-timeout",
"tcp": "/axapi/v3/cgnv6/lsn/tcp",
}
MODULE_NAME = "lsn"
PARENT_KEYS = []
CHILD_KEYS = []
def new_url(**kwargs):
"""Return the URL for creating a resource"""
# To create the URL, we need to take the format string and return it with no params
url_base = "/axapi/v3/cgnv6/lsn"
f_dict = {}
return url_base.format(**f_dict)
def existing_url(**kwargs):
"""Return the URL for an existing resource"""
# Build the format dictionary
url_base = "/axapi/v3/cgnv6/lsn"
f_dict = {}
return url_base.format(**f_dict)
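# Added sketch: both helpers resolve to the same collection URL here, since the
# lsn module has no parent keys to interpolate into the format string.
print(new_url())       # /axapi/v3/cgnv6/lsn
print(existing_url())  # /axapi/v3/cgnv6/lsn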
|
[
"thompson.grey.hunter@gmail.com"
] |
thompson.grey.hunter@gmail.com
|
b032bab5b656e6bb9d18d5ee3d3dded0406cb869
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-ocr/huaweicloudsdkocr/v1/model/recognize_thailand_license_plate_request.py
|
c1acf5879b063b5598ca85dc49a3cc438cd447aa
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 6,709
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RecognizeThailandLicensePlateRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'enterprise_project_id': 'str',
'body': 'ThailandLicensePlateRequestBody'
}
attribute_map = {
'enterprise_project_id': 'Enterprise-Project-Id',
'body': 'body'
}
def __init__(self, enterprise_project_id=None, body=None):
"""RecognizeThailandLicensePlateRequest
The model defined in huaweicloud sdk
:param enterprise_project_id: Enterprise project ID. OCR supports splitting the bill for the resource usage of different user groups and users via Enterprise Project Management (EPS). To obtain it, open the "[Enterprise Project Management](https://console-intl.huaweicloud.com/eps/?region=ap-southeast-2#/projects/list)" page, click the enterprise project name, and read the Enterprise-Project-Id from the project details page. For the steps to create an enterprise project, see the user guide. > Note: once an enterprise project has been created, three scenarios apply when passing this parameter. - With a correct ID, the OCR service works normally and the bill is attributed to that enterprise project. - With an incorrect ID, the OCR service works normally but the bill's enterprise project is classified as "unallocated". - Without an ID, the OCR service works normally and the bill's enterprise project is classified as "unallocated".
:type enterprise_project_id: str
:param body: Body of the RecognizeThailandLicensePlateRequest
:type body: :class:`huaweicloudsdkocr.v1.ThailandLicensePlateRequestBody`
"""
self._enterprise_project_id = None
self._body = None
self.discriminator = None
if enterprise_project_id is not None:
self.enterprise_project_id = enterprise_project_id
if body is not None:
self.body = body
@property
def enterprise_project_id(self):
"""Gets the enterprise_project_id of this RecognizeThailandLicensePlateRequest.
Enterprise project ID. OCR supports splitting the bill for the resource usage of different user groups and users via Enterprise Project Management (EPS). To obtain it, open the "[Enterprise Project Management](https://console-intl.huaweicloud.com/eps/?region=ap-southeast-2#/projects/list)" page, click the enterprise project name, and read the Enterprise-Project-Id from the project details page. For the steps to create an enterprise project, see the user guide. > Note: once an enterprise project has been created, three scenarios apply when passing this parameter. - With a correct ID, the OCR service works normally and the bill is attributed to that enterprise project. - With an incorrect ID, the OCR service works normally but the bill's enterprise project is classified as "unallocated". - Without an ID, the OCR service works normally and the bill's enterprise project is classified as "unallocated".
:return: The enterprise_project_id of this RecognizeThailandLicensePlateRequest.
:rtype: str
"""
return self._enterprise_project_id
@enterprise_project_id.setter
def enterprise_project_id(self, enterprise_project_id):
"""Sets the enterprise_project_id of this RecognizeThailandLicensePlateRequest.
Enterprise project ID. OCR supports splitting the bill for the resource usage of different user groups and users via Enterprise Project Management (EPS). To obtain it, open the "[Enterprise Project Management](https://console-intl.huaweicloud.com/eps/?region=ap-southeast-2#/projects/list)" page, click the enterprise project name, and read the Enterprise-Project-Id from the project details page. For the steps to create an enterprise project, see the user guide. > Note: once an enterprise project has been created, three scenarios apply when passing this parameter. - With a correct ID, the OCR service works normally and the bill is attributed to that enterprise project. - With an incorrect ID, the OCR service works normally but the bill's enterprise project is classified as "unallocated". - Without an ID, the OCR service works normally and the bill's enterprise project is classified as "unallocated".
:param enterprise_project_id: The enterprise_project_id of this RecognizeThailandLicensePlateRequest.
:type enterprise_project_id: str
"""
self._enterprise_project_id = enterprise_project_id
@property
def body(self):
"""Gets the body of this RecognizeThailandLicensePlateRequest.
:return: The body of this RecognizeThailandLicensePlateRequest.
:rtype: :class:`huaweicloudsdkocr.v1.ThailandLicensePlateRequestBody`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this RecognizeThailandLicensePlateRequest.
:param body: The body of this RecognizeThailandLicensePlateRequest.
:type body: :class:`huaweicloudsdkocr.v1.ThailandLicensePlateRequestBody`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RecognizeThailandLicensePlateRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
6d49fc4df7c3b8e4dd925d25afe79a6b324151f9
|
78b160d8131f3c4b7aef0d051b040825a9c50e0d
|
/algoexpert/threeNumberSum.py
|
864ca5a9c129c8a28975b66b5644991a336c3890
|
[
"MIT"
] |
permissive
|
ardakkk/Algorithms-and-Data-Structures
|
744f8c9ffb233b95040e5bdcbddb9f5d2ff7a5ba
|
c428bb0bd7eeb6c34448630f88f13e1329b54636
|
refs/heads/master
| 2021-07-08T22:40:40.361282
| 2020-07-20T10:39:58
| 2020-07-20T10:39:58
| 156,005,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
# Time: O(n^2) | Space: O(n)
def threeNumberSum(array, target_sum):
array.sort()
triplets = []
for i in range(len(array) - 2):
left = i + 1
right = len(array) - 1
while left < right:
current_sum = array[i] + array[left] + array[right]
if current_sum == target_sum:
triplets.append([array[i], array[left], array[right]])
left += 1
right -= 1
elif current_sum < target_sum:
left += 1
elif current_sum > target_sum:
right -= 1
return triplets
print(threeNumberSum([-1, 0, 1, 2, -1, -4], 0))
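# With the sum fixed above, this prints [[-1, -1, 2], [-1, 0, 1], [-1, 0, 1]]
# (the two-pointer sweep does not deduplicate triplets built from repeated inputs).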
|
[
"ardakfuse@gmail.com"
] |
ardakfuse@gmail.com
|
c780e1290022738f2784cc181bb9b35646860418
|
315145ec1f997da0ac2dcedc221285b9a8aae3a9
|
/2016/get_tweetid_boundary.py
|
91f38626644492392eded816ed26277cf40cc24c
|
[] |
no_license
|
lukuang/2016-rts
|
55cc7f942ad674c325e1e906246873016e3b9678
|
bfe8c57711aa3243419ff4c0487e971eee667aa6
|
refs/heads/master
| 2021-01-19T04:21:53.707708
| 2019-01-03T23:14:46
| 2019-01-03T23:14:46
| 61,137,503
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,432
|
py
|
"""
generate tweet boundary for each day
"""
import os
import json
import sys
import re
import argparse
import codecs
import subprocess
def get_tweet_id(output):
for line in output.split("\n"):
tweetid_finder = re.search("<DOCNO>(\d+)",line)
if tweetid_finder:
return tweetid_finder.group(1)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--raw_tweet_dir","-rd",default="/infolab/headnode2/lukuang/2016-rts/data/raw/first/reparsed_text/")
parser.add_argument("dest_file")
args=parser.parse_args()
boundary = {}
for day_file_name in next(os.walk(args.raw_tweet_dir))[2]:
m = re.search("08-(\d+)",day_file_name)
if m:
day = m.group(1)
boundary[day] = {
"start":"",
"end":""
}
day_file = os.path.join(args.raw_tweet_dir,day_file_name)
p1 = subprocess.Popen(["head","-10",day_file], stdout=subprocess.PIPE, universal_newlines=True)
head_output = p1.communicate()[0]
boundary[day]["start"] = get_tweet_id(head_output)
p2 = subprocess.Popen(["tail","-10",day_file], stdout=subprocess.PIPE, universal_newlines=True)
tail_output = p2.communicate()[0]
boundary[day]["end"] = get_tweet_id(tail_output)
with open(args.dest_file,"w") as f:
f.write(json.dumps(boundary))
if __name__=="__main__":
main()
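# Added note: dest_file ends up holding one start/end tweet id pair per day, e.g.
#   {"01": {"start": "<first DOCNO id>", "end": "<last DOCNO id>"}, ...}
# (day keys come from the "08-<day>" pattern in the raw tweet file names).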
|
[
"lukuang1989@gmail.com"
] |
lukuang1989@gmail.com
|
484932b2584a92629dfb5af13608264e37cf192e
|
3b8ea489dcf2eea47c37156527a2727221e216f3
|
/virtual/lib/python3.6/site-packages/gunicorn/__init__.py
|
0edcf51907fe1909fae903ba27d6f143406cb4ac
|
[
"MIT"
] |
permissive
|
toelapiut/unsplash
|
d57ec93825ebae7107aea43823b1acbe4e5493f6
|
a93a694bef7214c64d248e20f3a6b904b463672d
|
refs/heads/master
| 2021-08-31T14:49:31.383197
| 2017-12-21T19:07:11
| 2017-12-21T19:07:11
| 111,072,300
| 5
| 0
|
MIT
| 2017-12-21T19:07:12
| 2017-11-17T07:31:39
|
Python
|
UTF-8
|
Python
| false
| false
| 255
|
py
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
version_info = (19, 7, 1)
__version__ = ".".join([str(v) for v in version_info])
SERVER_SOFTWARE = "gunicorn/%s" % __version__
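# For this snapshot the derived values are:
#   __version__     == "19.7.1"
#   SERVER_SOFTWARE == "gunicorn/19.7.1"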
|
[
"toelapiut7@gmail.com"
] |
toelapiut7@gmail.com
|
ac563116caaf0fd04a8dc147dae27c0f347f95fe
|
75fa11b13ddab8fd987428376f5d9c42dff0ba44
|
/metadata-ingestion/tests/integration/git/test_git_clone.py
|
3436c692f5d95399a53d6f910545ab484c6be3d1
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
] |
permissive
|
RyanHolstien/datahub
|
163d0ff6b4636919ed223ee63a27cba6db2d0156
|
8cf299aeb43fa95afb22fefbc7728117c727f0b3
|
refs/heads/master
| 2023-09-04T10:59:12.931758
| 2023-08-21T18:33:10
| 2023-08-21T18:33:10
| 246,685,891
| 0
| 0
|
Apache-2.0
| 2021-02-16T23:48:05
| 2020-03-11T21:43:58
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 3,961
|
py
|
import os
import pytest
from pydantic import SecretStr
from datahub.configuration.common import ConfigurationWarning
from datahub.configuration.git import GitInfo, GitReference
from datahub.ingestion.source.git.git_import import GitClone
LOOKML_TEST_SSH_KEY = os.environ.get("DATAHUB_LOOKML_GIT_TEST_SSH_KEY")
def test_base_url_guessing():
# Basic GitHub repo.
config = GitInfo(repo="https://github.com/datahub-project/datahub", branch="master")
assert config.repo_ssh_locator == "git@github.com:datahub-project/datahub.git"
# Defaults to GitHub.
config = GitInfo(repo="datahub-project/datahub", branch="master")
assert (
config.get_url_for_file_path("docker/README.md")
== "https://github.com/datahub-project/datahub/blob/master/docker/README.md"
)
assert config.repo_ssh_locator == "git@github.com:datahub-project/datahub.git"
# GitLab repo (notice the trailing slash).
config_ref = GitReference(
repo="https://gitlab.com/gitlab-tests/sample-project/", branch="master"
)
assert (
config_ref.get_url_for_file_path("hello_world.md")
== "https://gitlab.com/gitlab-tests/sample-project/-/blob/master/hello_world.md"
)
# Three-tier GitLab repo.
config = GitInfo(
repo="https://gitlab.com/gitlab-com/gl-infra/reliability", branch="master"
)
assert (
config.get_url_for_file_path("onboarding/gitlab.nix")
== "https://gitlab.com/gitlab-com/gl-infra/reliability/-/blob/master/onboarding/gitlab.nix"
)
assert (
config.repo_ssh_locator == "git@gitlab.com:gitlab-com/gl-infra/reliability.git"
)
# Overrides.
config = GitInfo(
repo="https://gitea.com/gitea/tea",
branch="main",
url_template="https://gitea.com/gitea/tea/src/branch/{branch}/{file_path}",
repo_ssh_locator="https://gitea.com/gitea/tea.git",
)
assert (
config.get_url_for_file_path("cmd/admin.go")
== "https://gitea.com/gitea/tea/src/branch/main/cmd/admin.go"
)
assert config.repo_ssh_locator == "https://gitea.com/gitea/tea.git"
# Deprecated: base_url.
with pytest.warns(ConfigurationWarning, match="base_url is deprecated"):
config = GitInfo.parse_obj(
dict(
repo="https://github.com/datahub-project/datahub",
branch="master",
base_url="http://mygithubmirror.local",
)
)
def test_github_branch():
config = GitInfo(
repo="owner/repo",
)
assert config.branch_for_clone is None
config = GitInfo(
repo="owner/repo",
branch="main",
)
assert config.branch_for_clone == "main"
def test_git_clone_public(tmp_path):
git_clone = GitClone(str(tmp_path))
checkout_dir = git_clone.clone(
ssh_key=None,
repo_url="https://gitlab.com/gitlab-tests/sample-project",
branch="90c439634077a85bcf42d38c2c79cd94664a94ad",
)
assert checkout_dir.exists()
assert set(os.listdir(checkout_dir)) == {
".git",
"README.md",
"hello_world.md",
"fork-sample-project.png",
}
@pytest.mark.skipif(
LOOKML_TEST_SSH_KEY is None,
reason="DATAHUB_LOOKML_GIT_TEST_SSH_KEY env variable is not configured",
)
def test_git_clone_private(tmp_path):
git_clone = GitClone(str(tmp_path))
secret_key = SecretStr(LOOKML_TEST_SSH_KEY) if LOOKML_TEST_SSH_KEY else None
checkout_dir = git_clone.clone(
ssh_key=secret_key,
repo_url="git@github.com:acryldata/long-tail-companions-looker",
branch="d380a2b777ec6f4653626f39c68dba85893faa74",
)
assert checkout_dir.exists()
assert set(os.listdir(checkout_dir)) == set(
[
".datahub",
"models",
"README.md",
".github",
".git",
"views",
"manifest_lock.lkml",
"manifest.lkml",
]
)
|
[
"noreply@github.com"
] |
RyanHolstien.noreply@github.com
|
2b811bc4a558ca355bc8465310d5280b36813baf
|
8ee8fe3c2acea497a85428bfb3dfde19e58b2bc3
|
/test-examples/issue_673_reproduce.py
|
aa8cc0108c7bfb6feebd605779c329efcf6936d2
|
[
"BSD-3-Clause"
] |
permissive
|
sofroniewn/image-demos
|
a6e46f08fd4ce621aa96d6b6378b50f63ac2b381
|
2eeeb23f34a47798ae7be0987182724ee3799eb8
|
refs/heads/master
| 2022-11-02T23:50:23.098830
| 2022-10-30T04:38:19
| 2022-10-30T04:38:19
| 179,378,745
| 11
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 729
|
py
|
"""
Test adding 4D followed by 5D image layers to the viewer
Initially only 2 sliders should be present, then a third slider should be
created.
"""
import numpy as np
from skimage import data, measure
import napari
import scipy.ndimage as ndi
image = data.binary_blobs(128, n_dim=3)
verts, faces, normals, values = (
measure.marching_cubes_lewiner(image.astype(float), level=0.7)
)
labels = ndi.label(image)[0]
vertex_labels = ndi.map_coordinates(labels, verts.T, order=0).astype(int)
with napari.gui_qt():
viewer = napari.view_surface((verts, faces, values))
surf_layer = viewer.add_surface((verts, faces, vertex_labels),
colormap='gist_earth')
viewer.dims.ndisplay = 3
|
[
"sofroniewn@gmail.com"
] |
sofroniewn@gmail.com
|
05f26dba9cb6933495ecf2bc530966099292e3d2
|
6c14069181f313e84eeb524dd495e3882156ef50
|
/samples/basic/executor/models/cisco-ios-xr/Cisco-IOS-XR-cfgmgr-rollback-act/nc-execute-xr-cfgmgr-rollback-act-42-ydk.py
|
9ef12861c74cd7aea090947f557c9240b9294995
|
[
"Apache-2.0"
] |
permissive
|
decolnz/ydk-py-samples
|
dde0fd64fd4df12a215588766a0f1fb8baf07fcd
|
7fa3f53c4d458c3332d372fb2fe3c46c5e036f07
|
refs/heads/master
| 2021-01-19T03:24:19.877929
| 2017-04-04T17:16:46
| 2017-04-04T17:16:46
| 87,310,389
| 1
| 0
| null | 2017-04-05T13:06:57
| 2017-04-05T13:06:57
| null |
UTF-8
|
Python
| false
| false
| 3,204
|
py
|
#!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Execute RPC for model Cisco-IOS-XR-cfgmgr-rollback-act.
usage: nc-execute-xr-cfgmgr-rollback-act-42-ydk.py [-h] [-v] device
positional arguments:
device NETCONF device (ssh://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import ExecutorService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_cfgmgr_rollback_act \
as xr_cfgmgr_rollback_act
import logging
def prepare_roll_back_configuration_to_exclude_rpc(roll_back_configuration_to_exclude_rpc):
"""Add RPC input data to roll_back_configuration_to_exclude_rpc object."""
# roll back to (but excluding) specific commit id
roll_back_configuration_to_exclude_rpc.input.comment = "Simple programmatic rollback"
roll_back_configuration_to_exclude_rpc.input.commit_id = "1000000010"
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="NETCONF device (ssh://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create NETCONF provider
provider = NetconfServiceProvider(address=device.hostname,
port=device.port,
username=device.username,
password=device.password,
protocol=device.scheme)
# create executor service
executor = ExecutorService()
roll_back_configuration_to_exclude_rpc = xr_cfgmgr_rollback_act.RollBackConfigurationToExcludeRpc() # create object
prepare_roll_back_configuration_to_exclude_rpc(roll_back_configuration_to_exclude_rpc) # add RPC input
# execute RPC on NETCONF device
executor.execute_rpc(provider, roll_back_configuration_to_exclude_rpc)
provider.close()
exit()
# End of script
|
[
"saalvare@cisco.com"
] |
saalvare@cisco.com
|
bf676537a89da5673703b3507daf8b5d7be47720
|
161d96f4b16e1cc7e473f12b7070aa9f5615e28e
|
/iternal/discord/cogs/admin/debug.py
|
d348403298838515aca40e1a29a7ddeae2b6b833
|
[] |
no_license
|
pikoUsername/prison-bot
|
be717d4d64eb18d922bf808bb9d48db659269318
|
c6b8df54e71a4f0fc0fa1568d9da070a290d43da
|
refs/heads/master
| 2023-04-02T18:46:52.034934
| 2021-04-13T05:07:41
| 2021-04-13T05:07:41
| 345,583,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,744
|
py
|
from discord.ext import commands as commands
from discord import Embed as Embed
from .utils import log as log
from iternal.discord.loader import _ as __, proj_root as proj_root
from iternal.store.prison import Prison as Prison
class Debugger(commands.Cog, name='pantry | Кладовка'):
"""
Used so we don't have to dig through the messy legacy code directly.
"""
__slots__ = "bot",
def __init__(self, bot):
self.bot = bot
@commands.command(name="show_logs", help=__("shows last logs"))
@commands.is_owner()
async def show_logs(self, ctx, file: str = None):
if not file:
last_log_path = log.last_file()
with open(proj_root / "logs" / last_log_path) as f:
result = f.read()
e = Embed(
title=last_log_path.name,
description=result
)
await ctx.send(embed=e)
return
e = Embed(
title=file,
description="".join(await log.read_log(
proj_root / "logs" / file
))
)
await ctx.send(embed=e)
@commands.group(name="i18n")
@commands.has_permissions(administrator=True)
async def change_guild_language(self, ctx, language: str):
try:
await Prison.change_lang(language, ctx.guild.id)
except TypeError as ex:
await ctx.send(embed=Embed(
title=__("Выбран неправильный язык"), description=f"```{str(ex)}```"
))
return
else:
await ctx.send(__("Успешно изменен язык на {language}").format(language))
|
[
"galymzhan.amantaj@gmail.com"
] |
galymzhan.amantaj@gmail.com
|
064c47c344a4c113a40b9dfe3e8c77cca1f181ac
|
415a8a4315e6331b2a157de8a1429fe0562729f8
|
/python/matplotlib/axisSciNotation.py
|
72ae2b7e657fd9d1a2ed94320b673af0cd30a29a
|
[] |
no_license
|
alfaceor/programming-examples
|
784690dd1104e4adbdf958e4163b3b462f635881
|
abea970a54cfab0eacc5280ae62383495e9e6eeb
|
refs/heads/master
| 2022-05-04T23:14:30.503114
| 2022-04-29T10:11:45
| 2022-04-29T10:11:45
| 36,015,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,197
|
py
|
#!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
###### BEGIN PLOT DECORATION VARIABLES
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 18}
plt.rc('font', **font)
plt.rc('text', usetex=True)
###### END PLOT DECORATION VARIABLES
XX = np.linspace(0.0000001, 0.000001, num=20 )
YY = np.linspace(100000, 100002, 20)
fig, ax = plt.subplots()
ax.set_xlabel("$\sqrt{x}$", size=30)
ax.tick_params(axis='x', labelsize=10)
ax.tick_params(axis='y', labelsize=10)
ax.ticklabel_format(useOffset=True, useMathText=True, style='sci', axis='x', scilimits=(0,0) )
ax.ticklabel_format( useMathText=True, style='sci', axis='y', scilimits=(0,0) )
#ax.ticklabel_format(useOffset=False)
offset = ax.yaxis.get_offset_text()
print(offset)
print(offset.get_position())
offset.set_x(0.2)
offset.set_y(1.0)
offset.set_fontsize(10)
print("get_text() = ", offset.get_text())
print("get_usetex() = ", offset.get_usetex())
print(offset.get_position())
offset.set_usetex("AAA")
print("get_text() = ", offset.get_text())
ax.plot(XX,YY)
#ax.xaxis.set_major_formatter(mtick.ScalarFormatter(useMathText=True))
fig.tight_layout()
plt.show()
|
[
"alfaceor"
] |
alfaceor
|
548b27946f87ef8704f9fc1a9ba223e08c830efa
|
6a3af6fe669b2e17db1fa7d0751cbc4e04948079
|
/fn_icdx/tests/test_icdx_helper.py
|
9b02b54530b75dba078fdc5e236bea2fe5278b8f
|
[
"MIT"
] |
permissive
|
jjfallete/resilient-community-apps
|
5f0a728fe0be958acc44d982bf0289959f84aa20
|
2e3c4b6102555517bad22bf87fa4a06341714166
|
refs/heads/master
| 2022-04-17T13:20:36.961976
| 2020-04-13T07:03:54
| 2020-04-13T07:03:54
| 169,295,943
| 1
| 0
|
MIT
| 2020-04-13T07:03:56
| 2019-02-05T19:06:57
|
Python
|
UTF-8
|
Python
| false
| false
| 5,963
|
py
|
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved.
"""Tests using pytest_resilient_circuits"""
from __future__ import print_function
import pytest
from fn_icdx.util.helper import ICDXHelper
class TestIcdxUtilitiesHelper:
""" Tests for the helper class"""
@pytest.mark.parametrize("input_dict", [
(({},{},{})),
(({'Malware SHA-256 Hash': u'E1B8TEST', 'File Path': u'c:\\workarea\\TEST', 'File Name': u'TEST'}, {}, {'Malware MD5 Hash': u'C1B07TEST'})),
(({'Malware SHA-256 Hash': u'E1B8TEST', 'File Path': u'c:\\workarea\\TEST', 'File Name': u'TEST'}, None))
])
def test_dictionary_merge(self, input_dict):
""" Test that the dictionary merge function works as expected.
The function should return a dictionary.
If one or all inputs are None or empty, that should not affect the return type."""
helper = ICDXHelper({})
result = helper.merge_dicts(input_dict)
assert(isinstance(result, dict))
@pytest.mark.parametrize("input_dict", [
(({}, {}, {})),
(({},None, {})),
(({}, None))
])
def test_dictionary_merge_failure(self, input_dict):
""" Test that the dictionary merge function handles empty inputs.
The function should return a dictionary.
If one or all inputs are None or empty, that should not affect the return type,
but if all inputs are None or empty the return should be falsy, i.e. an empty dict."""
helper = ICDXHelper({})
result = helper.merge_dicts(input_dict)
assert (isinstance(result, dict))
assert(bool(result) == False)
@pytest.mark.parametrize("search_result", [
({"name": "Test","file":{'sha2': u'E1B8TEST', 'path': u'c:\\workarea\\TEST', 'name': u'TEST', 'md5': u'C1B07TEST'}}),
({"name": "Test","file":{'sha2': u'E1B8TEST', 'path': u'c:\\workarea\\TEST'}}),
])
def test_file_artifact_parse_success(self, search_result):
""" Test that the file artifact function works as expected
The function should return a dictionary
With a known good input the return should be Truth-y (has elements)
"""
helper = ICDXHelper({})
result = helper.parse_for_file_artifacts(search_result)
assert (isinstance(result, dict))
assert (bool(result))
@pytest.mark.parametrize("search_result", [
({"name": "Test"}),
({"name": "Test", "file": {}}),
])
def test_file_artifact_parse_failure(self, search_result):
helper = ICDXHelper({})
result = helper.parse_for_file_artifacts(search_result)
assert (not result)
assert (bool(result) is False)
@pytest.mark.parametrize("search_result", [
({"name": "Test",
"connection": {'dst_ip':'192.168.1.1', 'dst_mac':'mac', 'dst_name':'test_conn', 'dst_port':80}}),
({"name": "Test", "connection": {'src_ip':'10.1.1.1', 'src_mac':'mac2', 'src_name':'test_conn2', 'src_port':8080}}),
])
def test_network_artifact_parse_success(self, search_result):
""" Test that the file artifact function works as expected
The function should return a dictionary
With a known good input the return should be Truth-y (has elements)
"""
helper = ICDXHelper({})
result = helper.parse_for_network_artifacts(search_result)
assert (isinstance(result, dict))
assert (bool(result))
@pytest.mark.parametrize("search_result", [
({"name": "Test"}),
({"name": "Test", "connection": {}}),
])
def test_network_artifact_parse_failure(self, search_result):
helper = ICDXHelper({})
result = helper.parse_for_network_artifacts(search_result)
assert (not result)
assert (bool(result) is False)
@pytest.mark.parametrize("search_result", [
({"name": "Test",
"email": {'header_subject': u'Important updates', 'header_from': u'testuser2', 'header_to': u'testuser2'}}),
({"name": "Test", "email": {'header_subject': u'New version of software', 'header_from': u'testuser'}}),
])
def test_email_artifact_parse_success(self, search_result):
""" Test that the file artifact function works as expected
The function should return a dictionary
With a known good input the return should be Truth-y (has elements)
"""
helper = ICDXHelper({})
result = helper.parse_for_email_artifacts(search_result)
assert (isinstance(result, dict))
assert (bool(result))
@pytest.mark.parametrize("search_result", [
({"name": "Test"}),
({"name": "Test", "email": {}}),
])
def test_email_artifact_parse_failure(self, search_result):
helper = ICDXHelper({})
result = helper.parse_for_email_artifacts(search_result)
assert (not result)
assert (bool(result) is False)
@pytest.mark.parametrize("search_result", [
({"name": "Test with email object ",
"connection": {'dst_ip': '10.1.1.1', 'dst_mac': 'mac', 'dst_name': 'test_conn', 'dst_port': 80}, 'email' :{'sender_ip':'127.0.0.1'}}),
({"name": "Test with email object 2",
"connection": {'src_ip': '10.1.1.1', 'src_mac': 'mac2', 'src_name': 'test_conn2', 'src_port': 8080},'email' :{'sender_ip':'127.0.0.1'}}),
])
def test_multi_artifact_parse_success(self, search_result):
""" Test that the file artifact function works as expected
The function should return a dictionary
With a known good input the return should be Truth-y (has elements)
"""
helper = ICDXHelper({})
result = helper.search_for_artifact_data(search_result)
assert (isinstance(result, dict))
assert (result is not None)
assert '10.1.1.1' in result['IP Address']
if 'email object' in search_result['name']:
assert '127.0.0.1' in result['IP Address']
|
[
"brianwal@us.ibm.com"
] |
brianwal@us.ibm.com
|
677afc8e9b4fedb3137341367891adad32492c94
|
00fc70953dd85f2699fc80cd7c59e9c472cbb90e
|
/test/test_retention_metadata.py
|
841367183a76561a116775360f9a0f8cb528dae3
|
[
"MIT"
] |
permissive
|
solidgoldbomb/python-harbor
|
89b270caaa6ac450245753c0a3a13af3ff96c142
|
4a12789a9712cc101abd3f8d32464bc8a474e0a4
|
refs/heads/main
| 2023-02-25T20:22:19.137094
| 2021-02-01T11:58:24
| 2021-02-01T11:58:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 880
|
py
|
# coding: utf-8
"""
Harbor API
These APIs provide services for manipulating the Harbor project.  # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import harbor
from harbor.models.retention_metadata import RetentionMetadata # noqa: E501
from harbor.rest import ApiException
class TestRetentionMetadata(unittest.TestCase):
"""RetentionMetadata unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testRetentionMetadata(self):
"""Test RetentionMetadata"""
# FIXME: construct object with mandatory attributes with example values
# model = harbor.models.retention_metadata.RetentionMetadata() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"angeiv.zhang@gmail.com"
] |
angeiv.zhang@gmail.com
|
a1a70d3b122ddbe41c7677983806a28e4eeed340
|
0b60be2c526149603fcbd3f1a40545ed5d6ed1fc
|
/uiautomator2/exceptions.py
|
c5cc3d53d859bec09c096e8a1a9a6578010f23ac
|
[
"MIT"
] |
permissive
|
levixie/uiautomator2
|
6f2d4eb1bd1e416dc6ad8a0e50f56dd2f1344530
|
7fa465601d747aad5c673d603486ad2145b9e860
|
refs/heads/master
| 2020-05-20T20:55:13.068591
| 2019-06-23T23:03:57
| 2019-06-23T23:03:57
| 185,751,403
| 0
| 0
|
MIT
| 2019-06-23T23:03:58
| 2019-05-09T07:46:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,802
|
py
|
# coding: utf-8
#
# class ATXError(Exception):
# pass
class UiaError(Exception):
pass
class ConnectError(UiaError):
pass
class XPathElementNotFoundError(UiaError):
pass
class GatewayError(UiaError):
def __init__(self, response, description):
self.response = response
self.description = description
def __str__(self):
return "uiautomator2.GatewayError(" + self.description + ")"
class JsonRpcError(UiaError):
@staticmethod
def format_errcode(errcode):
m = {
-32700: 'Parse error',
-32600: 'Invalid Request',
-32601: 'Method not found',
-32602: 'Invalid params',
-32603: 'Internal error',
-32001: 'Jsonrpc error',
-32002: 'Client error',
}
if errcode in m:
return m[errcode]
if errcode >= -32099 and errcode <= -32000:
return 'Server error'
return 'Unknown error'
def __init__(self, error=None, method=None):
error = error or {}  # guard against a shared mutable default argument
self.code = error.get('code')
self.message = error.get('message', '')
self.data = error.get('data', '')
self.method = method
def __str__(self):
return '%d %s: <%s> data: %s, method: %s' % (
self.code, self.format_errcode(
self.code), self.message, self.data, self.method)
def __repr__(self):
return repr(str(self))
class SessionBrokenError(UiaError):
""" only happens when app quit or crash """
class UiObjectNotFoundError(JsonRpcError):
pass
class UiAutomationNotConnectedError(JsonRpcError):
pass
class NullObjectExceptionError(JsonRpcError):
pass
class NullPointerExceptionError(JsonRpcError):
pass
class StaleObjectExceptionError(JsonRpcError):
pass
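# Added illustration of the error-code mapping defined in JsonRpcError above:
#   JsonRpcError.format_errcode(-32601)  -> 'Method not found'
#   JsonRpcError.format_errcode(-32050)  -> 'Server error' (reserved range)
#   JsonRpcError.format_errcode(12345)   -> 'Unknown error'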
|
[
"codeskyblue@gmail.com"
] |
codeskyblue@gmail.com
|
1b2d28b012a3ae9806e7d4ee05e0969f02935089
|
ed9e1b622dad6b559cd0fe6fa23d6a27f857dc7f
|
/galsim/deprecated/gsobject_ring.py
|
6a44668976b265c701e31f9d701d4287480961e9
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
ajwheeler/GalSim
|
40d6f8c64789b601ed2547eefed05f1577592613
|
cf0ef33e5f83da1b13a0617d362d8357056d6f22
|
refs/heads/master
| 2021-01-22T06:14:31.486159
| 2017-04-20T01:20:20
| 2017-04-20T01:20:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,029
|
py
|
# Copyright (c) 2012-2017 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
import galsim
import galsim.config
from galsim.deprecated import depr
# This file adds gsobject type Ring which builds an object once every n times, and then
# rotates it in a ring for the other n-1 times per group.
def _BuildRing(config, base, ignore, gsparams, logger):
"""@brief Build a GSObject in a Ring. Now deprecated.
"""
depr('gal.type = Ring', 1.4, 'stamp.type = Ring',
'The galaxy Ring type may not work properly in conjunction with image.nproc != 1. '+
'See demo5 and demo10 for examples of the new stamp type=Ring syntax.')
req = { 'num' : int, 'first' : dict }
opt = { 'full_rotation' : galsim.Angle , 'index' : int }
# Only Check, not Get. We need to handle first a bit differently, since it's a gsobject.
galsim.config.CheckAllParams(config, req=req, opt=opt, ignore=ignore)
num = galsim.config.ParseValue(config, 'num', base, int)[0]
if num <= 0:
raise ValueError("Attribute num for gal.type == Ring must be > 0")
# Setup the indexing sequence if it hasn't been specified using the number of items.
galsim.config.SetDefaultIndex(config, num)
index, safe = galsim.config.ParseValue(config, 'index', base, int)
if index < 0 or index >= num:
raise AttributeError("index %d out of bounds for config.%s"%(index,type))
if 'full_rotation' in config:
full_rotation = galsim.config.ParseValue(config, 'full_rotation', base, galsim.Angle)[0]
else:
import math
full_rotation = math.pi * galsim.radians
dtheta = full_rotation / num
if logger:
logger.debug('obj %d: Ring dtheta = %f',base['obj_num'],dtheta.rad())
if index % num == 0:
# Then this is the first in the Ring.
gsobject = galsim.config.BuildGSObject(config, 'first', base, gsparams, logger)[0]
else:
if not isinstance(config['first'],dict) or 'current_val' not in config['first']:
raise RuntimeError("Building Ring after the first item, but no current_val stored.")
gsobject = config['first']['current_val'].rotate(index*dtheta)
return gsobject, False
# Register this as a valid gsobject type
galsim.config.RegisterObjectType('Ring', _BuildRing, _is_block=True)
|
[
"michael@jarvis.net"
] |
michael@jarvis.net
|
9d492d34a91b9b6a2adaf147e43b85fd5e1810c3
|
6bf7149077f539ab599db1f717c93aca82724512
|
/static-and-class_metods/movie-world/customer.py
|
3aed9a54bd7db6ed12a7a149c4e8651417f97feb
|
[] |
no_license
|
KalinHar/OOP-Python-SoftUni
|
8b53e8b734b364878c5372525c4249fdd32f0899
|
9787eea7ab5101e887ed4aaeb0a8b3b80efcfdd7
|
refs/heads/master
| 2023-07-09T08:15:59.765422
| 2021-08-16T06:01:08
| 2021-08-16T06:01:19
| 380,813,294
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
class Customer:
def __init__(self, name, age, id):
self.name = name
self.age = age
self.id = id
self.rented_dvds = []
def __repr__(self):
return f"{self.id}: {self.name} of age {self.age} has {len(self.rented_dvds)} " \
f"rented DVD's ({', '.join([dvd.family_name for dvd in self.rented_dvds])})"
|
[
"kalix@abv.bg"
] |
kalix@abv.bg
|
13ecdc3abf138774ecd6ae0a21a5abc0bc3c11a0
|
a321652061fd5c389490030f898a0191a35f21c3
|
/managers/operatorsTranscribe/metadata_filegenerator.py
|
d22dd980ee14d63ef5b5c619fa7a92e2f3597a43
|
[
"BSD-3-Clause"
] |
permissive
|
cerebunit/cerebmodels
|
5c4b1485e68d078988ff7057385309a26e05415a
|
316d69d7aed7a0292ce93c7fea20473e48cfce60
|
refs/heads/master
| 2023-02-18T06:45:08.716877
| 2022-07-18T11:34:13
| 2022-07-18T11:34:13
| 139,698,484
| 0
| 1
|
BSD-3-Clause
| 2023-02-10T21:20:07
| 2018-07-04T09:15:24
|
AMPL
|
UTF-8
|
Python
| false
| false
| 7,147
|
py
|
# ~/managers/operatorsTranscribe/metadata_filegenerator.py
import platform
import uuid
import time
from datetime import datetime
from dateutil.tz import tzlocal
class FileGenerator(object):
"""
**Available Methods:**
+---------------------------------+----------------------------------+
| Method name | Method type |
+=================================+==================================+
| :py:meth:`.forfile` | class method |
+---------------------------------+----------------------------------+
| :py:meth:`.get_modelID` | static method |
+---------------------------------+----------------------------------+
| :py:meth:`.get_username` | static method |
+---------------------------------+----------------------------------+
| :py:meth:`.get_testdescription` | static method |
+---------------------------------+----------------------------------+
| :py:meth:`.get_labelname` | static method |
+---------------------------------+----------------------------------+
| :py:meth:`.get_institution` | static method |
+---------------------------------+----------------------------------+
"""
@staticmethod
def get_modelID(model):
"""Try extracting the ``uuid`` attribute value of the model and return it or return ``"no_model_uuid"``.
**Argument:** The instantiated model is passed into this function.
"""
try:
uuid_value = getattr(model, 'uuid')
return uuid_value[:8]+"_"+uuid_value[-12:]
except:
return "no_model_uuid"
@staticmethod
def get_username(username):
"""Returns the username (also argument passed as a string) or returns "anonymous" if argument is ``None``.
"""
if username is None:
return "anonymous"
else:
return username
@staticmethod
def get_testdescription(test):
"""Returns the string "raw simulation without running any CerebUnit test" if the argument is ``None`` otherwise it returns the attribute ``.description`` of the argument.
*Note:* The argument should be the test metadata.
"""
if test is None:
return "raw simulation without running any CerebUnit test"
else:
return test.description
@staticmethod
def get_labname(labname):
"""Returns the string "no lab name was provided" if the argument is ``None`` otherwise it returns the attribute the argument (a string) itself.
*Note:* The argument should be the name of the laboratory.
"""
if labname is None:
return platform.platform()
else:
return labname
@staticmethod
def get_institution(instname):
"""Returns the string "no institution was provided" if the argument is ``None`` otherwise it returns the attribute the argument (a string) itself.
*Note:* The argument should be the name of the institute.
"""
if instname is None:
return "no institution was provided"
else:
return instname
@classmethod
def forfile( cls, chosenmodel=None, simtime=None, vtest=None,
username=None, labname=None, institutename=None ):
"""Creates the `NWB <https://www.nwb.org/>`_ formatted metadata for an intended file to be saved.
**Keyword Arguments:**
+------------------------------+--------------------------------------------+
| Key | Value type |
+==============================+============================================+
| ``chosenmodel`` | instantiated model |
+------------------------------+--------------------------------------------+
| ``simtime`` | datetime.datetime when simulation started |
+------------------------------+--------------------------------------------+
| ``vtest`` (optional) | instantiated validation ``CerebUnit`` test |
+------------------------------+--------------------------------------------+
| ``username`` (optional) | string |
+------------------------------+--------------------------------------------+
| ``labname`` (optional) | string |
+------------------------------+--------------------------------------------+
| ``institutename`` (optional) | string |
+------------------------------+--------------------------------------------+
**Returned value:** It is a dictionary of the form
::
{ "session_description": string;
"identifier": string,
"session_start_time": datetime,
"experimenter": string,
"experiment_description": string,
"session_id": string,
"lab": string,
"institution": string }
*NOTE:*
        * ``vtest`` is not given for raw simulation of the chosenmodel
* http://pynwb.readthedocs.io/en/latest/pynwb.file.html#pynwb.file.NWBFile
**Use case:**
``>> fg = FileGenerator()``
``>> model = Xyz()``
For simulation without validation test
        ``>> filemd = fg.forfile(chosenmodel=model, simtime=datetime.datetime.now())``
        For simulation with validation test
        ``>> vtest = SomeTest()``
        ``>> filemd = fg.forfile(chosenmodel=model, simtime=datetime.datetime.now(), vtest=vtest, username='john', labname='hbp brain sim lab', institutename='CNRS-UNIC')``
"""
        if chosenmodel is None or simtime is None:
            raise ValueError("passing an instantiated chosenmodel and datetime is mandatory")
else:
return {
#'source': platform.platform(), #string. No longer part of NWB2.0
# Required
'session_description': "simulation of " + chosenmodel.modelname,
'identifier': cls.get_modelID(chosenmodel), #string
'session_start_time': datetime(simtime.year, simtime.month, simtime.day,
simtime.hour, simtime.minute, simtime.second,
simtime.microsecond, tzinfo=tzlocal()),
#'session_start_time': time.asctime(time.gmtime(time.time()))+' '+time.tzname[0],
# Optional
'experimenter': cls.get_username(username), #string
'experiment_description': cls.get_testdescription(vtest), #string
'session_id': str(hash(str(uuid.uuid1()))).replace('-',''), # remove any minus
'lab': cls.get_labname(labname), #string
'institution': cls.get_institution(institutename) }
|
[
"neuralgraphs@gmail.com"
] |
neuralgraphs@gmail.com
|
6decd7a44b88980b1b2eb4cf82a52ae9b74c63df
|
4c499782655f8e929a5dd6b39d6c5d378fcfd7bd
|
/2_7_grid.py
|
4c2dab664014608b73819477edbe701451b3cf3f
|
[] |
no_license
|
IanCBrown/practice_questions
|
53a3fd66bee807f6e30e6d57632966f146c704c9
|
838b94c26cd3c26b76c3908277944a3b5f9bc7c7
|
refs/heads/master
| 2021-08-06T07:14:57.237709
| 2020-04-16T05:03:33
| 2020-04-16T05:03:33
| 149,521,025
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,705
|
py
|
from __future__ import absolute_import
import sys
grid = []
def bfs(grid, n, m):
visited = []
q = [(0,0)]
while q:
node = q.pop(0)
if node[0] == n and node[1] == m:
visited.append(node)
return visited
if node not in visited:
visited.append(node)
neighbors = get_neighbors(node[0], node[1], grid[node[0]][node[1]], n, m)
for neighbor in neighbors:
q.append(neighbor)
def solution(grid, n, m):
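    # BFS over (row, col) cells, where grid[r][c] is the jump length from that
    # cell; returns the shortest path from (0, 0) to (n, m), or -1 if unreachable.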
visited = []
q = [[(0,0)]]
while q:
path = list(q.pop(0))
node = path[-1]
if node not in visited:
neighbors = get_neighbors(node[0], node[1], grid[node[0]][node[1]], n, m)
for neighbor in neighbors:
new_path = list(path)
new_path.append(neighbor)
q.append(new_path)
if neighbor[0] == n and neighbor[1] == m:
return new_path
visited.append(node)
return -1
def get_neighbors(row, col, value, n, m):
ret = []
if row + value <= n:
ret.append((row + value, col))
if row - value >= 0:
ret.append((row - value, col))
if col + value <= m:
ret.append((row, col + value))
if col - value >= 0:
ret.append((row, col - value))
return ret
def main():
dim = [int(x) for x in raw_input().split()]
n = dim[0]
m = dim[1]
grid = [[int(x) for x in list(line.strip())] for line in sys.stdin.readlines()]
path = solution(grid, n - 1, m - 1)
if path == -1:
print -1
else:
print len(path) - 1
if __name__ == u"__main__":
main()
|
[
"icb0004@auburn.edu"
] |
icb0004@auburn.edu
|
bdc8891fa9754d6a88a31b22a34e0d3fbb425a84
|
2d4380518d9c591b6b6c09ea51e28a34381fc80c
|
/CIM16/IEC61970/Informative/InfLoadControl/LoadShedFunction.py
|
026cb4a998ab4aeff40560258328b4003d84f7c7
|
[
"MIT"
] |
permissive
|
fran-jo/PyCIM
|
355e36ae14d1b64b01e752c5acd5395bf88cd949
|
de942633d966bdf2bd76d680ecb20517fc873281
|
refs/heads/master
| 2021-01-20T03:00:41.186556
| 2017-09-19T14:15:33
| 2017-09-19T14:15:33
| 89,480,767
| 0
| 1
| null | 2017-04-26T12:57:44
| 2017-04-26T12:57:44
| null |
UTF-8
|
Python
| false
| false
| 2,090
|
py
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM16.IEC61970.Informative.InfLoadControl.LoadMgmtFunction import LoadMgmtFunction
class LoadShedFunction(LoadMgmtFunction):
"""A kind of LoadMgmtFunction that sheds a part of the customer load.A kind of LoadMgmtFunction that sheds a part of the customer load.
"""
def __init__(self, switchedLoad=0.0, *args, **kw_args):
"""Initialises a new 'LoadShedFunction' instance.
@param switchedLoad: The value of the load that is connected to the shedding switch. Typically this is a noted nominal value rather than a measured value.
"""
#: The value of the load that is connected to the shedding switch. Typically this is a noted nominal value rather than a measured value.
self.switchedLoad = switchedLoad
super(LoadShedFunction, self).__init__(*args, **kw_args)
_attrs = ["switchedLoad"]
_attr_types = {"switchedLoad": float}
_defaults = {"switchedLoad": 0.0}
_enums = {}
_refs = []
_many_refs = []
|
[
"fran_jo@hotmail.com"
] |
fran_jo@hotmail.com
|
97f2b7fc974fd31f932b8119a1893c6cdcfbf035
|
55a273347cb103fe2b2704cb9653956956d0dd34
|
/code/tmp_rtrip/encodings/cp1257.py
|
f99bc227bb34ce2c00c56c2c65c9741d841809b6
|
[
"MIT"
] |
permissive
|
emilyemorehouse/ast-and-me
|
4af1bc74fc967ea69ac1aed92664f6428acabe6a
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
refs/heads/master
| 2022-11-18T03:50:36.505882
| 2018-05-12T17:53:44
| 2018-05-12T17:53:44
| 115,035,148
| 25
| 1
|
MIT
| 2022-11-04T11:36:43
| 2017-12-21T18:27:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,767
|
py
|
""" Python Character Mapping Codec cp1257 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1257.TXT' with gencodec.py.
"""
import codecs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(name='cp1257', encode=Codec().encode, decode=
Codec().decode, incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder, streamreader=StreamReader,
streamwriter=StreamWriter)
decoding_table = (
'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f€\ufffe‚\ufffe„…†‡\ufffe‰\ufffe‹\ufffe¨ˇ¸\ufffe‘’“”•–—\ufffe™\ufffe›\ufffe¯˛\ufffe\xa0\ufffe¢£¤\ufffe¦§Ø©Ŗ«¬\xad®Æ°±²³´µ¶·ø¹ŗ»¼½¾æĄĮĀĆÄÅĘĒČÉŹĖĢĶĪĻŠŃŅÓŌÕÖ×ŲŁŚŪÜŻŽßąįāćäåęēčéźėģķīļšńņóōõö÷ųłśūüżž˙'
)
encoding_table = codecs.charmap_build(decoding_table)
|
[
"emily@cuttlesoft.com"
] |
emily@cuttlesoft.com
|
cff6911a30320c0e916ef582ab4652791fb96ef3
|
3b07edf0cd5c546a09ef79fd93ebfd2b04e162e1
|
/data/ebs-ai-basic/2/2/05.py
|
09b0cd80cc813b63c21f1f472eb40eb13a01d637
|
[] |
no_license
|
neverlish/Learned
|
603b41f7c6ba3cf4e5eea162f501fc42f8326aa3
|
47f9160c2e516c8b4d1692f1f7dbf200f1cadbb6
|
refs/heads/master
| 2023-06-24T06:03:35.848932
| 2023-06-10T11:38:53
| 2023-06-10T11:38:53
| 78,947,372
| 8
| 1
| null | 2023-09-14T05:26:47
| 2017-01-14T15:12:57
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,670
|
py
|
# Visualize the difference between two images using matrix subtraction
import turtle
import numpy as np
pixelSize = 10
def putPixel(x, y, pSize, pCol):
turtle.penup()
turtle.goto(x*pSize, (-1)*y*pSize)
turtle.pendown()
turtle.begin_fill()
turtle.fillcolor(pCol)
turtle.setheading(45)
turtle.circle(pSize/2, steps=4)
turtle.end_fill()
faceImg = np.array( # image-data matrix for shape (a) on page 46
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
smileImg = np.array( # image-data matrix for shape (b) on page 46
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 2, 2, 0, 0, 2, 2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 2, 0, 2, 0, 2, 0, 2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 2, 2, 2, 0, 2, 2, 2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0],
[0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
diffImage = np.array(faceImg - smileImg)
for j in range(0, 16):
for i in range(0, 16):
if (diffImage[j][i] > 0):
putPixel(i, j, pixelSize, "orange")
else:
            # draw white where the array element is 0
putPixel(i, j, pixelSize, "white")
|
[
"noreply@github.com"
] |
neverlish.noreply@github.com
|
4d7128e1848b65dbcfc95770e62a47980627bd14
|
a463f5858c663199b6f6e38d9b2dc93e9a9ae730
|
/problem/2003/00_200316/4522_세상의모든팰린드롬.py
|
e11adf0825d5dc9730b7dab4969ef9b58acbd159
|
[] |
no_license
|
do-park/swexpertacademy
|
4993f79e3a73697ecdc71e0f654306466626b00b
|
7cbbb0957ce5191cb44cd35094da5b0d29783e49
|
refs/heads/master
| 2020-12-22T19:26:35.257666
| 2020-10-19T02:02:32
| 2020-10-19T02:02:32
| 236,907,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
for tc in range(1, int(input()) + 1):
flag = ['Not exist', 'Exist']
f = 1
string = list(map(str, input()))
for i in range(0, len(string) // 2):
if string[i] != string[len(string) - i - 1]:
if string[i] == '?' or string[len(string) - i - 1] == '?':
continue
else:
f = 0
break
print(f'#{tc} {flag[f]}')
|
[
"dohee.pa@gmail.com"
] |
dohee.pa@gmail.com
|
d6391f42975e1fd64be45e9ba6750c4ab0765042
|
bb06411e37bcba25a89bd523133a4e4d083558f1
|
/sdk/python/pulumi_xyz/provider.py
|
aba9d6d647b165a5401f010edb5b6ccd24ccc548
|
[
"Apache-2.0"
] |
permissive
|
zelarhq/pulumi-provider-boilerplate-openapi
|
7410b57bc220bee9c7095d0a6e9d8e8250ff5620
|
aec2430937a0c5df072c713f4641ea5ba128569f
|
refs/heads/master
| 2023-08-14T19:14:58.092965
| 2021-06-03T13:32:23
| 2021-06-03T13:32:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,776
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ProviderArgs', 'Provider']
@pulumi.input_type
class ProviderArgs:
def __init__(__self__):
"""
The set of arguments for constructing a Provider resource.
"""
pass
class Provider(pulumi.ProviderResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
__props__=None):
"""
Create a Xyz resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[ProviderArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a Xyz resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param ProviderArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ProviderArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ProviderArgs.__new__(ProviderArgs)
super(Provider, __self__).__init__(
'xyz',
resource_name,
__props__,
opts)
|
[
"github@mikhail.io"
] |
github@mikhail.io
|
87ea8efb7076d210da8faaf220c1197c97519945
|
a9bd335b3948d707a6785490e132d59e375c7242
|
/Problem 101 - 200/P107.py
|
a61e07fcfa2ee8db5f4498938d386c14f1f48737
|
[] |
no_license
|
xzguy/LeetCode
|
cc52f5329843bb2f692cea131c20b629af910bab
|
54d99f5e54391ea6ed467e2b5c984848a918dc2a
|
refs/heads/main
| 2023-03-25T17:50:50.173621
| 2021-03-28T16:06:03
| 2021-03-28T16:06:03
| 331,409,968
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,302
|
py
|
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def levelOrderBottom(self, root: TreeNode) -> [[int]]:
if not root:
return []
queue = [root]
res = []
while queue:
q_len = len(queue)
lvl_list = []
for _ in range(q_len):
node = queue.pop(0)
lvl_list.append(node.val)
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
res.insert(0, lvl_list)
return res
def levelOrderBottom_1(self, root: TreeNode) -> [[int]]:
stack = [(root, 0)]
res = []
while stack:
node, level = stack.pop()
if node:
if len(res)-1 < level:
res.insert(0, [])
res[-(level+1)].append(node.val)
stack.append((node.right, level+1))
stack.append((node.left, level+1))
return res
t = TreeNode(3)
t.left = TreeNode(9)
t.right = TreeNode(20)
t.left.left = TreeNode(15)
t.right.right = TreeNode(7)
sol = Solution()
print(sol.levelOrderBottom_1(t))
|
[
"xuzhou.guy@gmai"
] |
xuzhou.guy@gmai
|
1dd8bdd14d661670b6c8fefa7039e907d5c112bc
|
e32ee307e4c59cc18f9dea18d797784a1b23148f
|
/removing the longest palindrome from it..py
|
6f68218a0b80234c2729e290897466feffb27853
|
[] |
no_license
|
GuhanSGCIT/SGCIT
|
f4ab44346186d45129c74cbad466c6614f9f0f08
|
8b2e5ccf693384aa22aa9d57f39b63e4659f6261
|
refs/heads/master
| 2020-07-11T05:47:54.033120
| 2020-07-07T05:02:41
| 2020-07-07T05:02:41
| 204,459,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,287
|
py
|
# Python 3 implementation to find
# minimum number of deletions
# to make a string palindromic
# Returns the length of
# the longest palindromic
# subsequence in 'str'
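# Example: for "aebcbda" the longest palindromic
# subsequence is "abcba", so lps("aebcbda") returns 5.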
def lps(str):
n = len(str)
# Create a table to store
# results of subproblems
L = [[0 for x in range(n)]for y in range(n)]
# Strings of length 1
# are palindrome of length 1
for i in range(n):
L[i][i] = 1
# Build the table. Note that
# the lower diagonal values
# of table are useless and
# not filled in the process.
# c1 is length of substring
for cl in range( 2, n+1):
for i in range(n - cl + 1):
j = i + cl - 1
if (str[i] == str[j] and cl == 2):
L[i][j] = 2
elif (str[i] == str[j]):
L[i][j] = L[i + 1][j - 1] + 2
else:
L[i][j] = max(L[i][j - 1],L[i + 1][j])
# length of longest
# palindromic subseq
return L[0][n - 1]
# function to calculate
# minimum number of deletions
def minimumNumberOfDeletions( str):
n = len(str)
# Find longest palindromic
# subsequence
l = lps(str)
# After removing characters
# other than the lps, we
# get palindrome.
return (l)
# Driver Code
if __name__ == "__main__":
str = input()
print(minimumNumberOfDeletions(str))
|
[
"noreply@github.com"
] |
GuhanSGCIT.noreply@github.com
|
8aefb0884aeec07500f70bb238b221cef9a1b21b
|
cbd2eee46663fad5b5375b13c8c21b1b06eb4c6b
|
/python_test/except.py
|
a006780e3fc9b3e0aafc7e7df11473ee0268902a
|
[] |
no_license
|
1026237416/Python
|
ef474ee40d7efcd6dabb6fb0ecba81b4dcfc7e14
|
ffa8f9ffb8bfec114b0ca46295db05c4213c4c30
|
refs/heads/master
| 2021-07-05T00:57:00.456886
| 2019-04-26T10:13:46
| 2019-04-26T10:13:46
| 114,510,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
s=raw_input("Please input your age:")
if s == "":
raise Exception("Input must no be empty.")
try:
i = int(s)
except ValueError:
print "Could not convert data to an integer."
except:
print "Unknow exception!"
else:
print "You are %d"%i, "years old"
finally:
print "Goodbye"
|
[
"1026237416@qq.com"
] |
1026237416@qq.com
|
f96f89d9b4a367edb8f8dbf294ee4d21024402b0
|
283464882733bf4fb9d98c74f46279e98cfd20f9
|
/calenguay/events/views.py
|
20e252ce8b369cbac61b1125d1ab5f7e54873b6a
|
[] |
no_license
|
mariocesar/calenguay
|
8647734f3fdf3fdeacca44f493b7128be9de54f0
|
f1759a2a0c35bfc82976b36d7cea65e10a81f572
|
refs/heads/master
| 2021-09-25T11:43:08.339647
| 2020-03-24T22:55:57
| 2020-03-24T22:55:57
| 249,537,405
| 0
| 0
| null | 2021-09-22T18:46:55
| 2020-03-23T20:30:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,560
|
py
|
from django import forms
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.shortcuts import get_object_or_404, redirect, render
from calenguay.events.models import Category, Event, EventType
def landing(request):
categories = Category.objects.all()
return render(request, "landing.html", {"categories": categories})
def category_detail(request, pk):
category = get_object_or_404(Category, pk=pk)
event_types = EventType.objects.filter(user__in=category.users.all())
return render(
request,
"categories/detail.html",
{"category": category, "event_types": event_types},
)
def eventtype_detail(request, pk):
event_type = get_object_or_404(EventType, pk=pk)
return render(request, "eventtypes/detail.html", {"event_type": event_type})
class AppointmentForm(forms.Form):
start_at = forms.DateTimeField()
def eventtype_make_appointment(request, pk):
event_type = get_object_or_404(EventType, pk=pk)
form = AppointmentForm(request.POST or None)
if form.is_valid():
event = Event(
eventtype=event_type,
start_at=form.cleaned_data["start_at"],
user=request.user,
)
try:
event.full_clean()
except ValidationError as err:
messages.error(request, repr(err))
else:
event.save()
else:
messages.error(request, repr(form.errors))
return redirect("eventtype_detail", pk=pk)
|
[
"mariocesar.c50@gmail.com"
] |
mariocesar.c50@gmail.com
|
49606bc5d3a8da0cae3a8792f1f6867089dfb5f2
|
f9d3ee7b3b203d23f9ef2c95055e1d26bfddd74b
|
/nn/embedding/embedding.py
|
29d3ea957df9c0aae268444adcd8460d4784fb39
|
[
"MIT"
] |
permissive
|
kefirski/amt
|
ec4b1d4f2bb9a0103c82ef3c09357894272b44f4
|
6dcca5743ea8750a740c569181ec6998352ef784
|
refs/heads/master
| 2021-05-12T19:26:27.756474
| 2018-01-14T19:56:24
| 2018-01-14T19:56:24
| 117,093,089
| 28
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,362
|
py
|
from math import sqrt
import numpy as np
import torch as t
import torch.nn as nn
from gensim.models.keyedvectors import KeyedVectors
from torch.autograd import Variable
class Embeddings(nn.Module):
def __init__(self, path, vocab_size, max_len, h_size):
super(Embeddings, self).__init__()
self.vocab_size = vocab_size
self.max_len = max_len
self.h_size = h_size
self.token_embeddings = nn.Embedding(vocab_size, h_size)
self.positional_embeddings = nn.Embedding(int(max_len), h_size, padding_idx=0)
self._token_embedding_init(path)
self._position_embedding_init()
def forward(self, input):
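        # A 3-D input (batch, seq, vocab) is treated as a soft distribution over
        # the vocabulary, so embeddings come from a matmul with the embedding
        # matrix instead of an index lookup.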
mixed = len(input.size()) == 3
batch_size, seq_len, *_ = input.size()
positional = Variable(t.LongTensor([i for i in range(1, seq_len + 1)])).repeat(batch_size).view(batch_size, -1)
if input.is_cuda:
positional = positional.cuda()
if mixed:
input = input.view(batch_size * seq_len, -1)
return t.mm(input, self.token_embeddings.weight).view(batch_size, seq_len, -1) + \
self.positional_embeddings(positional)
else:
return self.token_embeddings(input) + self.positional_embeddings(positional)
def _randn_embed(self):
return np.random.randn(self.h_size) / sqrt(self.h_size)
def _token_embedding_init(self, path):
"""
:param path: Path to pretrained embeddings for each index in vocabulary
"""
keyed_vectors = KeyedVectors.load_word2vec_format(path, binary=True)
embeddings = np.array([keyed_vectors.wv[str(idx)] if str(idx) in keyed_vectors.vocab else self._randn_embed()
for idx in range(self.vocab_size)])
self.token_embeddings.weight = nn.Parameter(t.from_numpy(embeddings).float(), requires_grad=False)
def _position_embedding_init(self):
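        # Sinusoidal positional encoding (as in "Attention Is All You Need"):
        # even channels get sin, odd channels get cos; position 0 stays all-zero
        # because it doubles as the padding index.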
encoding = np.array([
[pos / np.power(10000, 2 * i / self.h_size) for i in range(self.h_size)]
if pos != 0 else np.zeros(self.h_size) for pos in range(self.max_len)
])
encoding[1:, 0::2] = np.sin(encoding[1:, 0::2])
encoding[1:, 1::2] = np.cos(encoding[1:, 1::2])
self.positional_embeddings.weight = nn.Parameter(t.from_numpy(encoding).float(), requires_grad=False)
|
[
"olavursky@gmail.com"
] |
olavursky@gmail.com
|
0845d1f075a7811404b355d17f30f0b0df9abc6c
|
045cb1a5638c3575296f83471758dc09a8065725
|
/addons/whatsapp_live/__manifest__.py
|
38438a168cf3acf2f7b5d8951b7acb880a863574
|
[] |
no_license
|
marionumza/saas
|
7236842b0db98d1a0d0c3c88df32d268509629cb
|
148dd95d991a348ebbaff9396759a7dd1fe6e101
|
refs/heads/main
| 2023-03-27T14:08:57.121601
| 2021-03-20T07:59:08
| 2021-03-20T07:59:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2019 GTICA C.A. - Ing Henry Vivas
{
'name': 'Whatsapp Connect Chat Live',
'summary': 'Marketing, Sale, connect Chat Whatsapp live for your business',
'version': '13.0.1.0.0',
'category': 'Website',
'author': 'Harpiya Software Technologies',
'support': 'info@harpiya.com',
'license': 'AGPL-3',
'website': 'http://harpiya.com/',
'depends': [
'web',
'website',
],
'data': [
'views/res_config_settings.xml',
'views/assets.xml',
'views/view_website_whatsapp.xml',
],
'images': ['static/description/main_screenshot.png'],
'application': False,
'installable': True,
}
|
[
"yasir@harpiya.com"
] |
yasir@harpiya.com
|
4bcaa3376d450b8aa1c54c76e3908d3796c1edd3
|
68bfd128be73ab94ce3b2084585d2957376477d5
|
/use_cos_loss/data_process.py
|
b71008b8eac28d7d60168c393e09fd653501c2c9
|
[] |
no_license
|
SunnyangBoy/Table_Derection
|
88ceaab3c5f9f8b5ca5d62b7cdb8bffe6e5520b0
|
35eb4d48858f5aaabe4e46602b169acf67aca2d0
|
refs/heads/master
| 2022-10-21T08:20:22.546040
| 2020-06-17T15:46:30
| 2020-06-17T15:46:30
| 273,015,655
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,758
|
py
|
import cv2
import numpy as np
import os
import math
def avg_count(img):
sum_r = 0
sum_g = 0
sum_b = 0
for i in range(10):
for j in range(10):
sum_r += img[i][j][0]
sum_g += img[i][j][1]
sum_b += img[i][j][2]
return sum_r/100, sum_g/100, sum_b/100
def avg(img1, img2, img3, img4):
r1, g1, b1 = avg_count(img1)
r2, g2, b2 = avg_count(img2)
r3, g3, b3 = avg_count(img3)
r4, g4, b4 = avg_count(img4)
return (r1+r2+r3+r4)//4, (g1+g2+g3+g4)//4, (b1+b2+b3+b4)//4
def rotate_bound(image, angle):
# grab the dimensions of the image and then determine the
# center
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
avg_r, avg_g, avg_b = avg(image[0:10, 0:10], image[0:10, w-10:w], image[h-10:h, 0:10], image[h-10:h, w-10:w])
# perform the actual rotation and return the image
return cv2.warpAffine(image, M, (nW, nH), borderValue=(avg_r, avg_g, avg_b))
def crop_image(image, resize):
ratio = np.random.uniform(0.6, 1.0)
print('ratio ', ratio)
h, w = image.shape[:2]
print(h, w)
c_w, c_h = w // 2, h // 2
n_w, n_h = int((w * ratio) / 2), int((h * ratio) / 2)
if (w * ratio) < resize or (h * ratio) < resize:
return image
crop_img = image[c_h - n_h:c_h + n_h, c_w - n_w:c_w + n_w]
return crop_img
if __name__ == '__main__':
pre_num = 6
create_txt = '/home/ubuntu/cs/table_derection_tf2/use_cos_loss/train_data/train_angles.txt'
create_images = '/home/ubuntu/cs/table_derection_tf2/use_cos_loss/train_data/rot_images'
root_dir = '/home/ubuntu/cs/table_derection_tf2/use_cos_loss/train_data/images'
file_names = sorted(os.listdir(root_dir))
rand_angle = np.random.uniform(0, 359, (len(file_names)) * pre_num)
# print("rand_angels ", rand_angle)
rand_angle = rand_angle * math.pi / 180.0
# print("rand_angels ", rand_angle)
print('len: ', len(file_names))
cnt = 0
resize = 600
with open(create_txt, 'w') as writer:
for i, file_name in enumerate(file_names):
print(file_name)
img_path = os.path.join(root_dir, file_name)
image = cv2.imread(img_path)
new_name = file_name[:-4] + '_10' + '.jpg'
new_imgPath = os.path.join(create_images, new_name)
new_image = cv2.resize(image, (resize, resize))
cv2.imwrite(new_imgPath, new_image)
line = new_name + ';' + '0.0000' + '\n'
writer.write(line)
new_image = rotate_bound(image, 90)
new_image = cv2.resize(new_image, (resize, resize))
new_name = file_name[:-4] + '_11' + '.jpg'
new_imgPath = os.path.join(create_images, new_name)
cv2.imwrite(new_imgPath, new_image)
line = new_name + ';' + '1.5708' + '\n'
writer.write(line)
new_image = rotate_bound(image, 180)
new_image = cv2.resize(new_image, (resize, resize))
new_name = file_name[:-4] + '_12' + '.jpg'
new_imgPath = os.path.join(create_images, new_name)
cv2.imwrite(new_imgPath, new_image)
line = new_name + ';' + '3.1416' + '\n'
writer.write(line)
new_image = rotate_bound(image, 270)
new_image = cv2.resize(new_image, (resize, resize))
new_name = file_name[:-4] + '_13' + '.jpg'
new_imgPath = os.path.join(create_images, new_name)
cv2.imwrite(new_imgPath, new_image)
line = new_name + ';' + '4.7124' + '\n'
writer.write(line)
for j in range(pre_num):
angle = rand_angle[cnt]
cnt += 1
new_image = rotate_bound(image, angle / math.pi * 180.0)
new_image = crop_image(new_image, resize)
new_image = cv2.resize(new_image, (resize, resize))
new_name = file_name[:-4] + '_' + str(j).zfill(2) + '.jpg'
new_imgPath = os.path.join(create_images, new_name)
cv2.imwrite(new_imgPath, new_image)
line = new_name + ';' + str(angle) + '\n'
writer.write(line)
writer.close()
|
[
"ubuntu@localhost.localdomain"
] |
ubuntu@localhost.localdomain
|
b372f136945c7c140f928e3fcc06b705264dd82a
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2606/60797/247414.py
|
a53d3bf72630c68d1fad189adb9a6e4ed7f2c6f0
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
def search(nums, target):
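    # Plain binary search; assumes `target` occurs in the sorted list `nums`
    # and returns its index, e.g. search([1, 3, 5, 7], 5) == 2.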
left = 0
right = len(nums) - 1
i = (left + right) // 2
while target != nums[i]:
if target < nums[i]:
right = i - 1
i = (left + right) // 2
else:
left = i + 1
i = (left + right) // 2
return i
if __name__ == "__main__":
nums = [int(a) for a in input().strip("[]").split(",")]
target = int(input())
re = search(nums, target)
print(re)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
feff5ab73695206b96ceecd566fb5f3a48277960
|
685f4474699d769dae88537c69f5517ac13a8431
|
/EL266.py
|
03916d472efa7cdc10b541a5ebdc329e8c3f3e82
|
[] |
no_license
|
Pumafied/Project-Euler
|
7466f48e449b7314598c106398c0be0424ae72d5
|
0c3e80a956893ce1881a9694131d52b156b9d3d8
|
refs/heads/master
| 2016-09-05T22:45:09.733696
| 2013-04-20T04:46:48
| 2013-04-20T04:46:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
# The divisors of 12 are: 1,2,3,4,6 and 12.
# The largest divisor of 12 that does not exceed the square root of 12 is 3.
# We shall call the largest divisor of an integer n that does not exceed the square root of n the pseudo square root (PSR) of n.
# It can be seen that PSR(3102)=47.
# Let p be the product of the primes below 190.
# Find PSR(p) mod 10^16.
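# A minimal brute-force sketch (an illustration added here, not the intended
# solution; it is far too slow for the actual p) that checks the definition
# on the small cases quoted above:
def psr(n):
    d = int(n ** 0.5)  # start at floor(sqrt(n)) and walk down to a divisor
    while n % d:
        d -= 1
    return d
assert psr(12) == 3
assert psr(3102) == 47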
|
[
"pumafied@gmail.com"
] |
pumafied@gmail.com
|
a8c147db047fa4bcc792392e8c7a98ea3ea03be4
|
18576820ca4cfbecbfc8e8c05e9aaba0809e6138
|
/fizeau_control_loop.py
|
5ea265e3af610f8f96546e44790327fc23afc2ca
|
[] |
no_license
|
mwanakijiji/lbti_fizeau_control
|
d12e3fcfa12725ab24c42acaaea6e83bd97e2f02
|
08cd6aa2ae77d028b3b5d794d6403bffe143b25d
|
refs/heads/master
| 2021-07-09T09:13:06.175214
| 2018-12-10T16:24:44
| 2018-12-10T16:24:44
| 139,908,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,021
|
py
|
#!/usr/bin/python
from lmircam_tools import *
from lmircam_tools.overlap_psfs import overlap_airy_psfs, overlap_grism_psfs
from lmircam_tools.dial_opd import optimize_opd_fizeau_grism, optimize_opd_fizeau_airy
from lmircam_tools.change_tt import optimize_tt_fizeau_airy
############## BEGIN GROSS OVERLAP OF NON-FIZEAU AIRY PSFS
psf_loc_setpoint = [1220,800] # pixel location for PSFs to be at
overlap_airy_psfs(psf_loc_setpoint) # filter-agnostic
############## END GROSS OVERLAP OF AIRY PSFS
############## BEGIN PUT IN GRISM AND REFINE GRISM-PSF OVERLAP
put_in_grism()
overlap_grism_psfs(psf_loc_setpoint)
############## END PUT IN GRISM AND REFINE GRISM-PSF OVERLAP
############## BEGIN DIAL OPD WITH HPC AND FIND CENTER OF COHERENCE ENVELOPE, THEN REMOVE GRISM
optimize_opd_fizeau_grism(psf_loc_setpoint) # might also use argument of the re-established Fizeau/grism PSF instead of the coordinate where it's supposed to be
remove_grism()
############## END DIAL OPD WITH HPC AND FIND CENTER OF COHERENCE ENVELOPE, THEN REMOVE GRISM
############## BEGIN HOLD CENTER OF SCIENCE COHERENCE ENVELOPE WITH HIGH-CONTRAST FRINGES
# maybe allow HPC to correct pathlength using science readouts, even though Phasecam not closed yet? or does this not have to be automatic, and we can just correct the Fizeau/Airy PSFs once Phasecam is closed?
############## END HOLD CENTER OF SCIENCE COHERENCE ENVELOPE WITH HIGH-CONTRAST FRINGES
############## TRANSLATE NIL + CLOSE PHASECAM LOOP HERE?
# might be manual step
############## BEGIN OPTIMIZE SCIENCE PSF BY FINDING OPD AND TT SETPOINTS ITERATIVELY
optimize_opd_fizeau_airy(psf_loc_setpoint)
optimize_tt_fizeau_airy(psf_loc_setpoint)
## adjust TT to optimize PSF; maybe iterate with OPD?
## note OPD movements cannot be more than 5 um with Phasecam closed
############## END OPTIMIZE SCIENCE PSF BY FINDING OPD AND TT SETPOINTS ITERATIVELY
############## ANY RUNNING STATS I WANT TO KEEP OF THE SCIENCE PSFS?
############## REDO EVERYTHING ONCE NOD HAPPENS
|
[
"spalding@email.arizona.edu"
] |
spalding@email.arizona.edu
|
ed86d450a200b1a3716031f000c74d3c8bc7d0b5
|
449f410b621049c4049a4f7d4b0858f53d56a7d7
|
/tests/test_text.py
|
6551f5ce92298f3a3be022e2e9f7888b1f6651cd
|
[
"MIT"
] |
permissive
|
mvwicky/holdmypics
|
c02f25fd05d9694ff61d5839bd039a3a1bea4b01
|
194b135f885ef76d55975727a4a5125a6f9d33ee
|
refs/heads/main
| 2023-05-10T19:36:20.978697
| 2023-05-06T21:27:29
| 2023-05-06T21:27:29
| 196,925,416
| 0
| 0
|
MIT
| 2023-03-31T15:23:01
| 2019-07-15T04:45:27
|
Python
|
UTF-8
|
Python
| false
| false
| 5,151
|
py
|
from __future__ import annotations
import imghdr
import io
import os
import time
from collections.abc import Callable
from typing import TYPE_CHECKING
from urllib.parse import urlencode
import pytest
from flask.testing import FlaskClient
from hypothesis import example, given, strategies as st
from loguru import logger
from PIL import Image
from tests.strategies import (
color_strategy,
dpi_strategy,
fmt_strategy,
opt_color_strategy,
size_strategy,
)
from tests.utils import compact_dict, make_route, size_id
if TYPE_CHECKING:
from holdmypics import Holdmypics
char_strategy = st.characters(blacklist_categories=("Cc", "Cf", "Cs", "Co", "Cn"))
text_strategy = st.text(min_size=1, max_size=255, alphabet=char_strategy)
long_text_strategy = st.text(min_size=16, max_size=255, alphabet=char_strategy)
opt_text_strategy = st.one_of(st.none(), text_strategy)
args_strategy = st.fixed_dictionaries({"text": opt_text_strategy, "dpi": dpi_strategy})
def make_args(**kwargs: str | int | None):
from holdmypics.api.args import TextImageArgs
return TextImageArgs(**compact_dict(kwargs))
@given(
size=size_strategy,
img_fmt=fmt_strategy,
fg=color_strategy,
bg=color_strategy,
args=args_strategy,
)
@example(
size=(1920, 1080),
img_fmt="png",
fg="fff",
bg="000",
args={"text": "Some Text", "dpi": 300},
)
def test_create_images_using_function(
app_factory: Callable[[], Holdmypics],
size: tuple[int, int],
img_fmt: str,
fg: str,
bg: str,
args: dict[str, str | int | None],
):
from holdmypics.api.text import GeneratedTextImage
start = time.perf_counter()
with app_factory().test_request_context():
img_args = make_args(**args)
img = GeneratedTextImage(size, img_fmt, bg, fg, img_args)
assert img.get_save_kw()
p = img.get_path()
assert os.path.isfile(p)
assert os.path.getsize(p)
im = Image.open(p)
assert im.size == size
logger.debug("Elapsed: {0:.4f}", time.perf_counter() - start)
@given(
size=size_strategy,
img_fmt=fmt_strategy,
fg=opt_color_strategy,
bg=opt_color_strategy,
args=args_strategy,
)
def test_create_images_using_client(
app_factory: Callable[[], Holdmypics],
size: tuple[int, int],
img_fmt: str,
fg: str | None,
bg: str | None,
args: dict[str, str | int | None],
):
if bg is None and fg:
bg, fg = fg, None
start = time.perf_counter()
app = app_factory()
with app.test_client() as client:
url = make_route(
app,
"api.image_route",
size=size,
bg_color=bg,
fg_color=fg,
fmt=img_fmt,
**compact_dict(args),
)
# if args:
# url = "?".join((url, urlencode(compact_dict(args))))
res = client.get(url, follow_redirects=False)
assert res.status_code == 200
img_type = imghdr.what("filename", h=res.data)
assert img_type == img_fmt
im = Image.open(io.BytesIO(res.data))
assert im.size == size
logger.debug("Elapsed: {0:.4f}", time.perf_counter() - start)
def test_random_text_header(client: FlaskClient):
path = make_route(
client,
"api.image_route",
size=(638, 328),
bg_color="cef",
fg_color="555",
fmt="png",
random_text=True,
)
res = client.get(path, follow_redirects=False)
assert res.status_code == 200
assert "X-Random-Text" in res.headers
def test_random_text_ocr(client: FlaskClient):
pytesseract = pytest.importorskip("pytesseract", reason="pytesseract not installed")
path = make_route(
client,
"api.image_route",
size=(638, 328),
bg_color="cef",
fg_color="555",
fmt="png",
)
args = {"text": "Some Random Text", "dpi": None, "random_text": True}
query = urlencode({k: v for (k, v) in args.items() if v})
url = "?".join((path, query))
res = client.get(url, follow_redirects=False)
assert res.status_code == 200
img_type = imghdr.what("filename", h=res.data)
assert img_type == "png"
im = Image.open(io.BytesIO(res.data))
from_header = res.headers.get("X-Random-Text")
assert from_header is not None
from_ocr = pytesseract.image_to_string(im).strip()
logger.info("Got text from OCR: {0}", from_ocr)
assert from_ocr.casefold() == from_header.casefold()
@pytest.mark.parametrize(
"font_name", ["overpass", "fira-mono", "fira-sans", "roboto", "spectral"]
)
@pytest.mark.parametrize("size", [(3840, 2160), (960, 540)], ids=size_id)
def test_text_with_fonts(
app: Holdmypics, image_format: str, font_name: str, size: tuple[int, int]
):
from holdmypics.api.text import GeneratedTextImage
with app.test_request_context():
img_args = make_args(text=f"Text with font: {font_name}", font_name=font_name)
img = GeneratedTextImage(size, image_format, "cef", "555", img_args)
assert img.get_save_kw()
p = img.get_path()
assert os.path.isfile(p)
assert os.path.getsize(p)
|
[
"mvanwickle@gmail.com"
] |
mvanwickle@gmail.com
|
738b8cf3f59c9df7da1c0c7c55adfefafc2b9a16
|
9ba61317b33c4015ccfedd80a08532e6698c5b7f
|
/mnist_data/mnist_app.py
|
59c99360f921f9ad12f26afc3ca2d477430a7abd
|
[] |
no_license
|
csliuchang/tensorflow_project
|
68ff7cdda3805747529de8e57bdcb94e8abaf4f2
|
0ff718d814ee2e181a35dd968e596421e6986616
|
refs/heads/master
| 2020-06-25T23:57:36.669965
| 2019-08-02T11:28:10
| 2019-08-02T11:28:10
| 199,460,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,147
|
py
|
# coding: utf-8
import tensorflow as tf
import numpy as np
from PIL import Image
import mnist_backward
import mnist_forward
# from .mnist_backward import MODEL_SAVE_PATH
def restore_model(testPicArr):
"""
" 创建一个默认图,在改图中执行以下操作"
args:
MOVING_AVERAGE_DECAY: 用于控制模型更新的速度,训练过程中会对每一个变量维护一个影子变量,这个影子变量的初始值
就是相应变量的初始值,每次变量更新时,影子变量就是随之更新。
preValue: axis返回每一行最大值的位置索引,得到概率最大的预测值
variables_to_restore: 通过使用variables_to_restore函数,可以使在加载模型的时候将影子变量直接映射到变量的本身,
所以我们在获取变量的滑动平均值的时候只需要获取到变量的本身值而不需要去获取影子变量。
"""
with tf.Graph().as_default() as tg:
x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
y = mnist_forward.forward(x, None)
preValue = tf.argmax(y, 1)
variable_averages = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
MODEL_SAVE_PATH = "./model/"
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
preValue = sess.run(preValue, feed_dict={x: testPicArr})
return preValue
else:
print("No checkpoint file found")
return -1
def pre_pic(picName):
"""
    ANTIALIAS: anti-aliasing
    convert('L'): convert to a grayscale image
    threshold: binarization threshold
"""
img = Image.open(picName)
reIm = img.resize((28, 28), Image.ANTIALIAS)
im_arr = np.array(reIm.convert('L'))
threshold = 50
for i in range(28):
for j in range(28):
im_arr[i][j] = 255 - im_arr[i][j]
if (im_arr[i][j] < threshold):
                im_arr[i][j] = 0 # pure black (0)
else: im_arr[i][j] = 255
nm_arr = im_arr.reshape([1, 784])
nm_arr = nm_arr.astype(np.float32)
img_ready = np.multiply(nm_arr, 1.0/255.0)
return img_ready
def application():
# testNum = input("input the number of test pictures:")
# for i in range(testNum):
# testPic = raw_input("the path of test picture:")
# testPicArr = pre_pic('./1.png')
# preValue = restore_model(testPicArr)
# print("The prediction number is", preValue)
# testPicArr = pre_pic('./2.png')
preValue = restore_model(pre_pic(raw_input("the path of test picture :")))
print("The prediction number is ", preValue)
def main():
application()
if __name__ == "__main__":
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
main()
|
[
"598306303@qq.com"
] |
598306303@qq.com
|
1694499f38c5a7460bdbbd2db70b473fc4b3672a
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/MY_REPOS/Lambda-Resource-Static-Assets/2-resources/BLOG/Data-Structures/1-Python/sort/bubble_sort.py
|
eab30b253fea793d1b3452317c7e81488330911d
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 770
|
py
|
"""
https://en.wikipedia.org/wiki/Bubble_sort
Worst-case performance: O(N^2)
If you call bubble_sort(arr,True), you can see the process of the sort
Default is simulation = False
"""
def bubble_sort(arr, simulation=False):
def swap(i, j):
arr[i], arr[j] = arr[j], arr[i]
n = len(arr)
swapped = True
iteration = 0
if simulation:
print("iteration", iteration, ":", *arr)
x = -1
while swapped:
swapped = False
x = x + 1
for i in range(1, n - x):
if arr[i - 1] > arr[i]:
swap(i - 1, i)
swapped = True
if simulation:
iteration = iteration + 1
print("iteration", iteration, ":", *arr)
return arr
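# Illustrative usage (added here as an example; not part of the original module):
if __name__ == "__main__":
    print(bubble_sort([5, 1, 4, 2, 8], simulation=True))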
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
9696ef11bc4fc57ad2c5358083c52b57eba4a87c
|
3e7a8c5630de986a4d02011b1bd368c041f3f477
|
/pytorch/pytorch实现多层感知机对手写数字分类.py
|
9bc6523f288118ada524b6e1da6d7b78f30ab5bd
|
[] |
no_license
|
gswyhq/hello-world
|
b9ef715f80d2b39c8efaa1aa2eb18a6257e26218
|
b1ab053a05e1f8c63b8b04d6904a3cdca450bd9f
|
refs/heads/master
| 2023-05-26T13:15:36.788620
| 2023-05-19T13:38:50
| 2023-05-19T13:38:50
| 158,821,148
| 16
| 6
| null | 2021-03-19T02:59:48
| 2018-11-23T11:04:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,428
|
py
|
#!/usr/bin/env python
# coding: utf-8
# # 3.10 Concise implementation of a multilayer perceptron
# In[ ]:
import torch
from torch import nn
from torch.nn import init
import numpy as np
import sys
import torchvision
print(torch.__version__)
# ## 3.10.1 Define the model
# In[11]:
num_inputs, num_outputs, num_hiddens = 784, 10, 256
class FlattenLayer(torch.nn.Module):
def __init__(self):
super(FlattenLayer, self).__init__()
def forward(self, x): # x shape: (batch, *, *, ...)
return x.view(x.shape[0], -1)
net = nn.Sequential(
FlattenLayer(),
nn.Linear(num_inputs, num_hiddens),
nn.ReLU(),
nn.Linear(num_hiddens, num_outputs),
)
for params in net.parameters():
init.normal_(params, mean=0, std=0.01)
# ## 3.10.2 Load the data and train the model
# In[12]:
batch_size = 256
def load_data_fashion_mnist(batch_size, resize=None, root='~/Datasets/FashionMNIST'):
"""Download the fashion mnist dataset and then load into memory."""
trans = []
if resize:
trans.append(torchvision.transforms.Resize(size=resize))
trans.append(torchvision.transforms.ToTensor())
transform = torchvision.transforms.Compose(trans)
mnist_train = torchvision.datasets.FashionMNIST(root=root, train=True, download=True, transform=transform)
mnist_test = torchvision.datasets.FashionMNIST(root=root, train=False, download=True, transform=transform)
if sys.platform.startswith('win'):
        num_workers = 0 # 0 means no extra worker processes are used to speed up data loading
else:
num_workers = 4
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)
return train_iter, test_iter
def evaluate_accuracy(data_iter, net, device=None):
if device is None and isinstance(net, torch.nn.Module):
        # if no device is specified, use the device of net's parameters
device = list(net.parameters())[0].device
acc_sum, n = 0.0, 0
with torch.no_grad():
for X, y in data_iter:
if isinstance(net, torch.nn.Module):
                net.eval() # evaluation mode: this disables dropout
                acc_sum += (net(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()
                net.train() # switch back to training mode
            else: # custom model, not used after section 3.13; GPU not considered
                if('is_training' in net.__code__.co_varnames): # if the model has an is_training parameter
                    # set is_training to False
acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
else:
acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
n += y.shape[0]
return acc_sum / n
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
params=None, lr=None, optimizer=None):
for epoch in range(num_epochs):
train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
for X, y in train_iter:
y_hat = net(X)
l = loss(y_hat, y).sum()
            # zero the gradients
if optimizer is not None:
optimizer.zero_grad()
elif params is not None and params[0].grad is not None:
for param in params:
param.grad.data.zero_()
l.backward()
if optimizer is None:
sgd(params, lr, batch_size)
else:
                optimizer.step() # also used in the "concise softmax regression" section
train_l_sum += l.item()
train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
n += y.shape[0]
test_acc = evaluate_accuracy(test_iter, net)
print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
% (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
train_iter, test_iter = load_data_fashion_mnist(batch_size)
loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
num_epochs = 5
train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)
# Source: https://github.com/ShusenTang/Dive-into-DL-PyTorch/blob/master/code/chapter03_DL-basics/3.10_mlp-pytorch.ipynb
|
[
"gswyhq@126.com"
] |
gswyhq@126.com
|
d40cbd78c22105244ca33f02a4478fe8bbf16590
|
a8720518ad514ed4ce893afc43576b6d44ad80b1
|
/homepage/core/admin.py
|
42e968b08de540fbcf2531c8f353b64cdaf5e2c2
|
[] |
no_license
|
AlecAivazis/homepage-old
|
39d7b08219a1aa1341af8a1ce8ae17dab136ea7d
|
c48abea73d7118455ac207058cdf0f9d00352877
|
refs/heads/master
| 2023-03-12T14:13:44.321900
| 2015-10-28T23:44:43
| 2015-10-28T23:44:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
# -*- Python -*-
# -*- coding: utf-8 -*-
#
# alec aivazis
#
# this file describes the base administration for homepage
# homepage imports
from .models import Project, ProjectScreenshot
# import the django admin
from django.contrib import admin
# register the base models
admin.site.register(Project)
admin.site.register(ProjectScreenshot)
# end of file
|
[
"alec@aivazis.com"
] |
alec@aivazis.com
|
b683a8b084d250943a04f7b80d5cb9fa65abfa8c
|
ee974d693ca4c4156121f8cb385328b52eaac07c
|
/env/lib/python3.6/site-packages/imgaug/augmenters/contrast.py
|
73170bb37c584e7b4de84c07db04386b23c4f881
|
[] |
no_license
|
ngonhi/Attendance_Check_System_with_Face_Recognition
|
f4531cc4dee565d0e45c02217f73f3eda412b414
|
92ff88cbc0c740ad48e149033efd38137c9be88d
|
refs/heads/main
| 2023-03-12T07:03:25.302649
| 2021-02-26T15:37:33
| 2021-02-26T15:37:33
| 341,493,686
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:6724a23ca8693f52610258fd36edf644480787d709d4351080b5513adc338d47
size 57678
|
[
"Nqk180998!"
] |
Nqk180998!
|
1c65d2f8b68e1df88765b82bf73aa337b70d5bf6
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/networkcloud/azure-mgmt-networkcloud/generated_samples/agent_pools_create.py
|
e4702cfc48a1642bf33c4566afcd5f9eb16bf7eb
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,299
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.networkcloud import NetworkCloudMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-networkcloud
# USAGE
python agent_pools_create.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = NetworkCloudMgmtClient(
credential=DefaultAzureCredential(),
subscription_id="123e4567-e89b-12d3-a456-426655440000",
)
response = client.agent_pools.begin_create_or_update(
resource_group_name="resourceGroupName",
kubernetes_cluster_name="kubernetesClusterName",
agent_pool_name="agentPoolName",
agent_pool_parameters={
"extendedLocation": {
"name": "/subscriptions/123e4567-e89b-12d3-a456-426655440000/resourceGroups/resourceGroupName/providers/Microsoft.ExtendedLocation/customLocations/clusterExtendedLocationName",
"type": "CustomLocation",
},
"location": "location",
"properties": {
"administratorConfiguration": {
"adminUsername": "azure",
"sshPublicKeys": [
{
"keyData": "ssh-rsa AAtsE3njSONzDYRIZv/WLjVuMfrUSByHp+jfaaOLHTIIB4fJvo6dQUZxE20w2iDHV3tEkmnTo84eba97VMueQD6OzJPEyWZMRpz8UYWOd0IXeRqiFu1lawNblZhwNT/ojNZfpB3af/YDzwQCZgTcTRyNNhL4o/blKUmug0daSsSXISTRnIDpcf5qytjs1Xo+yYyJMvzLL59mhAyb3p/cD+Y3/s3WhAx+l0XOKpzXnblrv9d3q4c2tWmm/SyFqthaqd0= admin@vm"
}
],
},
"agentOptions": {"hugepagesCount": 96, "hugepagesSize": "1G"},
"attachedNetworkConfiguration": {
"l2Networks": [
{
"networkId": "/subscriptions/123e4567-e89b-12d3-a456-426655440000/resourceGroups/resourceGroupName/providers/Microsoft.NetworkCloud/l2Networks/l2NetworkName",
"pluginType": "DPDK",
}
],
"l3Networks": [
{
"ipamEnabled": "False",
"networkId": "/subscriptions/123e4567-e89b-12d3-a456-426655440000/resourceGroups/resourceGroupName/providers/Microsoft.NetworkCloud/l3Networks/l3NetworkName",
"pluginType": "SRIOV",
}
],
"trunkedNetworks": [
{
"networkId": "/subscriptions/123e4567-e89b-12d3-a456-426655440000/resourceGroups/resourceGroupName/providers/Microsoft.NetworkCloud/trunkedNetworks/trunkedNetworkName",
"pluginType": "MACVLAN",
}
],
},
"availabilityZones": ["1", "2", "3"],
"count": 3,
"labels": [{"key": "kubernetes.label", "value": "true"}],
"mode": "System",
"taints": [{"key": "kubernetes.taint", "value": "true"}],
"upgradeSettings": {"maxSurge": "1"},
"vmSkuName": "NC_M16_v1",
},
"tags": {"key1": "myvalue1", "key2": "myvalue2"},
},
).result()
print(response)
# x-ms-original-file: specification/networkcloud/resource-manager/Microsoft.NetworkCloud/stable/2023-07-01/examples/AgentPools_Create.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
b92c719c838c44f7a9aab607d59e87dbb6da351d
|
e97060ebb056b8c037e9cf95be08158ecab321bc
|
/ibmsecurity/isds/interfaces.py
|
f6af6a062b6ec84043d1ca491d2a39cc3363fcb0
|
[
"Apache-2.0"
] |
permissive
|
sandermey/ibmsecurity
|
74ed8378e9ddb9f778b76d227e90cfb747511c1e
|
92ba7828260e96a6a323f4ac3830bfa43ee8dd7e
|
refs/heads/master
| 2020-04-09T22:49:06.302901
| 2018-03-07T05:04:37
| 2018-03-07T05:04:37
| 124,246,868
| 0
| 0
|
Apache-2.0
| 2018-03-07T14:21:29
| 2018-03-07T14:21:28
| null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
import logging
import ibmsecurity.utilities.tools
logger = logging.getLogger(__name__)
def get_all(isdsAppliance, check_mode=False, force=False):
"""
Retrieving all interfaces
:rtype: (str, dict)
"""
return isdsAppliance.invoke_get("Retrieving all interfaces", "/widgets/mgmtinterface")
def get_all_app(isdsAppliance, check_mode=False, force=False):
"""
Retrieving all application interfaces
:rtype: (str, dict)
"""
return isdsAppliance.invoke_get("Retrieving all application interfaces", "/application_interfaces")
def get(isdsAppliance, uuid, check_mode=False, force=False):
"""
Retrieving a single interface
"""
return isdsAppliance.invoke_get("Retrieving a single interface", "/application_interfaces/" + uuid + "/addresses/1")
def compare(isdsAppliance1, isdsAppliance2):
"""
Compare interfaces between 2 appliances
"""
ret_obj1 = get_all(isdsAppliance1)
ret_obj2 = get_all(isdsAppliance2)
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2)
|
[
"rsreera@us.ibm.com"
] |
rsreera@us.ibm.com
|
2eab281a1db78a8b6420122afd404fe0b0a12c37
|
568fa58296378fa129ab3349adf010daa44ed45b
|
/third_party/incubator-tvm/python/tvm/contrib/miopen.py
|
e062ac1e735ecdca9a09778d0aaef388d7a78837
|
[
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Unlicense",
"LLVM-exception"
] |
permissive
|
mindspore-ai/akg
|
37f471badc66de6a831f1f45ad84344f34d23ef2
|
99f33858d6972741748cbfc9ab0bf9600428fef7
|
refs/heads/master
| 2023-07-25T23:03:17.672665
| 2023-07-11T07:33:57
| 2023-07-11T07:33:57
| 274,077,856
| 319
| 36
|
Apache-2.0
| 2021-12-30T13:43:08
| 2020-06-22T08:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 3,805
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External function interface to MIOpen library."""
# pylint: disable-msg=C0103
import ctypes
import numpy as np
from .. import api as _api
from .. import intrin as _intrin
from .. import get_global_func as _get_global_func
def _get_np_int32_array_handle(arr):
"""Return a void_p handle for a numpy array
Parameters
----------
arr: numpy.NDArray
source numpy array
Returns
-------
ptr: ctypes.c_void_p
pointer to the data
"""
assert arr.dtype == np.int32
ptr = arr.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
return ctypes.cast(ptr, ctypes.c_void_p)
def conv2d_forward(x,
w,
stride_h=1,
stride_w=1,
pad_h=0,
pad_w=0,
dilation_h=1,
dilation_w=1,
conv_mode=0,
data_type=1,
group_count=1):
"""Create an extern op that compute 2D convolution with MIOpen
Parameters
----------
x: Tensor
input feature map
w: Tensor
convolution weight
stride_h: int
height stride
stride_w: int
width stride
pad_h: int
height pad
pad_w: int
        width pad
dilation_h: int
height dilation
dilation_w: int
width dilation
conv_mode: int
0: miopenConvolution
1: miopenTranspose
data_type: int
0: miopenHalf (fp16)
1: miopenFloat (fp32)
group_count: int
number of groups
Returns
-------
y: Tensor
The result tensor
"""
assert (0 <= conv_mode <= 2), "0: miopenConvolution / 1: miopenTranspose / 2: miopenGroupConv"
if group_count > 1:
conv_mode = 2
oshape = np.zeros((len(x.shape)), dtype=np.int32)
xshape = x.shape
wshape = w.shape
setup_func = _get_global_func("tvm.contrib.miopen.conv2d.setup")
algo = setup_func(conv_mode,
data_type,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
xshape[0].value,
xshape[1].value,
xshape[2].value,
xshape[3].value,
wshape[0].value,
wshape[1].value,
wshape[2].value,
wshape[3].value,
group_count,
_get_np_int32_array_handle(oshape))
return _api.extern(
list(oshape), [x, w],
lambda ins, outs: _intrin.call_packed(
"tvm.contrib.miopen.conv2d.forward",
conv_mode,
data_type,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
algo,
ins[0],
ins[1],
outs[0]), name="y")
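# An illustrative call sketch, kept as comments because executing it needs a
# ROCm build of TVM with MIOpen enabled; the shapes are NCHW and purely made up:
#
#   import tvm
#   x = tvm.placeholder((1, 3, 32, 32), name="x")
#   w = tvm.placeholder((8, 3, 3, 3), name="w")
#   y = conv2d_forward(x, w, stride_h=1, stride_w=1, pad_h=1, pad_w=1)
#   s = tvm.create_schedule(y.op)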
|
[
"1027252281@qq.com"
] |
1027252281@qq.com
|
ef9da3bac625f676f56fdc1d3a6ff80c1630d9da
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/4060/224004060.py
|
0cfd110dffd480626cfac7c195c4d02697106c2d
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
from bots.botsconfig import *
from records004060 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'MA',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'CF1', MIN: 1, MAX: 1},
{ID: 'CF2', MIN: 1, MAX: 9999, LEVEL: [
{ID: 'L11', MIN: 0, MAX: 99},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
50ef32c94d5e5aee734232dd3e61fc70bf587629
|
8773e8c9b9a0a6e407f91b6f7c6321141d7e8356
|
/P0113.py
|
0b8fdc5ce3434443cc3611041a335ba30d4bcad7
|
[] |
no_license
|
westgate458/LeetCode
|
1836bb21e8dd95386ccab390f5fd04567a429a02
|
36d7f9e967a62db77622e0888f61999d7f37579a
|
refs/heads/master
| 2021-12-28T04:16:36.875737
| 2021-12-17T05:48:09
| 2021-12-17T05:48:09
| 152,928,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 3 13:23:08 2019
@author: Tianqi Guo
"""
class Solution(object):
def pathSum(self, root, sum):
"""
:type root: TreeNode
:type sum: int
:rtype: List[List[int]]
"""
# deal with trivial case
if not root:
return []
# if current node is a leaf
elif not root.left and not root.right:
# if remaining sum is equal to current value
if sum == root.val:
# current leaf is part of the desired path
return [[root.val]]
# if current leaf doesn't give the desired sum
else:
# return empty list
return []
# if current node is not a leaf
else:
# 1) recursively check the child branches, which return lists of values that add up to the desired sum
# 2) for the combined lists, add the value current node to each entry
# 3) return the updated list of path values to previous level
return [[root.val] + combo for combo in self.pathSum(root.left, sum - root.val) + self.pathSum(root.right, sum - root.val)]
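# A tiny self-check sketch. TreeNode here is a hypothetical stand-in for
# LeetCode's node definition and is not part of the original submission.
class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

if __name__ == '__main__':
    #   1
    #  / \
    # 2   3
    root = TreeNode(1, TreeNode(2), TreeNode(3))
    print(Solution().pathSum(root, 3))  # [[1, 2]]
    print(Solution().pathSum(root, 4))  # [[1, 3]]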
|
[
"tqguo246@gmail.com"
] |
tqguo246@gmail.com
|
f4440770a165003e26a1fe82ab270ff926180faa
|
b822fd48d109c59a07cfef5196888c3f22c792b3
|
/aae/train.py
|
0c434501c97952da300c1a76736c15a79a31d1cb
|
[
"MIT"
] |
permissive
|
cupOJoseph/drawlikebobross
|
2e179f24bc59303be2782d95880235c57995a460
|
e4c33745c605d17ea6b9e5bea3cf339eb875a58a
|
refs/heads/master
| 2022-05-05T12:18:45.504161
| 2018-07-05T02:21:40
| 2018-07-05T02:21:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,237
|
py
|
import os
import sys
import argparse
import torch
import torchvision.transforms as transforms
sys.path.append(os.path.dirname(__file__))
from trainer import gan_trainer
from loader import BobRossDataset
# Params
parser = argparse.ArgumentParser(description='GAN trainer')
parser.add_argument('--epoch', default=500, type=int)
parser.add_argument('--lr', default=0.001, type=float)
parser.add_argument('--cuda', default='true', type=str)
parser.add_argument('--resume', default='', type=str)
args, unknown = parser.parse_known_args()
cuda = True if 'true' in args.cuda.lower() else False
# cuda = True
transformers = transforms.Compose([
transforms.ToTensor(),
])
# Gan trainer
trainer = gan_trainer(z_dim=8, h_dim=128, filter_num=64, channel_num=3, lr=args.lr, cuda=cuda)
if __name__ == '__main__':
if args.resume:
trainer.load_(args.resume)
# dataset
train_dataset = BobRossDataset('../dataset/bobross.h5py', transform=transformers)
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset, batch_size=8, shuffle=True,
pin_memory=cuda, num_workers=4
)
for e in range(trainer.start_epoch, args.epoch):
trainer.train(train_loader, e)
trainer.save_(e)
|
[
"kendricktan0814@gmail.com"
] |
kendricktan0814@gmail.com
|
faa0232a40c211a3852add071f93ba865508361c
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part002186.py
|
828488582f050dee16fc5a1431eafefa81c4dca2
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,304
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher14833(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.1.4.1.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.1.4.1.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher14833._instance is None:
CommutativeMatcher14833._instance = CommutativeMatcher14833()
return CommutativeMatcher14833._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 14832
return
yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
67c3f9ec9c939f9d356a5fd38f10e6df68ba4e5e
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/celery/2016/4/test_filesystem.py
|
e96039d394100b3efa36fe17417d2f22b8d73151
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005
| 2021-01-28T19:40:51
| 2021-01-28T19:40:51
| 306,497,459
| 1
| 1
| null | 2020-11-24T20:56:18
| 2020-10-23T01:18:07
| null |
UTF-8
|
Python
| false
| false
| 2,471
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import shutil
import tempfile
from celery import states
from celery.backends.filesystem import FilesystemBackend
from celery.exceptions import ImproperlyConfigured
from celery.utils import uuid
from celery.tests.case import AppCase, skip
@skip.if_win32()
class test_FilesystemBackend(AppCase):
def setup(self):
self.directory = tempfile.mkdtemp()
self.url = 'file://' + self.directory
self.path = self.directory.encode('ascii')
def teardown(self):
shutil.rmtree(self.directory)
def test_a_path_is_required(self):
with self.assertRaises(ImproperlyConfigured):
FilesystemBackend(app=self.app)
def test_a_path_in_url(self):
tb = FilesystemBackend(app=self.app, url=self.url)
self.assertEqual(tb.path, self.path)
def test_path_is_incorrect(self):
with self.assertRaises(ImproperlyConfigured):
FilesystemBackend(app=self.app, url=self.url + '-incorrect')
def test_missing_task_is_PENDING(self):
tb = FilesystemBackend(app=self.app, url=self.url)
self.assertEqual(tb.get_state('xxx-does-not-exist'), states.PENDING)
def test_mark_as_done_writes_file(self):
tb = FilesystemBackend(app=self.app, url=self.url)
tb.mark_as_done(uuid(), 42)
self.assertEqual(len(os.listdir(self.directory)), 1)
def test_done_task_is_SUCCESS(self):
tb = FilesystemBackend(app=self.app, url=self.url)
tid = uuid()
tb.mark_as_done(tid, 42)
self.assertEqual(tb.get_state(tid), states.SUCCESS)
def test_correct_result(self):
data = {'foo': 'bar'}
tb = FilesystemBackend(app=self.app, url=self.url)
tid = uuid()
tb.mark_as_done(tid, data)
self.assertEqual(tb.get_result(tid), data)
def test_get_many(self):
data = {uuid(): 'foo', uuid(): 'bar', uuid(): 'baz'}
tb = FilesystemBackend(app=self.app, url=self.url)
for key, value in data.items():
tb.mark_as_done(key, value)
for key, result in tb.get_many(data.keys()):
self.assertEqual(result['result'], data[key])
def test_forget_deletes_file(self):
tb = FilesystemBackend(app=self.app, url=self.url)
tid = uuid()
tb.mark_as_done(tid, 42)
tb.forget(tid)
self.assertEqual(len(os.listdir(self.directory)), 0)
|
[
"rodrigosoaresilva@gmail.com"
] |
rodrigosoaresilva@gmail.com
|
37ec8231587eda9a2df9ede1cf5ec2282956d8c1
|
d4d3b859f136154427c36924f87525590853873a
|
/Tools.py
|
b104a92fdf8abdde26ffeaa6ddae63c1d381c369
|
[] |
no_license
|
lijiunderstand/Semantic_Segmentation_RefineNet
|
46b002f53254d5cc0bb4b9565382d2386a1d01c9
|
ec7ea477096dafc2052fa74fdb3277199251a35f
|
refs/heads/master
| 2020-04-18T10:16:16.303654
| 2018-07-11T16:41:18
| 2018-07-11T16:41:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,346
|
py
|
import time
import os
from tensorflow.python import pywrap_tensorflow
import numpy as np
from matplotlib import pyplot as plt
import cv2
class Tools:
def __init__(self):
pass
@staticmethod
def print_info(info):
print(time.strftime("%H:%M:%S", time.localtime()), info)
pass
    # Create the directory if it does not exist
@staticmethod
def new_dir(path):
if not os.path.exists(path):
os.makedirs(path)
return path
@staticmethod
def print_ckpt(ckpt_path):
reader = pywrap_tensorflow.NewCheckpointReader(ckpt_path)
var_to_shape_map = reader.get_variable_to_shape_map()
for key in var_to_shape_map:
print("tensor_name: ", key)
print(reader.get_tensor(key))
pass
pass
pass
class Visualize:
@staticmethod
def _discrete_matshow_adaptive(data, labels_names=[], title=""):
fig_size = [7, 6]
plt.rcParams["figure.figsize"] = fig_size
cmap = plt.get_cmap('Paired', np.max(data) - np.min(data) + 1)
mat = plt.matshow(data,
cmap=cmap,
vmin=np.min(data) - .5,
vmax=np.max(data) + .5)
cax = plt.colorbar(mat,
ticks=np.arange(np.min(data), np.max(data) + 1))
if labels_names:
cax.ax.set_yticklabels(labels_names)
if title:
plt.suptitle(title, fontsize=15, fontweight='bold')
fig = plt.gcf()
fig.savefig('data/tmp.jpg', dpi=300)
img = cv2.imread('data/tmp.jpg')
return img
@staticmethod
def visualize_segmentation_adaptive(predictions, segmentation_class_lut, title="Segmentation"):
# TODO: add non-adaptive visualization function, where the colorbar
# will be constant with names
unique_classes, relabeled_image = np.unique(predictions, return_inverse=True)
relabeled_image = relabeled_image.reshape(predictions.shape)
labels_names = []
for index, current_class_number in enumerate(unique_classes):
labels_names.append(str(index) + ' ' + segmentation_class_lut[current_class_number])
im = Visualize._discrete_matshow_adaptive(data=relabeled_image, labels_names=labels_names, title=title)
return im
pass
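# A brief usage sketch for the Tools helpers above (the path is illustrative):
if __name__ == "__main__":
    Tools.print_info("starting")            # prints e.g. "12:00:00 starting"
    out_dir = Tools.new_dir("data/models")  # created only if it is missing
    Tools.print_info("output dir: " + out_dir)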
|
[
"woxinxie1234@163.com"
] |
woxinxie1234@163.com
|
428f590f1df538a492bd7a601fcb55bf5ce4ee3b
|
81fff1188c6918fbe7ccbcd9e61b62456f0abef5
|
/backend/settings.py
|
4531b84a2b3ef8fe367cd5479e52c1a85a274c8d
|
[
"MIT"
] |
permissive
|
mugash/cookbook-graphql-backend
|
2742087f3e6c4012f5c99c17c0518c27a8b30078
|
116e9dc2e5b0d63a2e4429a5c6f192cd0c43508d
|
refs/heads/master
| 2020-12-02T16:13:25.043539
| 2017-07-07T09:02:42
| 2017-07-07T09:02:42
| 96,519,903
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,205
|
py
|
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ng5ptohji7_9@u(e&az$ljy4(#ai+tj#dcj-hg92wdrjxdpcx6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'graphene_django',
'ingredients'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
GRAPHENE = {
'SCHEMA': 'backend.schema.schema'
}
|
[
"mugagambi@gmail.com"
] |
mugagambi@gmail.com
|
9d12a804190337836e997d0e7f88d8a22da76e8b
|
84147502b81451a2f9bcaabc00a35789afe132f0
|
/fastapistudy/test_chapter08.py
|
abf3208c284c9c77487e9a7b074f1a6fe1c9b59a
|
[] |
no_license
|
teng-tt/Fastapi_Study
|
bfdb3ca9f97cf8e2a928f56a77d0fc17c5bb9692
|
946decd07b0de98ce353d4008c7920c778a94a6f
|
refs/heads/master
| 2023-06-01T14:57:43.811721
| 2021-06-13T03:50:55
| 2021-06-13T03:50:55
| 360,474,922
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 878
|
py
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# __author__ = "Teng"
from fastapi.testclient import TestClient
from run import app
""" 测试用例 """
client = TestClient(app) # 先安装pytest
def test_run_bg_task(): # 主义不是async def h函数test开头是一种pytest规范
response = client.post(url="/chapter08/background_tasks?framework=FastApi")
assert response.status_code == 200
assert response.json() == {"message": "任务已在后台运行"}
def test_dependency_run_bg_task():
response = client.post(url="/chapter08/dependency/background_tasks")
assert response.status_code == 200
assert response.json() is None
def test_dependency_run_bg_task_q():
response = client.post(url="/chapter08/dependency/background_tasks?q=1")
assert response.status_code == 200
assert response.json() == {"message": "README.md更新成功"}
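# Usage sketch: with pytest installed, run `pytest test_chapter08.py -q` from
# the project root so that `from run import app` above resolves.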
|
[
"admin@example.com"
] |
admin@example.com
|
a6e2b69a7dd2c15bf8f960ee53cf86c2fca9e9cd
|
9eb35d6df7b0490d556623f84dba12bb05f30ee2
|
/models_and_validation/cross_validation.py
|
6b2524e4812c1b21e05c66400b87e28df0741375
|
[
"MIT"
] |
permissive
|
FelSiq/statistics-related
|
0b4442bd19338c5b0da7dcf5ecd53eb304dcd3f8
|
ee050202717fc368a3793b195dea03687026eb1f
|
refs/heads/master
| 2021-11-24T12:31:08.660652
| 2021-11-03T23:42:39
| 2021-11-03T23:42:39
| 211,089,869
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,204
|
py
|
"""Tests with cross-validation.
General and simple method used for estimating unknown parameters
from data.
General algorithm:
1. Randomly partition the data X of size n into X_{train} and X_{test}
Let m = X_{test}.size
Therefore, X_{train}.size = n - m
2. Fit the model using X_{train}
3. Test the fitted model using X_{test}
4. Repeat t times and average the results
Some of the most known Cross-validation procedures:
k-fold CV: partition the data X into k (approximately) equal-sized
subsets. t = k and m = n/k (tests of every subset once.)
Leave-one-out (LOO) CV: m = 1, t = n, testing on every sample once.
(The same as K-fold CV with k = n).
Monte Carlo CV: randomly sample subsets of suitable size for the
desired number of times.
"""
import typing as t
import numpy as np
def kfold_cv(
X: np.ndarray,
k: int = 10,
shuffle: bool = True,
return_inds: bool = False,
random_state: t.Optional[int] = None,
) -> t.Iterator[t.Tuple[np.ndarray, np.ndarray]]:
"""K-fold Cross Validation."""
if not isinstance(k, (int, np.int, np.int32, np.int64)):
raise TypeError("'k' must be an integer (got {}.)".format(type(k)))
if k <= 1:
raise ValueError("'k' must be a greater than 1 (got {}.)".format(k))
n_samples = X.size if X.ndim == 1 else X.shape[0]
if n_samples < max(2, k):
raise ValueError("Insufficient number of instances ({}). "
"Required num_inst >= max(2, k)".format(n_samples))
test_size = int(n_samples / k)
uneven_extra_inds = n_samples - k * test_size
indices = np.arange(n_samples)
if shuffle:
if random_state is not None:
np.random.seed(random_state)
np.random.shuffle(indices)
for _ in np.arange(k):
split_index = test_size + int(uneven_extra_inds > 0)
uneven_extra_inds -= 1
if return_inds:
yield indices[:split_index], indices[split_index:]
else:
yield X[indices[:split_index]], X[indices[split_index:]]
indices = np.roll(indices, -split_index)
def loo_cv(
X: np.ndarray,
shuffle: bool = True,
return_inds: bool = False,
random_state: t.Optional[int] = None,
) -> t.Iterator[t.Tuple[np.ndarray, np.ndarray]]:
"""LOOCV (Leave-one-out Cross Validation).
This is the same as n-fold Cross Validation (k = n).
"""
n_samples = X.size if X.ndim == 1 else X.shape[0]
for fold in kfold_cv(
X=X,
k=n_samples,
shuffle=shuffle,
return_inds=return_inds,
random_state=random_state):
yield fold
def jackknife(
X: np.ndarray,
k: int = 0,
shuffle: bool = True,
return_inds: bool = False,
random_state: t.Optional[int] = None,
) -> t.Iterator[np.ndarray]:
"""Jackknife iterator.
The jackknife procedure partitions the ``X`` data into k folds,
and, unlike the Cross Validation procedure, returns just the
`kept/train` examples.
If k <= 0, then k = `number of instances` is used.
"""
n_samples = X.size if X.ndim == 1 else X.shape[0]
k = n_samples if k <= 0 else k
for _, train_vals in kfold_cv(
X=X,
k=k,
shuffle=shuffle,
return_inds=return_inds,
random_state=random_state):
yield train_vals
def monte_carlo_cv(X: np.ndarray,
test_frac: float = 0.2,
n: int = 10,
return_inds: bool = False,
random_state: t.Optional[int] = None
) -> t.Iterator[t.Tuple[np.ndarray, np.ndarray]]:
"""Monte Carlo Cross Validation."""
if not isinstance(test_frac, float):
raise ValueError("'test_frac' must be float type (got {}.)".format(
type(test_frac)))
if not isinstance(n, int):
raise TypeError("'n' must be an integer (got {}.)".format(type(n)))
if n <= 0:
raise ValueError("'n' must be a positive value (got {}.)".format(n))
if not 0 < test_frac < 1:
raise ValueError(
"'test_frac' must be in (0.0, 1.0) interval (got {}.)".format(
test_frac))
n_samples = X.size if X.ndim == 1 else X.shape[0]
if n_samples < 2:
raise ValueError("Number of samples must be greater than 1 "
"(got {}.)".format(n_samples))
test_size = int(test_frac * n_samples)
if test_size == 0:
raise ValueError(
"Test subset with 0 instances. Please choose a higher 'test_frac' (got {}.)"
.format(test_frac))
if random_state is not None:
np.random.seed(random_state)
indices = np.arange(n_samples)
for _ in np.arange(n):
np.random.shuffle(indices)
inds_test, inds_train = np.split(indices, [test_size])
if return_inds:
yield inds_test, inds_train
else:
yield X[inds_test], X[inds_train]
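def _kfold_demo():
    """Illustrative driver for kfold_cv above (not called by default);
    the concrete split values depend on the shuffle seed."""
    X = np.arange(10)
    for X_test, X_train in kfold_cv(X, k=5, random_state=0):
        # Each iteration yields (test fold, remaining training folds).
        print("test:", X_test, "train:", X_train)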
def _test():
for fold in monte_carlo_cv(np.arange(2), test_frac=0.99, random_state=1):
print(fold)
if __name__ == "__main__":
_test()
|
[
"felipe.siqueira@usp.br"
] |
felipe.siqueira@usp.br
|
feb5e81dff37b3e59bb6b25fe4a2ad1dd53ee5f0
|
4591684136ac81244d5337197e97f58864d7fff3
|
/keras/keras18_ensemble1.py
|
afb0ad3b17ce43849797d7ecfda41ba1b2a9b692
|
[] |
no_license
|
marattang/AI_training
|
4b15e9d9734d77ae04beaae078749c85d832c9c5
|
f7f1a2b762dcf770335b62ee668ad1c54ccf1ceb
|
refs/heads/main
| 2023-06-20T19:05:10.385238
| 2021-07-26T00:29:10
| 2021-07-26T00:29:10
| 383,965,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,746
|
py
|
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input
import numpy as np
from tensorflow.keras.layers import concatenate, Concatenate
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
x1 = np.array([range(100), range(301, 401), range(1, 101)])
x2 = np.array([range(101, 201), range(411, 511), range(100, 200)])
x1 = np.transpose(x1)
x2 = np.transpose(x2)
y1 = np.array([range(1001, 1101)])
# y1 = np.array(range(1001, 1101))  -> without the outer [] the shape is (100,)
y1 = np.transpose(y1)
print(x1.shape, x2.shape, y1.shape)
# x1_train, x1_test, x2_train, x2_test, y_train, y_test = train_test_split(x1, x2, y1, test_size=0.2, random_state=8, shuffle=True)
x1_train, x1_test, x2_train, x2_test, y_train, y_test = train_test_split(x1, x2, y1, random_state=8, shuffle=True)
print(x1_train.shape, x2_train.shape, y_train.shape)
print(x1_test.shape, x2_test.shape, y_test.shape)
# Model construction
# Exercise
# #2-1 Model 1
input1 = Input(shape=(3,))
dense1 = Dense(55, activation='relu', name='dense1')(input1)
dense2 = Dense(32, activation='relu', name='dense2')(dense1)
dense3 = Dense(26, activation='relu', name='dense3')(dense2)
output1 = Dense(18)(dense3)
# #2-2 Model 2
input2 = Input(shape=(3,))
dense11 = Dense(45, activation='relu', name='dense11')(input2)
dense12 = Dense(28, activation='relu', name='dense12')(dense11)
dense13 = Dense(20, activation='relu', name='dense13')(dense12)
dense14 = Dense(10, activation='relu', name='dense14')(dense13)
output2 = Dense(7)(dense14)
merge1 = concatenate([output1, output2]) # merge the last layer of the first model with the last layer of the second model.
# Assignment 4: code the merge with the Concatenate layer
merge1 = Concatenate(axis=1)([output1, output2])
merge2 = Dense(24)(merge1)
merge3 = Dense(15, activation='relu')(merge2)
last_output = Dense(1)(merge3)
# last_output = Dense(1)(merge1)
model = Model(inputs=[input1, input2], outputs=last_output)
# model.summary()
# 3. Compile and train
model.compile(loss = 'mse', optimizer='adam', metrics=['mae']) # metrics=['mae','mse']
# metrics is taken as a list, so passing two or more entries is also possible.
model.fit([x1_train, x2_train], y_train, epochs=400, batch_size=25, verbose=1, validation_split=0.1)
# 4. Evaluate and predict
result = model.evaluate([x1_test, x2_test], y_test) # evaluate returns the loss and the metrics.
print('result : ', result)
y_predict = model.predict([x1_test, x2_test])
r2 = r2_score(y_test, y_predict)
print('r2 score : ', r2)
print('loss : ', result[0])
print('metrics["mae"] : ', result[1])
# r2 score : 0.9914715240776343 -> 0.9997684219501827
# lowering the loss into the sub-1 range -> 0.20147289335727692
|
[
"tlawlfp0322@gmail.com"
] |
tlawlfp0322@gmail.com
|
e23cff71c1b73f9b5b94aefde10c99cbf6be3d6d
|
66fb1005aaeb25735a1ae9197ab7dd371862bbf2
|
/sysadmin_scripts/mongodb_data_model_3/updateDB.py
|
4cd67377a565ab4d07f01bf4870f41dc2df64706
|
[
"MIT"
] |
permissive
|
jfnavarro/st_misc
|
8a8d87df9e059dbd2a037d4267acd4e21593e7c4
|
bb8c1f2c4f05343f6dd5cc8b8cd8f405d825bd31
|
refs/heads/master
| 2021-01-01T17:13:58.540991
| 2017-08-22T13:04:26
| 2017-08-22T13:04:26
| 98,029,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,714
|
py
|
#! /usr/bin/env python
"""
Script to convert ST API database model 2 to model 3
@author: Jose Fernandez
"""
import argparse
import os
import sys
try:
from pymongo import MongoClient
from pymongo import errors
from bson.objectid import ObjectId
except ImportError, e:
sys.stderr.write("Pymongo was not found, aborting...\n")
sys.exit(1)
def usage():
print "Usage:"
print " python updateDB.py [options]"
print "Options:"
print " [-a, --user] => username for the MongoDB admin"
print " [-d, --password] => password for the MongoDB admin"
print " [-c, --host] => (default localhost)"
print " [-p, --port] => (default 27017)"
print "Description:"
print " Updates the ST database from data model 2 to data model 3."
print " NOTE: It is a wise idea to manually run mongodump to create a backup of the data state prior to the update!"
def main(user, password, host, port):
print "Connecting to database..."
mongoConnection = 0
try:
mongoConnection = MongoClient(host, port)
except errors.AutoReconnect:
print 'Cannot connect to database. \nPlease manually start up MongoDB.'
sys.exit(1)
print "mongoConnection" , mongoConnection
print "Authorizing..."
try:
db_admin = mongoConnection["admin"]
db_admin.authenticate(user, password)
print "Authorization Ok!"
except TypeError,e:
sys.stderr.write("There was an error in the authentication: " + str(e) + "\n")
sys.exit(1)
###############################################################################################################
db_analysis = mongoConnection["analysis"]
datasets = db_analysis["dataset"]
datasetinfos = db_analysis["datasetinfo"]
imagealignments = db_analysis["imagealignment"]
chips = db_analysis["chip"]
# Remove the experiment database
mongoConnection.drop_database("experiment")
# Remove some fields in analysis.dataset
datasets.update_many({}, {'$unset' : { 'overall_feature_count' : 1}})
datasets.update_many({}, {'$unset' : { 'overall_hit_count' : 1}})
datasets.update_many({}, {'$unset' : { 'unique_barcode_count' : 1}})
datasets.update_many({}, {'$unset' : { 'overall_hit_quartiles' : 1}})
datasets.update_many({}, {'$unset' : { 'gene_pooled_hit_quartiles' : 1}})
datasets.update_many({}, {'$unset' : { 'obo_foundry_terms' : 1}})
    # Remove one field in analysis.datasetinfo
datasetinfos.update_many({}, {'$unset' : { 'comment' : 1}})
# Update the analysis.dataset collection to add the fields from analysis.imagealignment
for ele in datasets.find():
try:
dataset_id = ele["_id"]
al_id = ele["image_alignment_id"]
valid = True
if al_id is None or al_id == "":
valid = False
else:
al = imagealignments.find_one({"_id": ObjectId(al_id)})
if al is None or al == "":
valid = False
if valid:
datasets.update_one({"_id": dataset_id}, {"$set": {"figureHE": al["figure_blue"]}})
datasets.update_one({"_id": dataset_id}, {"$set": {"figureCy3": al["figure_red"]}})
datasets.update_one({"_id": dataset_id}, {"$set": {"alignmentMatrix": al["alignment_matrix"]}})
datasets.update_one({"_id": dataset_id}, {"$set": {"dataFile": str(dataset_id) + "_stdata.tsv.gz"}})
datasets.update_one({"_id": dataset_id}, {"$set": {"files": []}})
else:
datasets.delete_one({"_id": dataset_id})
except KeyError:
continue
datasets.delete_one({"_id": dataset_id})
# Remove image_alignment_id field from analysis.dataset
datasets.update_many({}, {'$unset' : { 'image_alignment_id' : 1}})
# Remove analysis.imagealignment and analysis.chip
imagealignments.drop()
chips.drop()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-c', '--host', type=str, default="localhost",
help='Address of the host to connect to')
parser.add_argument('-p', '--port', type=int, default=27017,
help='Port of the host to connect to')
parser.add_argument('-a', '--user', required=True, type=str,
help='the user name for the admin of the database')
parser.add_argument('-d', '--password', required=True, type=str,
help='the password for the admin of the database')
args = parser.parse_args()
main(args.user, args.password, args.host, args.port)
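# Example invocation (credentials and host are illustrative):
#   python updateDB.py -a admin -d secret -c localhost -p 27017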
|
[
"jc.fernandez.navarro@gmail.com"
] |
jc.fernandez.navarro@gmail.com
|
b0a4eaabb5ac8a0fbcc6c8266ba143827102a7db
|
6b98eeaf6eb485e1cc4d56c3eda15b6482f21296
|
/app/grandchallenge/evaluation/migrations/0003_config_new_results_are_public.py
|
d09b3fcc4484ec754dc2e22c6730305c3811c093
|
[
"Apache-2.0"
] |
permissive
|
cnbillow/grand-challenge.org
|
ef2db96c7bc6919aa7ee993d43978f8c3185a71f
|
de90bd01ca6aa883dcb47c4d005bd15f38549752
|
refs/heads/master
| 2020-03-28T03:07:41.905924
| 2018-09-06T04:45:57
| 2018-09-06T04:45:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 653
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-03 10:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("evaluation", "0002_config_submission_page_html")]
operations = [
migrations.AddField(
model_name="config",
name="new_results_are_public",
field=models.BooleanField(
default=True,
help_text="If true, new results are automatically made public. If false, the challenge administrator must manually publish each new result.",
),
)
]
|
[
"jamesmeakin@gmail.com"
] |
jamesmeakin@gmail.com
|
7291d8ba50828814176f7abb193c7cde1e7ba1c6
|
9d93af3cf4a663fe5e9618061a37d0910c089cea
|
/tests/test_decompressor_decompressobj.py
|
8787afa53b47209b0870bd073017dbb59cd40c88
|
[
"BSD-3-Clause"
] |
permissive
|
glandium/python-zstandard
|
49bd96daed537169345f8024ead5a4fe599f8b4d
|
80c3142f274621d11b1e3c401e17ee4b983ab1a5
|
refs/heads/master
| 2022-12-07T00:34:01.413940
| 2022-10-29T22:33:35
| 2022-10-29T22:38:08
| 134,373,670
| 0
| 0
| null | 2018-05-22T06:57:46
| 2018-05-22T06:57:46
| null |
UTF-8
|
Python
| false
| false
| 3,799
|
py
|
import unittest
import zstandard as zstd
class TestDecompressor_decompressobj(unittest.TestCase):
def test_simple(self):
data = zstd.ZstdCompressor(level=1).compress(b"foobar")
dctx = zstd.ZstdDecompressor()
dobj = dctx.decompressobj()
self.assertEqual(dobj.unused_data, b"")
self.assertEqual(dobj.unconsumed_tail, b"")
self.assertFalse(dobj.eof)
self.assertEqual(dobj.decompress(data), b"foobar")
self.assertEqual(dobj.unused_data, b"")
self.assertEqual(dobj.unconsumed_tail, b"")
self.assertTrue(dobj.eof)
self.assertEqual(dobj.flush(), b"")
self.assertEqual(dobj.flush(10), b"")
self.assertEqual(dobj.flush(length=100), b"")
self.assertEqual(dobj.unused_data, b"")
self.assertEqual(dobj.unconsumed_tail, b"")
def test_input_types(self):
compressed = zstd.ZstdCompressor(level=1).compress(b"foo")
dctx = zstd.ZstdDecompressor()
mutable_array = bytearray(len(compressed))
mutable_array[:] = compressed
sources = [
memoryview(compressed),
bytearray(compressed),
mutable_array,
]
for source in sources:
dobj = dctx.decompressobj()
self.assertEqual(dobj.unused_data, b"")
self.assertEqual(dobj.unconsumed_tail, b"")
self.assertFalse(dobj.eof)
self.assertEqual(dobj.flush(), b"")
self.assertEqual(dobj.flush(10), b"")
self.assertEqual(dobj.flush(length=100), b"")
self.assertEqual(dobj.decompress(source), b"foo")
self.assertEqual(dobj.unused_data, b"")
self.assertEqual(dobj.unconsumed_tail, b"")
self.assertTrue(dobj.eof)
self.assertEqual(dobj.flush(), b"")
def test_unused_data(self):
data = zstd.ZstdCompressor(level=1).compress(b"foobar")
dctx = zstd.ZstdDecompressor()
dobj = dctx.decompressobj()
self.assertEqual(dobj.unused_data, b"")
self.assertEqual(dobj.decompress(data + b"extra"), b"foobar")
self.assertTrue(dobj.eof)
self.assertEqual(dobj.unused_data, b"extra")
def test_reuse(self):
data = zstd.ZstdCompressor(level=1).compress(b"foobar")
dctx = zstd.ZstdDecompressor()
dobj = dctx.decompressobj()
dobj.decompress(data)
with self.assertRaisesRegex(
zstd.ZstdError, "cannot use a decompressobj"
):
dobj.decompress(data)
self.assertEqual(dobj.flush(), b"")
def test_multiple_decompress_calls(self):
expected = b"foobar" * 10
data = zstd.ZstdCompressor(level=1).compress(expected)
N = 3
partitioned_data = [
data[len(data) * i // N : len(data) * (i + 1) // N]
for i in range(N)
]
dctx = zstd.ZstdDecompressor()
dobj = dctx.decompressobj()
for partition in partitioned_data[:-1]:
decompressed = dobj.decompress(partition)
self.assertEqual(decompressed, b"")
self.assertEqual(dobj.unused_data, b"")
decompressed = dobj.decompress(partitioned_data[-1])
self.assertEqual(decompressed, expected)
def test_bad_write_size(self):
dctx = zstd.ZstdDecompressor()
with self.assertRaisesRegex(ValueError, "write_size must be positive"):
dctx.decompressobj(write_size=0)
def test_write_size(self):
source = b"foo" * 64 + b"bar" * 128
data = zstd.ZstdCompressor(level=1).compress(source)
dctx = zstd.ZstdDecompressor()
for i in range(128):
dobj = dctx.decompressobj(write_size=i + 1)
self.assertEqual(dobj.decompress(data), source)
|
[
"gregory.szorc@gmail.com"
] |
gregory.szorc@gmail.com
|
57848684f29088f1594e93d18a9cca0f11cda17c
|
c8781d3dc17202fcc1b5358475071c0a834c7f82
|
/ShowAndSearch/utils/parser.py
|
fe86a895837ffa7cf0261b804c6bb2395d13278d
|
[
"Apache-2.0"
] |
permissive
|
guchengxi1994/show-and-search
|
7b73d4a7a0250a0f70cf07b0de7695d6c8051545
|
e955a6677f3cd23b1f7ed247e828a5852ec6ab20
|
refs/heads/master
| 2022-12-22T06:28:36.601500
| 2020-09-22T05:17:14
| 2020-09-22T05:17:14
| 295,630,132
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,636
|
py
|
'''
language: python
Description:
version: beta
Author: xiaoshuyui
Date: 2020-09-15 15:59:10
LastEditors: xiaoshuyui
LastEditTime: 2020-09-22 11:19:20
'''
import argparse
from ShowAndSearch.utils.logger import logger
class BaseParser(object):
def __init__(self, args: list, method: str):
"""
args type:list
arg type:tuple
        arg example : ('-f','--force','force to show the message even if it does not contain the module')
"""
self.args = args
self.method = method
self.parser = argparse.ArgumentParser(
description='{} method or module information'.format(self.method))
def get_parser(self):
self.parser.add_argument(
'question', metavar='QUESTION', type=str, nargs='*', help='the question to answer')
self.parser.add_argument(
'-v', '--version', help='show current version', action='store_true')
if len(self.args) > 0:
# self.parser.add_argument('-f','--force',help='force to show message even do not contain the module')
# self.parser.add_argument('-s','--simple',help='show simple message')
for i in self.args:
self.parser.add_argument(
i[0], i[1], help=i[2], action='store_true')
else:
logger.warning('args list is null')
return self.parser
def add_parser(self, arg):
if type(arg) is tuple and len(arg) == 3:
self.parser.add_argument(
arg[0], arg[1], help=arg[2], action='store_true')
else:
logger.error('input error')
return self.parser
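# A usage sketch (the argument tuples are illustrative, matching the
# ('-f', '--force', 'help text') shape documented in __init__ above):
#   parser = BaseParser([('-f', '--force', 'force display')], 'search')
#   args = parser.get_parser().parse_args(['numpy', '-f'])
#   print(args.question, args.force)   # ['numpy'] True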
|
[
"528490652@qq.com"
] |
528490652@qq.com
|
6a0aec763b5e253145873cd3bed3a39e26344b7f
|
016b7b0cdd60900ca9b2e26f959142c30313e00d
|
/report/views.py
|
1c7bc22d70a1ad296f8eed19d0e4747783cedc6b
|
[] |
no_license
|
groob/imagr_server
|
1e2abdab290b020225359103e72f56ecec7d52b5
|
81dfa968ed48ec719803dd0d53f17b92130e76da
|
refs/heads/master
| 2020-04-05T23:06:40.972867
| 2015-06-11T14:46:15
| 2015-06-11T14:46:15
| 51,090,790
| 0
| 0
| null | 2016-02-04T16:58:34
| 2016-02-04T16:58:34
| null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from models import *
# Create your views here.
@csrf_exempt
def index(request):
data = request.POST
serial = data['serial']
message = data['message']
status = data['status']
# see if the computer exists
if serial:
try:
computer = Computer.objects.get(serial_number=serial)
except Computer.DoesNotExist:
computer = Computer(serial_number=serial)
computer.current_status = status
computer.save()
# create a new report object
report = Report(computer=computer, message=message, status=status)
report.save()
return HttpResponse(data)
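# Hypothetical client sketch showing the POST this view expects (field names
# come from the code above; the URL is illustrative):
#   import requests
#   requests.post('http://localhost:8000/report/',
#                 data={'serial': 'C02ABC123', 'message': 'checked in',
#                       'status': 'ok'})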
|
[
"graham@grahamgilbert.com"
] |
graham@grahamgilbert.com
|
beefc26ee5cc6b2af147350338002391621f0297
|
80e6e31054fe9105d2c26be7aac53c4cd6a4a33f
|
/scripts/kettle/oracle2hive.py
|
532ed84e5b965fec9f9c21de191d5f8bc008386d
|
[] |
no_license
|
alionishere/learn_python
|
8a7f6dc7d754a357d4cb720f4bc0d5c3e6e5e895
|
832b8e0579da0b7ab37e815be10204f8de1ad22d
|
refs/heads/master
| 2021-06-24T11:02:05.111027
| 2021-06-23T08:47:06
| 2021-06-23T08:47:06
| 223,834,194
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,666
|
py
|
# -*- coding: utf-8 -*-
import json
import dbutils
get_ora_meta_sql = '''
SELECT t1.OWNER
,t1.TABLE_NAME
,t1.COLUMN_NAME
,t1.DATA_TYPE
,t1.DATA_LENGTH
,t1.DATA_PRECISION
,t1.DATA_SCALE
,t2.COMMENTS
FROM DBA_TAB_COLUMNS t1
LEFT JOIN DBA_COL_COMMENTS t2
ON t1.OWNER = t2.OWNER
AND t1.TABLE_NAME = t2.TABLE_NAME
AND t1.COLUMN_NAME = t2.COLUMN_NAME
WHERE t1.OWNER = '%s'
AND t1.TABLE_NAME = '%s'
ORDER BY COLUMN_ID
'''
get_mysql_meta_sql = '''
SELECT TABLE_SCHEMA
,TABLE_NAME
,COLUMN_NAME
,ORDINAL_POSITION
,DATA_TYPE
,CHARACTER_MAXIMUM_LENGTH
,CHARACTER_OCTET_LENGTH
,NUMERIC_PRECISION
,NUMERIC_SCALE
,COLUMN_TYPE
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_SCHEMA = '%s'
AND TABLE_NAME = '%s'
;
'''
def get_ora_meta(conn, sql, src_schema, src_tb, hive_schema='', hive_tb=''):
fields = []
field_attrs = []
cur = conn.cursor()
sql = sql % (src_schema.upper(), src_tb.upper())
print(sql)
print('--' * 30)
cur.execute(sql)
res = cur.fetchall()
for field in res:
        if field[3] == 'CLOB' or field[3] == 'DATE':
field_attr = field[2] + ' STRING ' + 'COMMENT \'' + str(field[7]) + '\''
field_attrs.append(field_attr)
elif field[3] == 'VARCHAR2' or field[3] == 'VARCHAR' or field[3] == 'CHAR':
field_attr = field[2] + ' VARCHAR(' + str(field[4]) + ') COMMENT \'' + str(field[7]) + '\''
field_attrs.append(field_attr)
elif field[3] == 'NUMBER':
field_attr = ''
if field[6] == 0:
field_attr = field[2] + ' BIGINT ' + 'COMMENT \'' + str(field[7]) + '\''
elif field[5] is not None and field[6] is not None:
field_attr = field[2] + ' DECIMAL(' + str(field[5]) + ',' + str(field[6]) + ') COMMENT \'' + str(field[7]) + '\''
else:
field_attr = field[2] + ' DECIMAL(23,4)' + ' COMMENT \'' + str(field[7]) + '\''
field_attrs.append(field_attr)
else:
field_attr = field[2] + ' STRING ' + ' COMMENT \'' + str(field[7]) + '\''
field_attrs.append(field_attr)
# print(field)
fields.append(field[2])
# break
cur.close()
fields = ','.join(fields)
field_attrs = ',\n'.join(field_attrs)
# print(field_attrs)
create_str = '''
CREATE TABLE %s.%s (\n%s\n)
PARTITIONED BY (TX_DATE STRING)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\u0001'
    STORED AS PARQUET
LOCATION '/DWZQ/%s/%s';
'''
hive_tb = '%s_%s' % (src_schema, src_tb)
hive_tb_temp = '%s_TEMP' % hive_tb
create_stmt = create_str % (hive_schema.upper(), hive_tb.upper(), field_attrs, hive_schema.upper(), hive_tb.upper())
create_stmt_temp = create_str % (hive_schema.upper(), hive_tb_temp.upper(), field_attrs, hive_schema.upper(), hive_tb_temp.upper())
print(create_stmt)
print(create_stmt_temp)
return create_stmt
def get_mysql_meta(conn, sql, src_schema, src_tb, hive_schema='', hive_tb=''):
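    # NOTE: this mirrors get_ora_meta above; the type names (VARCHAR2/NUMBER)
    # and column indexes still follow the Oracle query, so the MySQL branch
    # has not been adapted to INFORMATION_SCHEMA's column order yet.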
fields = []
field_attrs = []
cur = conn.cursor()
sql = sql % (src_schema.upper(), src_tb.upper())
print(sql)
print('--' * 30)
cur.execute(sql)
res = cur.fetchall()
for field in res:
        if field[3] == 'CLOB' or field[3] == 'DATE':
field_attr = field[2] + ' STRING ' + 'COMMENT \'' + str(field[7]) + '\''
field_attrs.append(field_attr)
elif field[3] == 'VARCHAR2' or field[3] == 'VARCHAR' or field[3] == 'CHAR':
field_attr = field[2] + ' VARCHAR(' + str(field[4]) + ') COMMENT \'' + str(field[7]) + '\''
field_attrs.append(field_attr)
elif field[3] == 'NUMBER':
field_attr = ''
if field[6] == 0:
field_attr = field[2] + ' BIGINT ' + 'COMMENT \'' + str(field[7]) + '\''
elif field[5] is not None and field[6] is not None:
field_attr = field[2] + ' DECIMAL(' + str(field[5]) + ',' + str(field[6]) + ') COMMENT \'' + str(field[7]) + '\''
else:
field_attr = field[2] + ' DECIMAL(23,4)' + ' COMMENT \'' + str(field[7]) + '\''
field_attrs.append(field_attr)
else:
field_attr = field[2] + ' STRING ' + ' COMMENT \'' + str(field[7]) + '\''
field_attrs.append(field_attr)
# print(field)
fields.append(field[2])
# break
cur.close()
fields = ','.join(fields)
field_attrs = ',\n'.join(field_attrs)
# print(field_attrs)
create_str = '''
CREATE TABLE %s.%s (\n%s\n)
PARTITIONED BY (TX_DATE STRING)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\u0001'
    STORED AS PARQUET
LOCATION '/DWZQ/%s/%s';
'''
hive_tb = '%s_%s' % (src_schema, src_tb)
hive_tb_temp = '%s_TEMP' % hive_tb
create_stmt = create_str % (hive_schema.upper(), hive_tb.upper(), field_attrs, hive_schema.upper(), hive_tb.upper())
create_stmt_temp = create_str % (hive_schema.upper(), hive_tb_temp.upper(), field_attrs, hive_schema.upper(), hive_tb_temp.upper())
print(create_stmt)
print(create_stmt_temp)
return create_stmt
def run(tb_info_details):
for tb_info in tb_info_details:
conn = dbutils.get_conn(tb_info['data_src'].lower())
src_owner = tb_info['src_tb'].split('.')[0]
src_tb = tb_info['src_tb'].split('.')[1]
hive_schema = tb_info['data_src']
# hive_tb =
get_ora_meta(conn, get_ora_meta_sql, src_owner, src_tb, hive_schema)
if __name__ == '__main__':
with open('cfg.json', 'r') as f:
tb_info_details = json.load(f)
run(tb_info_details)
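# cfg.json layout inferred from run() above (schema and table names are
# illustrative):
#   [
#     {"data_src": "ORA_PROD", "src_tb": "SCOTT.EMP"},
#     {"data_src": "ORA_PROD", "src_tb": "SCOTT.DEPT"}
#   ]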
|
[
"wang_hongke@163.com"
] |
wang_hongke@163.com
|
ed264a15f7a93c1ffc3c24393851337420b1c8c5
|
5f67c696967456c063e5f8a0d14cf18cf845ad38
|
/archiv/_python/py4inf/gmane/gyear.py
|
30e892a7e7d666c4991703bf713d123ac276373c
|
[] |
no_license
|
wuxi20/Pythonista
|
3f2abf8c40fd6554a4d7596982c510e6ba3d6d38
|
acf12d264615749f605a0a6b6ea7ab72442e049c
|
refs/heads/master
| 2020-04-02T01:17:39.264328
| 2019-04-16T18:26:59
| 2019-04-16T18:26:59
| 153,848,116
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,705
|
py
|
import sqlite3
import time
import urllib.request, urllib.parse, urllib.error
import zlib
conn = sqlite3.connect('index.sqlite')
conn.text_factory = str
cur = conn.cursor()
# Determine the top ten organizations
cur.execute('''SELECT Messages.id, sender FROM Messages
JOIN Senders ON Messages.sender_id = Senders.id''')
sendorgs = dict()
for message_row in cur :
sender = message_row[1]
pieces = sender.split("@")
if len(pieces) != 2 : continue
dns = pieces[1]
sendorgs[dns] = sendorgs.get(dns,0) + 1
# pick the top organizations
orgs = sorted(sendorgs, key=sendorgs.get, reverse=True)
orgs = orgs[:10]
print("Top 10 Organizations")
print(orgs)
# orgs = ['total'] + orgs
# Read through the messages
counts = dict()
years = list()
cur.execute('''SELECT Messages.id, sender, sent_at FROM Messages
JOIN Senders ON Messages.sender_id = Senders.id''')
for message_row in cur :
sender = message_row[1]
pieces = sender.split("@")
if len(pieces) != 2 : continue
dns = pieces[1]
if dns not in orgs : continue
year = message_row[2][:4]
if year not in years : years.append(year)
key = (year, dns)
counts[key] = counts.get(key,0) + 1
tkey = (year, 'total')
counts[tkey] = counts.get(tkey,0) + 1
years.sort()
print(counts)
print(years)
fhand = open('gline.js','w')
fhand.write("gline = [ ['Year'")
for org in orgs:
fhand.write(",'"+org+"'")
fhand.write("]")
# for year in years[1:-1]:
for year in years:
fhand.write(",\n['"+year+"'")
for org in orgs:
key = (year, org)
val = counts.get(key,0)
fhand.write(","+str(val))
fhand.write("]");
fhand.write("\n];\n")
print("Output written to gline.js")
|
[
"22399993@qq.com"
] |
22399993@qq.com
|
e8001a656cae6b21c00f398deca4b950fda490ed
|
ab98c033b4c2e80b304e9f77b740b6d545870b66
|
/data_aggregation/CreationBDD2_3_aliases+GoTerms/researchDG.py
|
86a42e0a2c3605a4904beaa95bb1e64303e6338a
|
[] |
no_license
|
yannistannier/textmining-light
|
503384c28f5fb4763293ced15337295685d84ba3
|
864210d127684d5af55336ceb8c0718d0f2c3e3c
|
refs/heads/master
| 2020-04-14T23:37:38.751779
| 2019-01-07T09:10:50
| 2019-01-07T09:10:50
| 164,209,545
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,808
|
py
|
from Bio import Entrez, SeqIO, Medline
import scipy.sparse as sp
import numpy as np
import pandas as pd
import sklearn
import sys
Entrez.email = "xxxxxxx@gmail.com"
def recupDictionnaires():
ens = []
file = open("dict.txt", "r")
doc = file.read()
dim = len(doc.split("##"))
i = 0
print ("STARTING RECUP")
for line in doc.split("##"):
if (i < dim-1):
symbol, aliases, name, diseases, goTerms, pubmedIds = line.split("|")
dico = {}
dico['symbol'] = symbol
dico['aliases'] = []
for alias in aliases.split(","):
if (not alias == ""):
dico['aliases'].append(alias)
dico['name'] = name
dico['diseases'] = []
for disease in diseases.split(","):
if (not disease == ""):
dico['diseases'].append(disease)
dico['goTerms'] = []
for goTerm in goTerms.split(","):
if (not goTerm == ""):
dico['goTerms'].append(goTerm)
dico['pubmedIds'] = []
for pubmedId in pubmedIds.split(","):
if (not pubmedId == ""):
dico['pubmedIds'].append(pubmedId)
ens.append(dico)
#print (dico)
i += 1
print ("END RECUP")
return ens
def pbmd_search(maladie,gene):
handle = Entrez.esearch(db = 'pubmed', term = maladie + " AND " + gene, retmax = '1000000000')
print (maladie + " AND " + gene)
result = Entrez.read(handle)
handle.close()
return(result)
def ecriture_file(maladie, gene, value):
output = open('output.txt', 'a')
output.write(maladie+"|" +gene+ "|" + str(value) + "##")
output.close()
def ecriture_end_dg():
output = open('output.txt', 'a')
output.write("@@")
output.close()
if __name__ == '__main__':
print("################### START SEARCHING ###################")
    ###### RETRIEVE THE LIST OF GENE DICTIONARIES
ens = recupDictionnaires()
dim = len(ens)
print (dim)
    ###### OPEN THE OUTPUT FILE AND CHECK ITS CONTENT TO RESUME THE SEARCH
try :
file = open("output.txt", "r")
except IOError:
file = open("output.txt", "x")
file = open("output.txt", "r")
doc = file.read()
file.close()
lim = 0
already = doc.split("@@")
for line in already:
lim += 1
finalPart = already[-1]
print (finalPart)
print (lim)
delim = 0
for part in finalPart.split("##"):
delim += 1
i = 0
#while (i < dim):
    ###### FOR EACH DICTIONARY:
for dico in ens:
print ("Loading...")
print(str(i+1) + "/" + str(dim))
        # IF IT IS ALREADY WRITTEN IN THE FILE, SKIP TO THE NEXT ONE
if (i < lim -1):
print ("... GENE : Already done")
# SINON :
# VERIFIER QUELLES MALADIES ONT DEJA ETE ECRITES
else :
print ("Preparing GENE...")
            # PREPARE THE QUERY
genes = set([])
if (not len(dico['aliases']) == 0):
genes.update(dico['aliases'])
if (not len(dico['symbol']) == 0):
genes.add(dico['symbol'])
if (not len(dico['name']) == 0):
genes.add(dico['name'])
genes_string = " OR ".join(genes)
genes_string = "(" + genes_string + ")"
print ("GENE : ", genes_string)
nbD = len(dico['diseases'])
cptD = 0
#while (cptD < nbD):
            ##### FOR EACH DISEASE
for disease in dico['diseases']:
                # IF ALREADY DONE
if (cptD < delim -1):
print(str(cptD+1) + "/" + str(nbD))
print ("... DISEASE Already done")
                # IF NOT DONE YET
else:
                    # PUBMED QUERY
print ("SEARCHING DISEASE (" + str(cptD+1) + "/" + str(nbD) + ")" )
result = []
idList = set([])
result = pbmd_search(disease,genes_string)
idList.update(result['IdList'])
key = len(idList)
key = key + 1
print("PRINTING IN FILE ...")
ecriture_file(disease, dico['symbol'], key)
print ("***** OK !!")
cptD += 1
ecriture_end_dg()
delim = 0
i+=1
print("################### END SEARCHING ###################")
|
[
"tannier.yannis@gmail.com"
] |
tannier.yannis@gmail.com
|
049c12ca9c2ec403bf4f152a25d45aee9f1d0c8c
|
6cb1bd6816af5964c82e127e9e28cd6d0fd5fd7d
|
/05-05finally.py
|
e54a7e88b941cc9bae40f8afbc18c38fba553199
|
[] |
no_license
|
jinju-lee/Python-study
|
660f952b3c16d675147f870e1cab473177106636
|
c226bcb2c501c49ac157b6d3a3d18e515f3011f8
|
refs/heads/master
| 2021-05-08T21:15:32.782635
| 2018-02-11T14:32:26
| 2018-02-11T14:32:26
| 119,610,114
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
try:
    num = int(input('Enter a number: '))
except ValueError:
    print('Not a number.')
else:
    print(num)
finally:
    print('The finally block always runs.')
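# Example sessions (illustrative):
#   Enter a number: 12   -> prints 12, then "The finally block always runs."
#   Enter a number: abc  -> prints "Not a number.", then "The finally block always runs."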
|
[
"noreply@github.com"
] |
jinju-lee.noreply@github.com
|
158affbd0c69bdb708234a227040c705d8a0b2f4
|
88853b9b6c3ae0c12cbd5bf83be3d48f2fe0159e
|
/document/eggs_package/gflux_egg/gflux/gflux/apps/station/management/commands/obselete/deal_with_shihua_none_fuel_data.py
|
e12cd0c2aed3e9ab1d6084886feb5e89e19b5a76
|
[] |
no_license
|
laoyin/nyf
|
9151da3368c2e636501bcf1ad52f895fe446c04b
|
a2b12f54827b6088548f060881b6dafa2d791a3a
|
refs/heads/master
| 2021-01-17T17:11:14.174968
| 2015-09-28T01:12:28
| 2015-09-28T01:12:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,588
|
py
|
# coding=utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from django.core.management.base import BaseCommand
from dash.core.backends.sql.models import get_dash_session_maker
from gflux.apps.common.models import SiteDayBatch
from datetime import datetime
from optparse import make_option
import sys,pdb,re
from dash.core.utils import getPaymentTypeByCard
import hashlib
import xlrd
import random
from xlrd import xldate_as_tuple
class Command(BaseCommand):
help = 'Deal with None Fuel Data'
option_list = BaseCommand.option_list + (
make_option('--file',help="set file path",type="string"),
make_option('--save_path',help="save file path",type="string"),
)
def handle(self, *args, **options):
print 'start...'
save_path=options['save_path']
try:
trans_count=30150001
book=xlrd.open_workbook(options['file'],encoding_override='gb2312')
sheets=book.sheets()
for sheet in sheets:
nrows=sheet.nrows
for row_idx in xrange(nrows):
                    # skip the header row
if row_idx == 0:
continue
                    # the last row is a grand total; skip it
if row_idx==nrows-1:
return
row=sheet.row_values(row_idx)
with open(save_path+'/changjiangdao.txt','a') as lf:
site=eval(repr(row[1])[1:]).decode('gbk','ignore')
                        # handle the unicode encoding and strip the trailing spaces after the text
site = site.decode('unicode-escape').rstrip()+','
trans_type='1,'
cardnum='0,'
payment_type='1000,'
timestamp = row[3]+','
barcode=str(row[11][0:11])+','
pay=str(row[38])+','
quantity=str(row[31])+','
desc=eval(repr(row[10])[1:]).decode('gbk','ignore')
desc=desc.decode('unicode-escape')+','
price=str(row[35])+','
unitname=row[12]+','
pump_id='0,'
trans_id=str(trans_count)+'\n'
trans_count+=1
lf.write(site+trans_type+cardnum+payment_type+timestamp+barcode+pay+quantity+desc+price+unitname+pump_id+trans_id)
print 'ok'
except Exception,e:
print e
print 'end...'
|
[
"niyoufa@tmlsystem.com"
] |
niyoufa@tmlsystem.com
|
38769bed99e3caf79b45b1c948a5142c38462485
|
dee1aa5ce988f59165a8a651b28f471c468fff99
|
/tributary/lazy/output/__init__.py
|
3d73f9fd9636c043435f23c15dff8fc754280c14
|
[
"Apache-2.0"
] |
permissive
|
ceball/tributary
|
04f22e57048a3cb0375b57cdb30e62f69cf4a380
|
5e30f90d1a5cc176c0f231f525d9dc5a81353925
|
refs/heads/master
| 2022-12-05T20:35:33.631468
| 2020-08-28T13:14:24
| 2020-08-28T13:14:24
| 291,319,040
| 0
| 0
|
Apache-2.0
| 2020-08-29T17:39:39
| 2020-08-29T17:39:38
| null |
UTF-8
|
Python
| false
| false
| 3,819
|
py
|
from ..node import Node
def _print(node, cache=None):
if cache is None:
cache = {}
cache[id(node)] = node
ret = {node: []}
if node._dependencies:
for call, deps in node._dependencies.items():
# callable node
if hasattr(call, '_node_wrapper') and \
call._node_wrapper is not None:
val = call._node_wrapper._print(cache)
ret[node].append(val)
# args
for arg in deps[0]:
val = arg._print(cache)
ret[node].append(val)
# kwargs
for kwarg in deps[1].values():
val = kwarg._print(cache)
ret[node].append(val)
return ret
def Print(node):
return node._print({})
def Graph(node):
return node.print()
def GraphViz(node):
d = node.graph()
from graphviz import Digraph
dot = Digraph(node._name, strict=True)
dot.format = 'png'
def rec(nodes, parent):
for d in nodes:
if not isinstance(d, dict):
if d.isDirty():
dot.node(d._name, color='red', shape=d._graphvizshape)
dot.edge(d._name, parent._name, color='red')
else:
dot.node(d._name, shape=d._graphvizshape)
dot.edge(d._name, parent._name)
else:
for k in d:
if k.isDirty():
dot.node(k._name, color='red', shape=k._graphvizshape)
rec(d[k], k)
dot.edge(k._name, parent._name, color='red')
else:
dot.node(k._name, shape=k._graphvizshape)
rec(d[k], k)
dot.edge(k._name, parent._name)
for k in d:
if k.isDirty():
dot.node(k._name, color='red', shape=k._graphvizshape)
else:
dot.node(k._name, shape=k._graphvizshape)
rec(d[k], k)
return dot
def Dagre(node):
import ipydagred3 as dd3
G = dd3.Graph()
d = Graph(node)
def rec(nodes, parent):
for d in nodes:
if not isinstance(d, dict):
d._dd3g = G
if d.isDirty():
G.setNode(d._name, style='fill: #f00', shape="rect" if d._graphvizshape == "box" else d._graphvizshape)
# G.setEdge(d._name, parent._name, style='stroke: #f00')
else:
G.setNode(d._name, style='fill: #fff', shape="rect" if d._graphvizshape == "box" else d._graphvizshape)
G.setEdge(d._name, parent._name, style='stroke: #000')
else:
for k in d:
k._dd3g = G
if k.isDirty():
G.setNode(k._name, style='fill: #f00', shape="rect" if k._graphvizshape == "box" else k._graphvizshape)
rec(d[k], k)
# G.setEdge(k._name, parent._name, style='stroke: #f00')
else:
G.setNode(k._name, style='fill: #fff', shape="rect" if k._graphvizshape == "box" else k._graphvizshape)
rec(d[k], k)
G.setEdge(k._name, parent._name, style='stroke: #000')
for k in d:
k._dd3g = G
if k.isDirty():
G.setNode(k._name, style='fill: #f00', shape="rect" if k._graphvizshape == "box" else k._graphvizshape)
else:
G.setNode(k._name, style='fill: #fff', shape="rect" if k._graphvizshape == "box" else k._graphvizshape)
rec(d[k], k)
graph = dd3.DagreD3Widget(graph=G)
return graph
Node._print = _print
Node.print = Print
Node.graph = Graph
Node.graphviz = GraphViz
Node.dagre = Dagre
|
[
"t.paine154@gmail.com"
] |
t.paine154@gmail.com
|
7e5da0fbf908161bc4084fef3c8bf28c92b54ad9
|
c868d681415d152ba331bd80e0ed542832f20f0e
|
/week 3/todo_project/todo_project/main/migrations/0005_auto_20200205_2301.py
|
819409651f247514434443d5e5acf7e6a3948904
|
[] |
no_license
|
Yeldarmt/BFDjango
|
a297a6b0c00ffb1a269f05c7e6665c5d34a51097
|
b8256ff1d5f2125495df66eabf267fc17e667aeb
|
refs/heads/master
| 2022-11-30T12:45:17.356453
| 2020-04-19T16:50:26
| 2020-04-19T16:50:26
| 233,515,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 906
|
py
|
# Generated by Django 2.0 on 2020-02-05 17:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0004_auto_20200203_1126'),
]
operations = [
migrations.CreateModel(
name='Todo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('description', models.TextField()),
('completed', models.BooleanField(default=False)),
],
),
migrations.DeleteModel(
name='Employee',
),
migrations.RemoveField(
model_name='myuser',
name='name',
),
migrations.RemoveField(
model_name='myuser',
name='sur_name',
),
]
|
[
"eldarmukhametkazin@gmail.com"
] |
eldarmukhametkazin@gmail.com
|
3467e9fbec6ceb28a2b2a98d25b2a0dbb03e4122
|
78e60a7d8a67ed76244004e8a3ed573fbf396e41
|
/samples/sq__unbind_skill.py
|
5e488f7e1a0d6a6147070822fc075df323fa31d5
|
[
"MIT"
] |
permissive
|
Crivez/apiclient-python
|
837a9f7cc0453ccd3121311adc7920b5fe6b3e33
|
860fc054f546152a101e29b1af388c381075ac47
|
refs/heads/master
| 2023-06-08T13:24:09.249704
| 2021-06-17T12:16:35
| 2021-06-17T12:16:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
from voximplant.apiclient import VoximplantAPI, VoximplantException
if __name__ == "__main__":
voxapi = VoximplantAPI("credentials.json")
# Unbind the skill with id = 1 from the user with id = 1.
APPLICATION_ID = 1
USER_ID = 1
SQ_SKILL_ID = 1
try:
res = voxapi.sq__unbind_skill(APPLICATION_ID,
USER_ID,
SQ_SKILL_ID)
print(res)
except VoximplantException as e:
print("Error: {}".format(e.message))
|
[
"andrey@voximplant.com"
] |
andrey@voximplant.com
|
21282b4075722ef249ada742b5404b049ef993c0
|
9d278285f2bc899ac93ec887b1c31880ed39bf56
|
/ondoc/diagnostic/migrations/0014_auto_20180427_1159.py
|
32ba48033c717f5b1bc268bb1082d320a0021623
|
[] |
no_license
|
ronit29/docprime
|
945c21f8787387b99e4916cb3ba1618bc2a85034
|
60d4caf6c52a8b70174a1f654bc792d825ba1054
|
refs/heads/master
| 2023-04-01T14:54:10.811765
| 2020-04-07T18:57:34
| 2020-04-07T18:57:34
| 353,953,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 993
|
py
|
# Generated by Django 2.0.2 on 2018-04-27 06:29
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('diagnostic', '0013_auto_20180426_1939'),
]
operations = [
migrations.AddField(
model_name='lab',
name='onboarding_status',
field=models.PositiveSmallIntegerField(choices=[(1, 'Not Onboarded'), (2, 'Onboarding Request Sent'), (3, 'Onboarded')], default=1),
),
migrations.AddField(
model_name='labonboardingtoken',
name='email',
field=models.EmailField(blank=True, max_length=100),
),
migrations.AddField(
model_name='labonboardingtoken',
name='mobile',
field=models.BigIntegerField(blank=True, null=True, validators=[django.core.validators.MaxValueValidator(9999999999), django.core.validators.MinValueValidator(1000000000)]),
),
]
|
[
"arunchaudhary@policybazaar.com"
] |
arunchaudhary@policybazaar.com
|
b255bd1e4fb2df23e823fb53929c10a3c852f996
|
98801e91bf1a78c5903449082113ecc154cd020e
|
/src/dron/notify/ntfy_desktop.py
|
369cf3cf6b8d8ba4eea5002b5597032c988ce1d5
|
[] |
no_license
|
karlicoss/dron
|
bcec62e3602fa12134fdb6b86cc54f839086eba5
|
395d8a259b083b86f3128240bfa8f905fa255921
|
refs/heads/master
| 2023-06-10T07:12:20.799184
| 2023-06-04T23:41:33
| 2023-06-04T23:48:07
| 236,066,875
| 39
| 2
| null | 2023-06-04T23:48:08
| 2020-01-24T19:14:14
|
Python
|
UTF-8
|
Python
| false
| false
| 302
|
py
|
#!/usr/bin/env python3
from .common import get_parser, IS_SYSTEMD
from .ntfy_common import run_ntfy
BACKEND = 'linux' if IS_SYSTEMD else 'darwin'
def main() -> None:
p = get_parser()
args = p.parse_args()
run_ntfy(job=args.job, backend=BACKEND)
if __name__ == '__main__':
main()
|
[
"karlicoss@gmail.com"
] |
karlicoss@gmail.com
|
a3afa5a2a3d5d7657e6d618c5fd7fedd42af7d4e
|
a550aece79bda789826b463280b91abffbf2d372
|
/django_projects/chat_app_channels/chatapp/chat/consumers.py
|
f0aa98c0798399935500a605d8bf556123c8a97f
|
[
"MIT"
] |
permissive
|
phiratio/learn_python
|
20376470eaa292c157fd01f52b3077e3a983cd5a
|
a32240d4355fb331805d515f96e1d009914e5c47
|
refs/heads/master
| 2022-11-27T07:07:45.712373
| 2020-12-03T22:04:31
| 2020-12-03T22:04:31
| 189,397,679
| 1
| 0
|
MIT
| 2022-11-22T04:40:27
| 2019-05-30T10:56:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,288
|
py
|
import json
from channels.generic.websocket import AsyncWebsocketConsumer
class ChatConsumer(AsyncWebsocketConsumer):
async def connect(self):
self.room_name = self.scope['url_route']['kwargs']['room_name']
self.room_group_name = 'chat_%s' % self.room_name
# Join room group
await self.channel_layer.group_add(
self.room_group_name,
self.channel_name
)
await self.accept()
async def disconnect(self, close_code):
# Leave room group
await self.channel_layer.group_discard(
self.room_group_name,
self.channel_name
)
# Receive message from WebSocket
async def receive(self, text_data):
text_data_json = json.loads(text_data)
message = text_data_json['message']
# Send message to room group
await self.channel_layer.group_send(
self.room_group_name,
{
'type': 'chat_message',
'message': message
}
)
# Receive message from room group
async def chat_message(self, event):
message = event['message']
# Send message to WebSocket
await self.send(text_data=json.dumps({
'message': message
}))
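# --- Added usage sketch (not part of the original file) ---
# A consumer like this is wired through a URL router; a minimal sketch assuming
# the standard Channels 2 tutorial layout (the project's routing module is not
# shown in this file):
#
#   # chat/routing.py
#   from django.conf.urls import url
#   from . import consumers
#
#   websocket_urlpatterns = [
#       url(r'^ws/chat/(?P<room_name>[^/]+)/$', consumers.ChatConsumer),
#   ]
#
# Channels 3+ would register consumers.ChatConsumer.as_asgi() instead.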
|
[
"phiratio161@gmail.com"
] |
phiratio161@gmail.com
|
30df387384a195945b78fe44a457618949568134
|
98e761a1702351df3b3db91e4ee832ae25d213d1
|
/test/db_predict2.py
|
0c33754dac31c6939eb33c91f9c38e337e28bad6
|
[] |
no_license
|
jack139/face-test
|
ed637fdabace49c969dac8abbd12d2e80c589fec
|
3907bf1e84c1e346b4429da0e8ca919ca6404098
|
refs/heads/master
| 2023-01-18T18:33:24.812823
| 2020-11-23T13:32:22
| 2020-11-23T13:32:22
| 315,326,106
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,085
|
py
|
# -*- coding: utf-8 -*-
# Recognize in parallel using two algorithm models
import os, sys
import base64
#from datetime import datetime
from models.predict_plus import predict_parallel, predict_thread_db
if __name__ == "__main__":
if len(sys.argv)<4:
print("usage: python3 %s <knn|keras> <group_id> <test dir or file>" % sys.argv[0])
sys.exit(2)
#from facelib import api_func
classifier = sys.argv[1]
group_id = sys.argv[2]
test_thing = sys.argv[3]
if classifier not in ['knn', 'keras']:
print('invalid classifier!')
sys.exit(3)
if os.path.isdir(test_thing):
images = os.listdir(test_thing)
images = [os.path.join(test_thing, i) for i in images]
else:
images = [ test_thing ]
# Using the trained classifier, make predictions for unknown images
for image_file in images:
print("Looking for faces in {}".format(image_file))
with open(image_file, 'rb') as f:
image_data = f.read()
image_b64 = base64.b64encode(image_data)
# Find all people in the image using a trained classifier model
# Note: You can pass in either a classifier file name or a classifier model instance
#predictions = api_func.face_search('', image_b64, group_id)
#start_time = datetime.now()
predictions = predict_parallel(predict_thread_db, image_b64, group_id, classifier=classifier)
#print('[Time taken: {!s}]'.format(datetime.now() - start_time))
# Print results on the console
for name, (top, right, bottom, left), distance, count in predictions:
print("- Found {} at ({}, {}), distance={}, count={}".format(name, left, top, distance, count))
#for i in predictions:
# print("- Found {} at {}, distance={}".format(i['user_id'], i['location'], i['score']))
if len(predictions)==0:
print('Face not found!')
#print(predictions)
# Display results overlaid on an image
#knn.show_prediction_labels_on_image(image_file, predictions)
|
[
"jack139@gmail.com"
] |
jack139@gmail.com
|
2ddf079941f4eeee653c7ce2ed639d720e32d599
|
2e10e4f2d5372a82e63377813ff765b876624c30
|
/promenade/builder.py
|
394e14f7e4861ddd1e536805ad94025d33734eb2
|
[
"Apache-2.0"
] |
permissive
|
chnyda/airship-promenade
|
6ecdda3def775810733d41c88a4ce0391eaf7739
|
006f1b790772aa7f08852f2409d4c125e1c9f423
|
refs/heads/master
| 2020-03-20T01:01:11.409606
| 2018-06-20T07:17:36
| 2018-06-20T07:17:36
| 137,064,477
| 0
| 0
| null | 2018-06-12T11:52:41
| 2018-06-12T11:52:41
| null |
UTF-8
|
Python
| false
| false
| 4,512
|
py
|
from . import logging, renderer
import io
import itertools
import os
import requests
import stat
import tarfile
__all__ = ['Builder']
LOG = logging.getLogger(__name__)
class Builder:
def __init__(self, config, *, validators=False):
self.config = config
self.validators = validators
self._file_cache = None
@property
def file_cache(self):
if not self._file_cache:
self._build_file_cache()
return self._file_cache
def _build_file_cache(self):
self._file_cache = {}
for file_spec in self._file_specs:
path = file_spec['path']
if 'content' in file_spec:
data = file_spec['content']
elif 'tar_url' in file_spec:
data = _fetch_tar_content(
url=file_spec['tar_url'], path=file_spec['tar_path'])
self._file_cache[path] = {
'path': path,
'data': data,
'mode': file_spec['mode'],
}
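    # Added note: file_spec shapes consumed above, inferred from this method
    # (paths and values are illustrative, not from promenade's documentation):
    #   {'path': '/etc/hostname', 'content': 'node-1\n', 'mode': 0o644}
    #   {'path': '/opt/cni/bin/loopback', 'tar_url': 'https://...',
    #    'tar_path': 'bin/loopback', 'mode': 0o755}
    # A spec carrying neither 'content' nor 'tar_url' would leave `data` unbound.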
@property
def _file_specs(self):
return itertools.chain(
self.config.get_path('HostSystem:files', []),
self.config.get_path('Genesis:files', []))
def build_all(self, *, output_dir):
self.build_genesis(output_dir=output_dir)
for node_document in self.config.iterate(
schema='promenade/KubernetesNode/v1'):
self.build_node(node_document, output_dir=output_dir)
if self.validators:
validate_script = renderer.render_template(
self.config, template='scripts/validate-cluster.sh')
_write_script(output_dir, 'validate-cluster.sh', validate_script)
def build_genesis(self, *, output_dir):
LOG.info('Building genesis script')
sub_config = self.config.extract_genesis_config()
tarball = renderer.build_tarball_from_roles(
config=sub_config,
roles=['common', 'genesis'],
file_specs=self.file_cache.values())
script = renderer.render_template(
sub_config,
template='scripts/genesis.sh',
context={
'tarball': tarball
})
_write_script(output_dir, 'genesis.sh', script)
if self.validators:
validate_script = renderer.render_template(
sub_config, template='scripts/validate-genesis.sh')
_write_script(output_dir, 'validate-genesis.sh', validate_script)
def build_node(self, node_document, *, output_dir):
node_name = node_document['metadata']['name']
LOG.info('Building script for node %s', node_name)
script = self.build_node_script(node_name)
_write_script(output_dir, _join_name(node_name), script)
if self.validators:
validate_script = self._build_node_validate_script(node_name)
_write_script(output_dir, 'validate-%s.sh' % node_name,
validate_script)
def build_node_script(self, node_name):
sub_config = self.config.extract_node_config(node_name)
file_spec_paths = [
f['path'] for f in self.config.get_path('HostSystem:files', [])
]
file_specs = [self.file_cache[p] for p in file_spec_paths]
tarball = renderer.build_tarball_from_roles(
config=sub_config, roles=['common', 'join'], file_specs=file_specs)
return renderer.render_template(
sub_config,
template='scripts/join.sh',
context={
'tarball': tarball
})
def _build_node_validate_script(self, node_name):
sub_config = self.config.extract_node_config(node_name)
return renderer.render_template(
sub_config, template='scripts/validate-join.sh')
def _fetch_tar_content(*, url, path):
LOG.debug('Fetching url=%s (tar path=%s)', url, path)
response = requests.get(url)
response.raise_for_status()
LOG.debug('Finished downloading url=%s (tar path=%s)', url, path)
f = io.BytesIO(response.content)
tf = tarfile.open(fileobj=f, mode='r')
buf_reader = tf.extractfile(path)
return buf_reader.read()
def _join_name(node_name):
return 'join-%s.sh' % node_name
def _write_script(output_dir, name, script):
path = os.path.join(output_dir, name)
with open(path, 'w') as f:
f.write(script)
os.chmod(
path,
os.stat(path).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
|
[
"mark.m.burnett@gmail.com"
] |
mark.m.burnett@gmail.com
|
3aaa90180fa18a62d598af790848d69ced4297d4
|
56be7f6b6a1243c532af9ea98310ccea165a1e66
|
/day9/课件/2-并发编程/线程/1.线程.py
|
4c93970dcacc26f1ea5fd6bbb63b6619f8af13fe
|
[] |
no_license
|
214031230/Python21
|
55b0405ec4ad186b052cde7ebfb3f4bb636a3f30
|
d7fc68d3d23345df5bfb09d4a84686c8b49a5ad7
|
refs/heads/master
| 2021-05-26T06:00:53.393577
| 2019-01-09T02:29:04
| 2019-01-09T02:29:04
| 127,778,172
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# What is a process: the smallest unit of computer resource allocation
# What is a thread
# Relationship between threads and processes:
#     every process contains at least one thread
# Characteristics of threads in Python
# Characteristics of threads in other languages
# import os
# import time
# from threading import Thread
# n = 100
# def func(i):
# global n
# time.sleep(1)
# n -= 1
# print(os.getpid(),'thread%s'%i)
# t_l = []
# for i in range(100):
# t = Thread(target=func,args=(i,))
# t.start()
# t_l.append(t)
# for t in t_l:t.join()
# print('main : ',n)
# Each process has at least one main thread responsible for executing code
# A new thread can be started from within the main thread
# Then two threads are working at the same time inside one process
# The thread is the smallest unit of CPU scheduling
# Data is shared between the threads of a process
# GIL: the Global Interpreter Lock
# A quirk of the interpreter, specifically the CPython interpreter
# Within one process, only one thread can be executed by the CPU at any moment
# So compute-heavy code is a poor fit for Python multithreading
# Use multiprocessing or distributed computing for compute-heavy code
|
[
"214031230@qq.com"
] |
214031230@qq.com
|
07cde02db4967803b942d8b688c42a2d75a4dfd1
|
d8ff8d809fcff5f8370e317d837485648cc6ac9b
|
/repr_test.py
|
7a91cc106da55a3a694d406c14e6e30f848cef3e
|
[] |
no_license
|
xiabofei/python_details
|
c9b1ebfdc9574201b8ac21ebd8aa5e0e8442d3de
|
1d6950d0fc32997e6f6e6cb269cd1ef4bb233c2f
|
refs/heads/master
| 2020-04-02T06:35:05.659746
| 2019-04-05T06:11:58
| 2019-04-05T06:11:58
| 60,343,232
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
#encoding=utf8
"""
werkzeug/routing/
Rule
the __repr__ method:
the information printed when an object is printed directly
"""
class Test(object):
def __repr__(self):
return "<%s, called>" % (self.__class__.__name__)
t = Test()
print t
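# Expected output (added): <Test, called>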
|
[
"xbf9xbf@qq.com"
] |
xbf9xbf@qq.com
|
f99d9eb488f96ead2b6615f8f842d81f126d62a7
|
1c21fa248091e31c362b95afafc5021211e85e63
|
/invensis_pmc/customer/migrations/0011_remove_customer_services_required.py
|
f754435ba5eeda7264f3ca535292b5a98b54cd81
|
[] |
no_license
|
anudeepnaidu95/dev5
|
3d3252a51fccbb794e78a91681708e1b3c1ce0d4
|
7351244b79be242aa2cad36dbe1adca22a744edc
|
refs/heads/master
| 2021-01-20T12:28:07.286078
| 2017-05-05T11:08:37
| 2017-05-05T11:08:37
| 90,365,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-14 14:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('customer', '0010_auto_20160714_1930'),
]
operations = [
migrations.RemoveField(
model_name='customer',
name='services_required',
),
]
|
[
"anudeepnaidu95@gmail.com"
] |
anudeepnaidu95@gmail.com
|
acd99f69d096e095274ab74784eeb2d609a3a1d9
|
2b6df7c9f1ffbda9d46eda14a62010dac6cfe6da
|
/app/utils.py
|
9802bc03b16c95b262732b9479c811f1203dca51
|
[] |
no_license
|
its-arpit/tranageapp
|
355e03a362fe14f2cd992b4fa3021806bc4cc4e9
|
657859135f492cb0f58b532671ee799060aa5afa
|
refs/heads/master
| 2023-06-04T12:54:53.956808
| 2021-06-11T16:10:23
| 2021-06-11T16:10:23
| 376,124,298
| 0
| 0
| null | 2021-06-11T19:41:13
| 2021-06-11T19:30:24
| null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.core.mail import send_mail
import math, random
def generateOTP() :
digits = "0123456789"
OTP = ""
for i in range(4) :
OTP += digits[math.floor(random.random() * 10)]
return OTP
def send_email_otp(request):
email=request.GET.get ("email")
print(email)
o=generateOTP()
    htmlgen = '<p>Your OTP is <strong>{}</strong></p>'.format(o)  # interpolate the OTP (original embedded a literal "o")
send_mail('OTP request',o,'<your gmail id>',[email], fail_silently=False, html_message=htmlgen)
return HttpResponse(o)
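# --- Added note (not in the original) ---
# random.random() is not cryptographically secure; for real OTPs the stdlib
# `secrets` module is the usual choice. A minimal alternative sketch:
#
#   import secrets
#   def generate_otp_secure(length=4):
#       return ''.join(secrets.choice("0123456789") for _ in range(length))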
|
[
"vkvineet66@gmail.com"
] |
vkvineet66@gmail.com
|
f4acc1b6983de406da0a4d2d27544abda966e6da
|
e82b761f53d6a3ae023ee65a219eea38e66946a0
|
/All_In_One/addons/learnbgame_hops/operators/misc/triangulate_ngons.py
|
392d2147834108f25c1e15670053e2c33eba33e5
|
[] |
no_license
|
2434325680/Learnbgame
|
f3a050c28df588cbb3b14e1067a58221252e2e40
|
7b796d30dfd22b7706a93e4419ed913d18d29a44
|
refs/heads/master
| 2023-08-22T23:59:55.711050
| 2021-10-17T07:26:07
| 2021-10-17T07:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,310
|
py
|
import bpy
class HOPS_OT_TriangulateNgons(bpy.types.Operator):
bl_idname = "hops.triangulate_ngons"
bl_label = "triangulate ngons"
bl_description = "triangulate ngons"
bl_options = {"REGISTER"}
@classmethod
def poll(cls, context):
return True
def execute(self, context):
bpy.ops.object.convert(target='MESH')
for obj in bpy.context.selected_objects:
bpy.context.view_layer.objects.active = obj
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.mesh.select_face_by_sides(number=4, type='GREATER')
bpy.ops.mesh.quads_convert_to_tris(quad_method='BEAUTY', ngon_method='BEAUTY')
bpy.ops.object.editmode_toggle()
return {"FINISHED"}
class HOPS_OT_TriangulateModifier(bpy.types.Operator):
bl_idname = "hops.triangulate_mod"
bl_label = "triangulate mod"
bl_description = "triangulate mod"
bl_options = {"REGISTER"}
@classmethod
def poll(cls, context):
return True
def execute(self, context):
selected = context.selected_objects
for obj in selected:
obj.modifiers.new(name="Triangulate", type="TRIANGULATE")
return {"FINISHED"}
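# Added usage sketch: once the addon registers these classes (registration
# happens elsewhere in the package), they are invoked via their bl_idname:
#   bpy.ops.hops.triangulate_ngons()
#   bpy.ops.hops.triangulate_mod()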
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
681d91cef8af005ef2529196c8d13b4eddf0314d
|
dc39ccc50b7d34e5de84f3cc132c5cc096a32656
|
/Sanic/4-NamedURLRedirect/main.py
|
035aee847b4d83b28db14dd020c1e8de99eb6971
|
[] |
no_license
|
Shukladas1115/Python
|
0947aefd62a9ce4c3140360cb7259b031368709c
|
feb32bc2e2e7df377fc2d92330bfdacb83f31a55
|
refs/heads/master
| 2022-02-20T04:15:56.036495
| 2019-08-26T16:36:52
| 2019-08-26T16:36:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
from sanic import Sanic
from sanic import response
app = Sanic(__name__)
@app.route('/')
async def index(request):
# generate a URL for the endpoint `post_handler`
url = app.url_for('post_handler', post_id=5)
# the URL is `/posts/5`, redirect to it
return response.redirect(url)
@app.route('/posts/<post_id>')
async def post_handler(request, post_id):
return response.text('Post - {}'.format(post_id))
if __name__ == '__main__':
app.run(host="0.0.0.0", port=8000, debug=True)
|
[
"minhhien90@gmail.com"
] |
minhhien90@gmail.com
|
5a7c1fe7ce5663916fe48e08b1c9a759329dca0c
|
9bdd421f0bd5cb30a0429e11b23bd85ed34b006a
|
/account/views.py
|
019f9c68589e98f18cf1df20bd508d7642623be2
|
[] |
no_license
|
MrAch26/yugioh_django_proj
|
8f0f0fbf0cb6e4ec4fac8757a7236fbb08099689
|
9cd363a3ab9019c92973454dab5eb812894c4c37
|
refs/heads/main
| 2023-03-27T21:53:45.240113
| 2020-10-25T07:39:22
| 2020-10-25T07:39:22
| 305,049,382
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,289
|
py
|
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from django.urls import reverse, reverse_lazy
from django.views.generic import CreateView, UpdateView, DetailView
from account.forms import UserSignupForm, ProfileViewForm
from account.models import Profile
from trading_cards.models import Card, Trade
class UserSignUp(CreateView):
template_name = "registration/signup.html"
model = User
form_class = UserSignupForm
success_url = 'home'
failed_message = "The User couldn't be added"
def form_valid(self, form):
user_to_add = form.cleaned_data
# check the data we get when the form is valid
print("user_to_add", user_to_add)
super(UserSignUp, self).form_valid(form)
# inherit from ModelFormMixin : form_valid(form)
# Saves the form instance, sets the current object for the view,
# and redirects to get_success_url().
print("---------form valid")
# The form is valid, automatically sign-in the user
user = authenticate(self.request, username=form.cleaned_data['username'],
password=form.cleaned_data['password1'])
if user is None:
print("---------user none")
# User not validated for some reason, return standard form_valid() response
# Inherit from TemplateResponseMixin :
# render_to_response(context, **response_kwargs)¶
return self.render_to_response(
self.get_context_data(form=form,
failed_message=self.failed_message))
else:
print("-----------user good")
# Log the user in
login(self.request, user)
# Redirect to success url
return redirect(reverse(self.get_success_url()))
class ProfileView(UpdateView):
model = Profile
template_name = 'profile.html'
form_class = ProfileViewForm
success_url = reverse_lazy('home')
def my_deck(request):
trade = Trade.objects.all()
return render(request, 'my_deck.html', {'trade': trade})
class MyCard(DetailView):
model = Card
# todo: add details view for deck if relevant Maybe if time
|
[
"MrAch26@users.noreply.github.com"
] |
MrAch26@users.noreply.github.com
|
68f3fb9a96aa5c00e2fb8dedab67d2f23725c127
|
edb88981aa1420af7e074068ed7818b9d904a3dd
|
/trunk/minds/test/test_app_httpserver.py
|
91a8c2880f3ee0c0508f61d743a9000402944147
|
[] |
no_license
|
BackupTheBerlios/mindretrieve-svn
|
101c0f1dfc25d20d5f828b6fd0d43301b773af4e
|
463745fcf1c1d5b1f6c201c30bcc339c99b437ed
|
refs/heads/master
| 2021-01-22T13:57:31.225772
| 2006-04-28T04:24:43
| 2006-04-28T04:24:43
| 40,801,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,765
|
py
|
"""
"""
import StringIO
import unittest
from minds.safe_config import cfg as testcfg
from minds import app_httpserver
class AppHTTPRequestHandlerFixture(app_httpserver.AppHTTPRequestHandler):
def __init__(self):
pass
class TestAppHTTPRequestHandler(unittest.TestCase):
def _test_lookup(self, url, expected):
handler = AppHTTPRequestHandlerFixture()
self.assertEqual(handler._lookup_cgi(url), expected)
def test_lookup_cgi(self):
from minds.cgibin import history
from minds.cgibin import weblib
from minds.cgibin import weblibMultiForm
self._test_lookup('', (weblib, '/', '', ''))
self._test_lookup('/', (weblib, '/', '', ''))
self._test_lookup('/history/item?1', (history, '/history', '/item', '1'))
self._test_lookup('/weblib/multiform/100', (weblibMultiForm, '/weblib/multiform', '/100', ''))
class TestMisc(unittest.TestCase):
def test_convertPath2Module1(self):
self.assertEqual(
app_httpserver._convertPath2Module(r'./minds\admin\tmpl/home.html'),
('minds.admin.tmpl.home','home'),
)
def test_convertPath2Module2(self):
self.assertEqual(
app_httpserver._convertPath2Module(r'./minds\admin\snoop'),
('minds.admin.snoop','snoop'),
)
def test_convertPath2Module3(self):
self.assertEqual(
app_httpserver._convertPath2Module(r'/minds/admin/snoop.py'),
('minds.admin.snoop','snoop'),
)
class TestCGIFileFilter(unittest.TestCase):
DATA1 = """date:04/19/05\r
\r
line1
line2
"""
DATA2 = """line3
line4"""
def setUp(self):
self.buf = StringIO.StringIO()
self.fp = app_httpserver.CGIFileFilter(self.buf)
def test1(self):
self.fp.write('\r\n\r\n')
self.fp.flush()
self.assertEqual(self.buf.getvalue(), 'HTTP/1.0 200 OK\r\n\r\n\r\n')
def test_nodirective(self):
self.fp.write(self.DATA1)
self.fp.write(self.DATA2)
self.fp.flush()
self.assertEqual(self.buf.getvalue(), 'HTTP/1.0 200 OK\r\n' +
self.DATA1 + self.DATA2)
def test_status(self):
self.fp.write('404 not found\r\n')
self.fp.write(self.DATA1)
self.fp.write(self.DATA2)
self.fp.flush()
self.assertEqual(self.buf.getvalue(), 'HTTP/1.0 404 not found\r\n'
+ self.DATA1 + self.DATA2)
def test_location(self):
self.fp.write('loCATion : http://abc.com/index.html\r\n')
self.fp.write(self.DATA1)
self.fp.write(self.DATA2)
self.fp.flush()
self.assertEqual(self.buf.getvalue(),
"""HTTP/1.0 302 Found\r
loCATion : http://abc.com/index.html\r
""" + \
self.DATA1 + self.DATA2)
def test_states(self):
# verify CGIFileFilter has gone through each state
self.assertEqual(self.fp.state, self.fp.INIT)
self.fp.write('200 ok\r\n\r\n')
self.assertEqual(self.fp.state, self.fp.BUFFER)
self.fp.write('.'*(self.fp.MAX_BUFFER+1))
self.assertEqual(self.fp.state, self.fp.SENT)
buf_size = len(self.buf.getvalue())
self.assert_(buf_size > self.fp.MAX_BUFFER+1) # some HTTP info + content
# still accepting output at SENT state
self.fp.write('.')
self.assertEqual(len(self.buf.getvalue()), buf_size+1)
def test_buffer(self):
# verify data is buffered until flush
self.fp.write('200 ok\r\n\r\n')
self.fp.write('.')
self.assertEqual(len(self.buf.getvalue()), 0)
self.fp.flush()
self.assert_(len(self.buf.getvalue()) > 0)
if __name__ == '__main__':
unittest.main()
|
[
"tungwaiyip@785ff9d5-dded-0310-b5f2-a5aff206d990"
] |
tungwaiyip@785ff9d5-dded-0310-b5f2-a5aff206d990
|
3668b4615b62655571841c3fe2962d8a50e0b33f
|
1b5ab3f252069181b5e07d4d6d177ab82e942e51
|
/Homework3/Part 1/tt1.py
|
e219c5bb06a667aa68f443395c6215ac7c9e253b
|
[] |
no_license
|
phamhailongg/C4T9
|
59214081224f37b356e209d57f0865632dccc8f6
|
c400005012fb349c1388dd92c8e590322bb203e4
|
refs/heads/master
| 2021-07-06T11:10:05.283974
| 2019-05-05T21:46:04
| 2019-05-05T21:46:04
| 152,599,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 185
|
py
|
from turtle import *
speed(10)
color("red")
for i in range(4):
lt(120)
fd(100)
lt(-60)
fd(100)
lt(-120)
fd(100)
lt(-60)
fd(100)
lt(30)
mainloop()
|
[
"longphamhai123@gmail.com"
] |
longphamhai123@gmail.com
|
f7a0a2fa0e865a49765a53208422402c335ba849
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_discomposing.py
|
bafc1a64c9a8785f9dbb06454a7311b879d585bc
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
#class header
class _DISCOMPOSING():
	def __init__(self,):
		self.name = "DISCOMPOSING"
		self.definitions = ['discompose']  # fixed: was the bare name `discompose`, which is undefined (NameError)
		self.parents = []
		self.childen = []
		self.properties = []
		self.jsondata = {}
		self.basic = ['discompose']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
dd9ea44609ed4c96315ef9e7285fbe5f871730ce
|
30bd7e8abe0a15fbb8f1b1e4a3a9a15a3ad124a9
|
/romans/src/utils/roman.py
|
c2a15aca81d4265a81dc04ee59ee231d8b7b4fca
|
[] |
no_license
|
TiagoArrazi/Romans
|
c96cac19a36e5e89ea719b084693b2af0f6e1cf2
|
f2841931fb9b7428acdc4604dae0535508002781
|
refs/heads/master
| 2020-07-12T01:53:44.220206
| 2019-08-27T12:31:34
| 2019-08-27T12:31:34
| 204,688,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,057
|
py
|
from resources.symbols import symbols
class Roman:
@classmethod
def make_it_roman(cls, number):
if 900 <= int(number) <= 3000:
mult = divmod(int(number), 1000)
if mult[0] > 0 and mult[1] == 0:
return symbols["1000"] * mult[0]
c_amount = (1000 - int(number)) // 100
if c_amount > 0:
return f"{symbols['100']}{symbols['1000']}"
if c_amount < 0:
return f"{symbols['1000']}{abs(c_amount) * symbols['100']}"
elif 400 <= int(number) <= 800:
if number == "500":
return symbols["500"]
c_amount = (500 - int(number)) // 100
if c_amount > 0:
return f"{symbols['100']}{symbols['500']}"
if c_amount < 0:
return f"{symbols['500']}{abs(c_amount) * symbols['100']}"
elif 90 <= int(number) <= 300:
mult = divmod(int(number), 100)
if mult[0] > 0 and mult[1] == 0:
return symbols["100"] * mult[0]
c_amount = (100 - int(number)) // 10
if c_amount > 0:
return f"{symbols['10']}{symbols['100']}"
if c_amount < 0:
return f"{symbols['100']}{abs(c_amount) * symbols['10']}"
elif 40 <= int(number) <= 80:
if number == "50":
return symbols["50"]
c_amount = (50 - int(number)) // 10
if c_amount > 0:
return f"{symbols['10']}{symbols['50']}"
if c_amount < 0:
return f"{symbols['50']}{abs(c_amount) * symbols['10']}"
elif 9 <= int(number) <= 30:
mult = divmod(int(number), 10)
if mult[0] > 0 and mult[1] == 0:
return symbols["10"] * mult[0]
c_amount = (10 - int(number))
if c_amount > 0:
return f"{symbols['1']}{symbols['10']}"
if c_amount < 0:
return f"{symbols['10']}{abs(c_amount) * symbols['1']}"
elif 4 <= int(number) <= 8:
if number == "5":
return symbols["5"]
c_amount = (5 - int(number))
if c_amount > 0:
return f"{symbols['1']}{symbols['5']}"
if c_amount < 0:
return f"{symbols['5']}{abs(c_amount) * symbols['1']}"
else:
return int(number) * symbols["1"]
@classmethod
def convert_digits(cls, number):
try:
if 1 <= int(number) <= 3000:
strip_number_list = [(10 ** index) // 10 * int(n)
for index, n
in zip(range(len(number), 0, -1), number)]
converted_number_list = list()
for item in strip_number_list:
converted_number_list.append(cls.make_it_roman(str(item)))
return ''.join(converted_number_list)
except ValueError:
return False
return False
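# Added usage sketch, assuming resources.symbols maps "1"->"I", "5"->"V",
# "10"->"X", "50"->"L", "100"->"C", "500"->"D", "1000"->"M":
#   Roman.convert_digits("1994")
#   # place values [1000, 900, 90, 4] -> "M" + "CM" + "XC" + "IV" => "MCMXCIV"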
|
[
"tiago_arrazi98@outlook.com"
] |
tiago_arrazi98@outlook.com
|
f14c49b90c661b6ac6e514e6ecfda425e0621418
|
17f2ea360d2cc77ff45ab7b61f0e03d3c9d96247
|
/Stock/Trade/AccountManager/StopMode/DyStockStopLossMaMode.py
|
e53c9179190315d960e3820c1281b459be2d9596
|
[
"MIT"
] |
permissive
|
yutiansut/DevilYuan
|
89aba2728d42a686cf989b74283a5197edfe6b49
|
6467f8c33c4692d3616f0eb0b0bf974d16e95836
|
refs/heads/master
| 2020-03-19T18:43:11.674992
| 2018-06-11T03:17:32
| 2018-06-11T03:17:32
| 136,821,539
| 2
| 1
|
MIT
| 2018-06-11T03:17:33
| 2018-06-10T15:37:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,409
|
py
|
from .DyStockStopMode import *
from ...DyStockTradeCommon import *
class DyStockStopLossMaMode(DyStockStopMode):
stopLossPnlRatio = -5
def __init__(self, accountManager, dataEngine, ma):
super().__init__(accountManager)
self._dataEngine = dataEngine
self._daysEngine = self._dataEngine.daysEngine
self._ma = ma
self._tradeStartTime = '14:55:00'
self._curInit()
def _curInit(self):
self._preparedData = {}
def onOpen(self, date):
self._curInit()
preDate = self._daysEngine.tDaysOffsetInDb(date, -1)
for code in self._accountManager.curPos:
if not self._daysEngine.loadCode(code, [preDate, -self._ma+2], latestAdjFactorInDb=False):
return False
df = self._daysEngine.getDataFrame(code)
if df.shape[0] != (self._ma - 1): return False
self._preparedData[code] = df['close'].values.tolist()
return True
    def _processAdj(self, code, tick):
        """ Handle ex-rights/ex-dividend price adjustment """
if tick.preClose is None: return
if code not in self._preparedData: return False
if code not in self._accountManager.curPos: return False
closes = self._preparedData[code]
if tick.preClose == closes[-1]:
return True
        # adjustment factor
        adjFactor = tick.preClose/closes[-1]
        # adjust the cached close prices
        closes = list(map(lambda x,y:x*y, closes, [adjFactor]*len(closes)))
        closes[-1] = tick.preClose # guard against floating-point precision drift
self._preparedData[code] = closes
return True
def _stopLoss(self, code, tick):
ma = (sum(self._preparedData[code]) + tick.price)/self._ma
pos = self._accountManager.curPos[code]
if tick.price < ma and pos.pnlRatio < self.stopLossPnlRatio:
self._accountManager.closePos(tick.datetime, code, getattr(tick, DyStockTradeCommon.sellPrice), DyStockSellReason.stopLoss)
def onTicks(self, ticks):
for code, pos in self._accountManager.curPos.items():
tick = ticks.get(code)
if tick is None:
continue
if tick.time < self._tradeStartTime:
return
if not self._processAdj(code, tick):
continue
self._stopLoss(code, tick)
def onBars(self, bars):
self.onTicks(bars)
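# Added worked example (illustrative numbers, not from the original source):
# with self._ma = 5 and prepared closes [10.0, 10.2, 10.1, 10.3] for the four
# previous trading days, a tick at price 9.9 gives
#   ma = (10.0 + 10.2 + 10.1 + 10.3 + 9.9) / 5 = 10.1
# so the position is closed only when 9.9 < 10.1 AND its pnlRatio < -5.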
|
[
"louis_chu@163.com"
] |
louis_chu@163.com
|
05ff5f5a599c92b2f689b4a53313597783b6caef
|
727cdc7c9af6fdf6b4eb8444197718e5c6760019
|
/asin_keyword/cookie_sele_local.py
|
f538225e776bc68fc0a60a43531ef76c1f359afa
|
[] |
no_license
|
newer027/amazon_crawler
|
0cc6feb30f9180ae48ac936eeb6af41ec06eadfd
|
39d6867a8dd56b90dae5e98aa44e6df274439f8e
|
refs/heads/master
| 2022-11-23T17:04:33.995126
| 2020-04-03T15:42:42
| 2020-04-03T15:42:42
| 252,774,253
| 1
| 0
| null | 2022-11-22T01:44:53
| 2020-04-03T15:42:31
|
CSS
|
UTF-8
|
Python
| false
| false
| 4,831
|
py
|
import time, pickle
from selenium import webdriver
from .validation import validation, validation_jp
from PIL import Image
from pytesseract import image_to_string
from random import *
from urllib.request import urlretrieve
from bs4 import BeautifulSoup
#from .emails import send_email
from pyvirtualdisplay import Display
import requests, shutil
def get_captcha(driver, element, path):
# now that we have the preliminary stuff out of the way time to get that image :D
location = element.location
size = element.size
# saves screenshot of entire page
driver.save_screenshot(path)
# uses PIL library to open image in memory
image = Image.open(path)
#image.show()
left = location['x']
top = location['y']
right = location['x'] + size['width']
bottom = location['y'] + size['height']
print(left, top, right, bottom)
# image = image.crop((left, top, right, bottom)) # defines crop points
image = image.crop((left*2, top*2, right*2, bottom*2)) # defines crop points
image.save(path, 'jpeg') # saves new cropped image
def validate(driver,country):
im = driver.find_element_by_id('auth-captcha-image')
# im = im.get_attribute('src')
# urlretrieve(im, "captcha.jpeg")
get_captcha(driver,im,"captcha.jpeg")
"""
agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:52.0) Gecko/20100101 Firefox/52.0'
headers = {
'User-Agent': agent,
'Host': "opfcaptcha-prod.s3.amazonaws.com",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, sdch, br",
"Accept-Language": "zh-CN,zh;q=0.8",
"Connection": "keep-alive"
}
print(im)
response = requests.get(im, stream=True, headers=headers)
with open('captcha.jpeg', 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
del response
"""
im = 'captcha.jpeg'
im = Image.open(im)
im = im.convert('L')
def initTable(threshold=140):
table = []
for i in range(256):
if i < threshold:
table.append(0)
else:
table.append(1)
return table
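    # Added note: Image.point with this 0/1 table binarizes the grayscale
    # captcha; with threshold=140 a pixel value of 120 maps to 0 (black) and
    # 200 maps to 1 (white), which generally helps Tesseract's accuracy.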
binaryImage = im.point(initTable(), '1')
binaryImage.show()
characters = image_to_string(binaryImage,config='-psm 7')
char_list = characters.split(' ')
characters = ''.join(char_list)
print(characters)
search_box = driver.find_element_by_id('ap_password')
if country=='jp':
for i in validation_jp['password']:
time.sleep(0.8-random()*0.5)
search_box.send_keys(i)
else:
for i in validation['password']:
time.sleep(0.8-random()*0.5)
search_box.send_keys(i)
time.sleep(4)
search_box = driver.find_element_by_id('auth-captcha-guess')
for i in characters:
time.sleep(0.5-random()*0.2)
search_box.send_keys(i)
time.sleep(3)
driver.find_element_by_id('signInSubmit').click()
# amazon = AmazonAPI('AKIAJ2TPWCFJMKXPSJVQ','ixmfea5B2xKFukyuR/aiBzkI6f+umvISvYlzzBBy','newer027-20')
# asin="B01LCDJ7LG"
# ean='0848061039726'
# product = amazon.lookup(ItemId=asin)
# ean = product.ean
# print(ean)
# driver = webdriver.Chrome("/Users/Jacob/Desktop/amazon_keyword/chromedriver")
def cookie_sele(country):
# display = Display(visible=0, size=(1920, 1080)).start()
# driver = webdriver.Firefox()
product_url_am = "https://vendorexpress.amazon.com/home?ref=VE_LANDING"
product_url_eu = "https://vendorexpress.amazon.eu/home?ref=VE_LANDING"
product_url_jp = "https://vendorexpress.amazon.co.jp/home?ref=VE_LANDING"
driver = webdriver.Chrome("/Users/Jacob/Desktop/amazon_keyword/chromedriver")
try:
if country=='am':
driver.get(product_url_am)
start_url="https://vendorexpress.amazon.com/ap/signin"
cookies="cookies_am.pkl"
elif country=='eu':
driver.get(product_url_eu)
start_url="https://vendorexpress.amazon.eu/ap/signin"
cookies="cookies_eu.pkl"
else:
driver.get(product_url_jp)
start_url="https://vendorexpress.amazon.co.jp/ap/signin"
cookies="cookies_jp.pkl"
search_box = driver.find_element_by_id('ap_email')
if country=='jp':
search_box.send_keys(validation_jp['id'])
else:
search_box.send_keys(validation['id'])
time.sleep(5)
while driver.current_url.startswith(start_url):
validate(driver,country)
time.sleep(15)
html = driver.page_source
soup = BeautifulSoup(html, "lxml")
print(soup.title.string)
pickle.dump( driver.get_cookies() , open(cookies,"wb"))
finally:
driver.quit()
#display.stop()
|
[
"newer027@gmail.com"
] |
newer027@gmail.com
|
3d7e4e20678a99f2171c8af491263ebaaf9b1f39
|
34a5921552537d96d9680f88b94be1706e5c8f1a
|
/facets/common/consts.py
|
a69b16a8e80bc022fd6cd90a8ebb69d306714fae
|
[
"Apache-2.0"
] |
permissive
|
hunterhector/DDSemantics
|
11f1a85486349627036626d3b638db39f70030fe
|
65235d8897bce403e5d628ed912e516b28254c74
|
refs/heads/master
| 2023-07-13T05:20:13.211363
| 2023-06-21T21:44:37
| 2023-06-21T21:44:37
| 123,484,643
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
"""Define constants"""
MENTION_START_TOKEN = "[unused1]"
MENTION_END_TOKEN = "[unused2]"
HEADER_END_TOKEN = "[unused3]"
CLS = "[CLS]"
SEP = "[SEP]"
|
[
"hunterhector@gmail.com"
] |
hunterhector@gmail.com
|
2897a346fb526a6e0e57f8e45f21e07c4f5a4bb0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02267/s584392095.py
|
6a5daa87a557535ac58fe4e73ddfee745deeec4f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
n = int(input())
s = [int(x) for x in input().split()]
q = int(input())
t = [int(x) for x in input().split()]
cnt = 0
for x in t:
if x in s:
cnt += 1
print(cnt)
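# Added note: `x in s` on a list costs O(len(s)) per query; converting once
# with `s = set(s)` would make each membership test O(1) on average.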
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
57ca134de961ddf6a494d2abcf622a29832b057d
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-2/07acc579db839170122fc66505a886ef023d5f4f-<execute_install>-bug.py
|
46e7feb105b1c13945aa837f5c23514abfcee9e9
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,454
|
py
|
def execute_install(self):
    '''
    uses the args list of roles to be installed, unless -f was specified. The list of roles
    can be a name (which will be downloaded via the galaxy API and github), or it can be a local .tar.gz file.
    '''
role_file = self.options.role_file
if ((len(self.args) == 0) and (role_file is None)):
raise AnsibleOptionsError('- you must specify a user/role name or a roles file')
no_deps = self.options.no_deps
force = self.options.force
roles_left = []
if role_file:
try:
f = open(role_file, 'r')
if (role_file.endswith('.yaml') or role_file.endswith('.yml')):
try:
required_roles = yaml.safe_load(f.read())
except Exception as e:
raise AnsibleError(('Unable to load data from the requirements file: %s' % role_file))
if (required_roles is None):
raise AnsibleError(('No roles found in file: %s' % role_file))
for role in required_roles:
if ('include' not in role):
role = RoleRequirement.role_yaml_parse(role)
display.vvv(('found role %s in yaml file' % str(role)))
if (('name' not in role) and ('scm' not in role)):
raise AnsibleError('Must specify name or src for role')
roles_left.append(GalaxyRole(self.galaxy, **role))
else:
with open(role['include']) as f_include:
try:
roles_left += [GalaxyRole(self.galaxy, **r) for r in (RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))]
except Exception as e:
msg = 'Unable to load data from the include requirements file: %s %s'
raise AnsibleError((msg % (role_file, e)))
else:
display.deprecated('going forward only the yaml format will be supported', version='2.6')
for rline in f.readlines():
if (rline.startswith('#') or (rline.strip() == '')):
continue
display.debug(('found role %s in text file' % str(rline)))
role = RoleRequirement.role_yaml_parse(rline.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
f.close()
except (IOError, OSError) as e:
raise AnsibleError(('Unable to open %s: %s' % (role_file, str(e))))
else:
for rname in self.args:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
for role in roles_left:
if (role_file and self.args and (role.name not in self.args)):
display.vvv(('Skipping role %s' % role.name))
continue
display.vvv(('Processing role %s ' % role.name))
if (role.install_info is not None):
if ((role.install_info['version'] != role.version) or force):
if force:
display.display(('- changing role %s from %s to %s' % (role.name, role.install_info['version'], (role.version or 'unspecified'))))
role.remove()
else:
display.warning(('- %s (%s) is already installed - use --force to change version to %s' % (role.name, role.install_info['version'], (role.version or 'unspecified'))))
continue
elif (not force):
display.display(('- %s is already installed, skipping.' % str(role)))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning(('- %s was NOT installed successfully: %s ' % (role.name, str(e))))
self.exit_without_ignore()
continue
if ((not no_deps) and installed):
role_dependencies = (role.metadata.get('dependencies') or [])
for dep in role_dependencies:
display.debug(('Installing dep %s' % dep))
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, **dep_info)
if (('.' not in dep_role.name) and ('.' not in dep_role.src) and (dep_role.scm is None)):
continue
if (dep_role.install_info is None):
if (dep_role not in roles_left):
display.display(('- adding dependency: %s' % str(dep_role)))
roles_left.append(dep_role)
else:
display.display(('- dependency %s already pending installation.' % dep_role.name))
elif (dep_role.install_info['version'] != dep_role.version):
display.warning(('- dependency %s from role %s differs from already installed version (%s), skipping' % (str(dep_role), role.name, dep_role.install_info['version'])))
else:
display.display(('- dependency %s is already installed, skipping.' % dep_role.name))
if (not installed):
display.warning(('- %s was NOT installed successfully.' % role.name))
self.exit_without_ignore()
return 0
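# Added sketch: a minimal requirements file this parser accepts (assuming the
# standard ansible-galaxy YAML format implied by role_yaml_parse above):
#
#   # requirements.yml
#   - src: geerlingguy.nginx
#     version: "2.7.0"
#   - include: more_requirements.yml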
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
e3f8022857e30db6341da4c8f88e97f4e7063f57
|
6d8faae66dd6332836bb11d7f02d6867c95d2a65
|
/glast/pointlike/python/uw/like/likelihood_fit.py
|
7dec093d277b975937d18b773ffedc0d0b11c596
|
[] |
no_license
|
Areustle/fermi-glast
|
9085f32f732bec6bf33079ce8e2ea2a0374d0228
|
c51b821522a5521af253973fdd080e304fae88cc
|
refs/heads/master
| 2021-01-01T16:04:44.289772
| 2017-09-12T16:35:52
| 2017-09-12T16:35:52
| 97,769,090
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,382
|
py
|
"""A module providing functionality for parametrizing a likelihood curve
by a simple function.
Classes:
LogLikelihood: a representation of a likelihood curve
Authors: Eric Wallace, Matthew Kerr
"""
__version__ = "$Revision: 1.1.2.1 $"
#$Header: /glast/ScienceTools/glast/pointlike/python/uw/like/Attic/likelihood_fit.py,v 1.1.2.1 2015/08/13 18:03:08 jasercio Exp $
import numpy as np
import scipy.optimize as opt
class LogLikelihood(object):
"""A representation of a log likelihood curve by a Poisson-like function
The representation used here follows the approach of Nolan, et al. The
likelihood is represented by a three-parameter function of a form similar
to the Poisson distribution PMF. The peak is found by maximizing the
provided log likelihood function. The parameters are then found by a least
squares fit using the peak, two points equispaced around it, and zero.
The parametrizing function is
f(s) = logL - logL_max = n*np.log(e*(s+b)) - e*(s+b) - n*np.log(n) + n
with n = e*(s_peak+b)
"""
def __init__(self,loglike,initial_value=1e-10,fit_max = True,pars=None):
"""Create a LogLikelihood instance.
loglike: The log likelihood function to be parametrized. Should be a
callable with one argument.
initial_value: An initial guess at the maximum of the provided
function. The default of 1e-10 should be a reasonable guess for
the normalization of a PowerLaw model.
fit_max: Whether to use fmin to maximize the log likelihood. If False
initial_value will be taken as the position of the maximum of the
log likelihood, so this should only be set to False if the value
passed as initial_value is the result of a previous maximization
of the provided function.
pars: A length three sequence providing the values for the parameters
of the fit function: s_peak,e, and b. If provided, these values
will be used and the loglike argument will be ignored.
"""
self.function = self._setup_function(loglike)
self.saved_points = np.array([])
self.saved_values = np.array([])
if pars is not None:
try:
assert(hasattr(pars,'__iter__') and len(pars)==3)
self.pars = pars
except AssertionError:
print('Keyword argument pars must be a sequence of length 3.')
print('Will attempt to derive parameters from provided function')
self.pars = self._find_pars(initial_value,fit_max = fit_max)
else:
self.pars = self._find_pars(initial_value,fit_max = fit_max)
self._check_agreement()
def _setup_function(self,function):
"""Setup caching of values passed to the log likelihood function."""
def _function(x):
if x in self.saved_points:
ll = self.saved_values[self.saved_points==x][0]
else:
ll = function(x)
self.saved_points = np.append(self.saved_points,x)
self.saved_values = np.append(self.saved_values,ll)
return ll
return _function
def _find_pars(self,initial_value,fit_max = False):
"""Find the best fit parameters for the fit function"""
if fit_max:
self.mode = opt.fmin(lambda x: -self.function(x),initial_value)[0]
else:
self.mode = initial_value
self.max = self.function(self.mode)
#xs = np.array([0,max/2,max,max*2])
#ys = np.array([self.function(x) for x in xs])
xs = self.saved_points.copy()
ys = self.saved_values.copy()
ys = ys - ys.max()
return opt.leastsq(lambda x:self._poisson(x,xs)-ys,np.array([self.mode,10/self.mode,xs[-1]]),maxfev=5000)[0]
def _poisson(self,pars,s):
"""Calculate the value of the parametrizing function for some parameters.
pars: A sequence of length 3 providing the parameters s_peak, e, and b.
s: The point at which to evaluate the function. Can be a numpy array.
"""
if pars[0]<0: return -1e10
s_peak,e,b = pars[0],pars[1],pars[2];n = e*(s_peak+b)
#logL - logL_max = n*np.log(e*(s+b))-e*(s+b) - n*np.log(e*(s_peak+b))+e*(s_peak+b)
#simplified:
return n*np.log((s+b)/(s_peak+b)) + e*(s_peak-s)
def __call__(self,x):
"""Return the value of the parametrizing function at point x."""
return self._poisson(self.pars,x) + self.max
def find_logl_change(self,initial_value,delta_logl):
"""Find the points where the likelihood has decreased by delta_logl.
Returns a tuple of the (low, high) values. If the likelihood at zero
differs from the max by less than the specified change, return zero
for the lower value.
"""
#First, find lower value
lo = 1e-20 #basically zero
hi = initial_value
ll_0 = self.function(hi)
if ll_0-self.function(lo)>delta_logl:
for i in xrange(20):
avg = .5*(hi+lo)
ll = self.function(avg)
if ll_0-ll<delta_logl: hi = avg
else: lo = avg
if abs(ll_0-ll-delta_logl)<.01: break
lo_val = avg
else: lo_val = lo
#Now the upper value
lo = initial_value
hi = initial_value*10
while ll_0-self.function(hi)<delta_logl: hi+=1
for i in xrange(20):
avg = .5*(lo+hi)
ll = self.function(avg)
if ll_0-ll<delta_logl: lo = avg
else: hi = avg
if abs(ll_0-ll-delta_logl)<.01: break
hi_val = avg
return (lo_val,hi_val)
def _check_agreement(self):
lo,hi = self.find_logl_change(self.mode,10)
lo_ll,hi_ll = self.function(lo),self.function(hi)
lo_val,hi_val = self(lo),self(hi)
if abs(1-lo_ll/lo_val) > .05:
print("Warning: fit function differs from log likelihood by {0:.02}\% in the low tail".format((1-lo_ll/lo_val)*100))
if abs(1-hi_ll/hi_val) > .05:
            print("Warning: fit function differs from log likelihood by {0:.02}\% in the high tail".format((1-hi_ll/hi_val)*100))
def ts(self):
return self(self.mode)-self(0)
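# --- Added usage sketch (not part of the original module) ---
# A toy log-likelihood peaked near s = 2e-10 exercises the fit; the function
# and numbers below are illustrative only.
#
#   toy = lambda s: -0.5 * ((s - 2e-10) / 1e-10) ** 2
#   ll = LogLikelihood(toy, initial_value=1e-10)
#   print(ll.pars)                             # fitted (s_peak, e, b)
#   print(ll.find_logl_change(ll.mode, 0.5))   # points where logL drops by 0.5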
|
[
"areustledev@gmail.com"
] |
areustledev@gmail.com
|
a95c88307396ee0164e6f263644fc07b185a3d85
|
1089bc72856fe3ef0edd4b17b2f07b8ec5de8e14
|
/firecares/settings/base.py
|
1090ad22ec43635bb3cf9e8ae536318c7280b299
|
[
"MIT"
] |
permissive
|
JWileczek/firecares
|
e521c9d9f829fc60f13c2d051be89b5feadb5fc0
|
dd82e6e720cdaaf0bacd7a2cc51669341a29ffae
|
refs/heads/master
| 2020-12-25T12:41:04.124970
| 2015-08-30T15:38:56
| 2015-08-30T15:38:56
| 41,690,086
| 0
| 0
| null | 2015-08-31T17:29:12
| 2015-08-31T17:29:12
| null |
UTF-8
|
Python
| false
| false
| 7,905
|
py
|
import os
from kombu import Queue
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': os.getenv('DATABASE_NAME', 'firecares'),
'USER': os.getenv('DATABASE_USER', 'firecares'),
'PASSWORD': os.getenv('DATABASE_PASSWORD', 'password'),
'HOST': os.getenv('DATABASE_HOST', 'localhost'),
'PORT': os.getenv('DATABASE_PORT', '5432'),
},
'nfirs': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': os.getenv('NFIRS_DATABASE_NAME', 'nfirs'),
'USER': os.getenv('NFIRS_DATABASE_USER', 'firecares'),
'PASSWORD': os.getenv('NFIRS_DATABASE_PASSWORD', 'password'),
'PORT': os.getenv('NFIRS_DATABASE_PORT', '5432'),
'HOST': os.getenv('NFIRS_DATABASE_HOST', 'localhost'),
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = '/var/www/firecares/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '/var/www/firecares/static/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '$keb7sv^%c+_7+94u6_!lc3%a-3ima9eh!xyj%$xa8yibv&ogr'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"firecares.firecares_core.context_processors.third_party_tracking_ids",
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'firecares.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'firecares.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'autocomplete_light',
'django.contrib.admin',
'django.contrib.gis',
'django.contrib.humanize',
'firecares.firecares_core',
'firecares.firestation',
'firecares.usgs',
'jsonfield',
'compressor',
'storages',
'widget_tweaks',
'firecares.tasks',
'registration'
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Celery settings.
BROKER_URL = os.getenv('BROKER_URL', 'amqp://guest:guest@127.0.0.1//')
CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'amqp')
AWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME', None)
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID', None)
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY', None)
MAPBOX_ACCESS_TOKEN = os.getenv('MAPBOX_ACCESS_TOKEN', None)
GOOGLE_ANALYTICS_TRACKING_ID = os.getenv('GOOGLE_ANALYTICS_TRACKING_ID', None)
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login'
CELERY_DEFAULT_QUEUE = "default"
CELERY_DEFAULT_EXCHANGE = "default"
CELERY_DEFAULT_EXCHANGE_TYPE = "direct"
CELERY_DEFAULT_ROUTING_KEY = "default"
CELERY_CREATE_MISSING_QUEUES = True
CELERY_IMPORTS = (
'firecares.tasks.cache',
'firecares.tasks.update',
)
CELERY_QUEUES = [
Queue('default', routing_key='default'),
Queue('cache', routing_key='cache'),
Queue('update', routing_key='update'),
Queue('email', routing_key='email'),
]
ACCOUNT_ACTIVATION_DAYS = 7
REGISTRATION_OPEN = False
EMAIL_HOST = os.getenv('EMAIL_HOST', 'localhost')
EMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD', '')
EMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER', '')
EMAIL_PORT = os.getenv('EMAIL_PORT', 25)
EMAIL_SUBJECT_PREFIX = '[FireCARES] '
SERVER_EMAIL = os.getenv('SERVER_EMAIL', '')
DEFAULT_FROM_EMAIL = os.getenv('DEFAULT_FROM_EMAIL', '')
|
[
"garnertb@gmail.com"
] |
garnertb@gmail.com
|
4ceda0b049891a9c2963a7c0c48c3f511140ac69
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/HundredRun/PG_0132+151/sdB_PG_0132+151_lc.py
|
8918441d2824176148a8e7a956f7ebd08c4bc153
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[356.757495,15.400942], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_PG_0132+151 /sdB_PG_0132+151_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
a8824158345cddc59be0477b3353fbdb3dbef6da
|
fc2d2163e790741de0c0e1aa337948cfeb5b6ba9
|
/tests/benchmarks/micro/NestedFunctionClosure.py
|
925ad5c31e3bbdfcba39ca286facd4a95fe0b59e
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
nmoehrle/Nuitka
|
bcd20531f150ada82c8414620dca6c5424be64d1
|
317d1e4e49ef8b3bdfe2f80f2464040d644588b2
|
refs/heads/master
| 2023-06-22T09:56:23.604822
| 2017-11-29T14:10:01
| 2017-11-29T14:10:01
| 122,110,166
| 0
| 0
|
Apache-2.0
| 2018-02-19T19:29:05
| 2018-02-19T19:29:05
| null |
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
# Copyright 2017, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
x = 1
def f():
c = x+1
def g():
return c
return g()
def caller():
for i in range(10000):
f()
if __name__ == "__main__":
caller()
|
[
"kay.hayen@gmail.com"
] |
kay.hayen@gmail.com
|
c4070e57949127be2bf575ae160cb07672a86fd4
|
064404a6e65dc4bb78624e47fb8010615e20fbe8
|
/opsgenie_sdk/api/alert/add_details_to_alert_payload.py
|
bbc3a87ef3d5178e5be8868d78923a57e5e51352
|
[
"Apache-2.0"
] |
permissive
|
lyongjie20/opsgenie-python-sdk
|
97de823d958995f44b1934c1aaf1b5740a8efd1e
|
0d20d2314522fc0fd8ca5f0faa16f7c96387e123
|
refs/heads/master
| 2023-07-01T14:31:27.379893
| 2021-08-02T13:30:07
| 2021-08-02T13:30:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,705
|
py
|
# coding: utf-8
"""
Python SDK for Opsgenie REST API
Python SDK for Opsgenie REST API # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: support@opsgenie.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class AddDetailsToAlertPayload(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'user': 'str',
'note': 'str',
'source': 'str',
'details': 'dict(str, str)'
}
attribute_map = {
'user': 'user',
'note': 'note',
'source': 'source',
'details': 'details'
}
def __init__(self, user=None, note=None, source=None, details=None): # noqa: E501
"""AddDetailsToAlertPayload - a model defined in OpenAPI""" # noqa: E501
self._user = None
self._note = None
self._source = None
self._details = None
self.discriminator = None
if user is not None:
self.user = user
if note is not None:
self.note = note
if source is not None:
self.source = source
self.details = details
@property
def user(self):
"""Gets the user of this AddDetailsToAlertPayload. # noqa: E501
Display name of the request owner # noqa: E501
:return: The user of this AddDetailsToAlertPayload. # noqa: E501
:rtype: str
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this AddDetailsToAlertPayload.
Display name of the request owner # noqa: E501
:param user: The user of this AddDetailsToAlertPayload. # noqa: E501
:type: str
"""
self._user = user
@property
def note(self):
"""Gets the note of this AddDetailsToAlertPayload. # noqa: E501
Additional note that will be added while creating the alert # noqa: E501
:return: The note of this AddDetailsToAlertPayload. # noqa: E501
:rtype: str
"""
return self._note
@note.setter
def note(self, note):
"""Sets the note of this AddDetailsToAlertPayload.
Additional note that will be added while creating the alert # noqa: E501
:param note: The note of this AddDetailsToAlertPayload. # noqa: E501
:type: str
"""
self._note = note
@property
def source(self):
"""Gets the source of this AddDetailsToAlertPayload. # noqa: E501
Source field of the alert. Default value is IP address of the incoming request # noqa: E501
:return: The source of this AddDetailsToAlertPayload. # noqa: E501
:rtype: str
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this AddDetailsToAlertPayload.
Source field of the alert. Default value is IP address of the incoming request # noqa: E501
:param source: The source of this AddDetailsToAlertPayload. # noqa: E501
:type: str
"""
self._source = source
@property
def details(self):
"""Gets the details of this AddDetailsToAlertPayload. # noqa: E501
Key-value pairs to add as custom property into alert. # noqa: E501
:return: The details of this AddDetailsToAlertPayload. # noqa: E501
:rtype: dict(str, str)
"""
return self._details
@details.setter
def details(self, details):
"""Sets the details of this AddDetailsToAlertPayload.
Key-value pairs to add as custom property into alert. # noqa: E501
:param details: The details of this AddDetailsToAlertPayload. # noqa: E501
:type: dict(str, str)
"""
if details is None:
raise ValueError("Invalid value for `details`, must not be `None`") # noqa: E501
self._details = details
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AddDetailsToAlertPayload):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
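# Minimal usage sketch, added for illustration; the values are hypothetical
# and this block is not part of the generated SDK module.
if __name__ == '__main__':
    payload = AddDetailsToAlertPayload(
        user='Jane Doe',
        note='Attaching runbook link',
        details={'runbook': 'https://example.com/runbook'},  # required, must not be None
    )
    print(payload.to_dict())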
|
[
"zafer@opsgenie.com"
] |
zafer@opsgenie.com
|
50a0e0a631826408e3f3cd6fd38ce599131e4588
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2506/60832/280621.py
|
f1113858470ab2074adaab3a1b0c3b72695ba34b
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
import numpy as np
# Read comma-separated integers and print the length of the longest
# strictly increasing subsequence, via an O(n^2) dynamic program.
ar = list(map(int, input().split(',')))
length = len(ar)
if length == 0:
    print(0)
    exit()
# opt[i]: LIS length over the first i + 1 elements.
# Max[i]: smallest tail value achievable by an LIS of length opt[i]
#         within the first i + 1 elements.
Max = np.zeros(length)
opt = np.zeros(length)
opt[0] = 1
Max[0] = ar[0]
for i in range(1, length):
    a = opt[i - 1]   # best length if ar[i] does not extend anything
    temp = ar[i]
    j = i - 1
    has = False
    # Search backwards for the latest prefix whose best-LIS tail is below ar[i].
    for j in range(i - 1, -1, -1):
        if Max[j] < temp:
            has = True
            break
    if has:
        b = opt[j] + 1   # ar[i] extends that subsequence
    else:
        b = 1            # ar[i] starts a new subsequence
    if b > a:
        Max[i] = ar[i]
        opt[i] = b
    elif b == a:
        # Equal lengths: keep the smaller tail to ease future extensions.
        Max[i] = min(Max[i - 1], ar[i])
        opt[i] = b
    else:
        Max[i] = Max[i - 1]
        opt[i] = a
print(int(opt[length - 1]))
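# Worked example (added for illustration): for the input line
#   1,5,2,4,3
# one longest strictly increasing subsequence is 1, 2, 4, so this prints 3.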
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
d5edadbe66cf157c9e28ddd27acc016d3102d6ac
|
abef98cfa3fb2c4626eb8c0a77c1080992d9b11b
|
/python/bindiff/bindiff.py
|
00885d7c9351425c339702dff4eee4738485d185
|
[] |
no_license
|
mikebentley15/sandbox
|
ff88ed9dc4b9ac37668142a319d0a8162e88e9e3
|
4f5869544de18be21f415a9d6f9b71c362307f27
|
refs/heads/main
| 2023-04-14T00:22:34.623441
| 2023-03-24T21:43:56
| 2023-03-24T21:43:56
| 116,987,549
| 6
| 3
| null | 2022-10-26T03:02:06
| 2018-01-10T17:14:54
|
C++
|
UTF-8
|
Python
| false
| false
| 1,521
|
py
|
#!/usr/bin/env python3
'''
Diff two binary files byte by byte. Do not try to do insertions or deletions,
just a straight side-by-side comparison.
'''
import sys
import argparse
def parse_args(arguments):
'Parse and return parsed arguments'
parser = argparse.ArgumentParser(
description='''
Diff two binary files byte-by-byte. This is a simple comparison
operation, so no attempts to align based on insertions or
deletions, just a straight side-by-side comparison.
''')
parser.add_argument('file1')
parser.add_argument('file2')
args = parser.parse_args(arguments)
return args
def count_byte_diffs(file1, file2):
'Return # bytes different between file1 and file2 side-by-side'
diff_bytes = 0
with open(file1, 'rb') as fin1:
with open(file2, 'rb') as fin2:
while True:
c1 = fin1.read(1)
c2 = fin2.read(1)
# Handle end of file
if c1 == bytes():
return diff_bytes + len(c2) + len(fin2.read())
if c2 == bytes():
return diff_bytes + len(c1) + len(fin1.read())
# Diff
if c1 != c2:
diff_bytes += 1
def main(arguments):
'Main logic here'
args = parse_args(arguments)
diff_bytes = count_byte_diffs(args.file1, args.file2)
print(diff_bytes, 'bytes are different')
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
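# Usage sketch (added; the file names are placeholders):
#   python3 bindiff.py build_a.bin build_b.bin
# which prints, e.g., "42 bytes are different".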
|
[
"mikebentley15@gmail.com"
] |
mikebentley15@gmail.com
|
a0acdd42e55260598a360131a282d5f7852e0d57
|
ef187d259d33e97c7b9ed07dfbf065cec3e41f59
|
/work/atcoder/abc/abc054/D/answers/105471_s484.py
|
e511180c2fae7d4bf9960eae07c6d72b6540daf4
|
[] |
no_license
|
kjnh10/pcw
|
847f7295ea3174490485ffe14ce4cdea0931c032
|
8f677701bce15517fb9362cc5b596644da62dca8
|
refs/heads/master
| 2020-03-18T09:54:23.442772
| 2018-07-19T00:26:09
| 2018-07-19T00:26:09
| 134,586,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
# Read N packages; package i supplies a grams of substance A and b grams of
# substance B at cost c. Find the minimum cost of a subset whose A and B
# totals are in the exact ratio Ma:Mb, or -1 if no subset works.
N, Ma, Mb = map(int, input().split())
G = []
for _ in range(N):
    a, b, c = map(int, input().split())
    G.append((a, b, c))
# Bound the table by the larger grand sum so every reachable (A, B) state is
# addressable (the original used min, which can miss feasible lopsided mixes).
M = 1 + max(sum([a for a, b, c in G]),
            sum([b for a, b, c in G]))
INF = 1000000000
# dp[i*M + j]: minimum cost to reach exactly i grams of A and j grams of B.
dp = [INF] * (M * M)
dp[0] = 0
for a, b, c in G:
    # Sweep indices downwards so each package is used at most once (0/1 knapsack).
    i = M - a - 1
    while i >= 0:
        j = M - b - 1
        while j >= 0:
            dp[(i + a) * M + (j + b)] = min(dp[(i + a) * M + (j + b)],
                                            dp[i * M + j] + c)
            j -= 1
        i -= 1
# Any positive multiple (Ma*x, Mb*x) of the target ratio is acceptable.
ans = INF
x = 1
while Ma * x < M and Mb * x < M:
    ans = min(ans, dp[Ma * x * M + Mb * x])
    x += 1
if ans >= INF:
    ans = -1
print(ans)
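# Worked example (added for illustration). Input:
#   3 1 1
#   1 2 1
#   2 1 2
#   3 3 10
# Packages 1 and 2 together give 3 g of A and 3 g of B (ratio 1:1) for cost
# 1 + 2 = 3, beating package 3 alone (cost 10), so this prints 3.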
|
[
"kojinho10@gmail.com"
] |
kojinho10@gmail.com
|
1b2863c931ac97cc512170299b6e7d5844ead205
|
add72f4d6f9f7af1f437d19213c14efb218b2194
|
/icekit/page_types/author/tests.py
|
afcd6534c4231a59e310a4f281e7a4b7faa61d11
|
[
"MIT"
] |
permissive
|
ic-labs/django-icekit
|
6abe859f97c709fcf51207b54778501b50436ff7
|
c507ea5b1864303732c53ad7c5800571fca5fa94
|
refs/heads/develop
| 2022-08-08T21:26:04.144852
| 2018-01-08T02:55:17
| 2018-01-08T02:55:17
| 65,470,395
| 53
| 12
|
MIT
| 2022-07-06T19:59:39
| 2016-08-11T13:11:02
|
Python
|
UTF-8
|
Python
| false
| false
| 2,128
|
py
|
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django_dynamic_fixture import G
from django_webtest import WebTest
from . import models
User = get_user_model()
class AuthorTests(WebTest):
def setUp(self):
self.staff_1 = User.objects.create(
email='test@test.com',
is_staff=True,
is_active=True,
is_superuser=True,
)
# used to make the author's URL
self.author_listing = models.AuthorListing.objects.create(
author=self.staff_1,
title="Authors",
slug="authors",
)
self.author_1 = G(models.Author)
self.author_2 = G(models.Author)
def test_get_absolute_url(self):
self.assertEqual(
self.author_1.get_absolute_url(),
'/authors/%s/' % (
self.author_1.slug
)
)
def test_admin(self):
admin_app_list = (
('icekit_authors_author', self.author_1),
)
for admin_app, obj in admin_app_list:
response = self.app.get(
reverse('admin:%s_changelist' % admin_app),
user=self.staff_1
)
self.assertEqual(response.status_code, 200)
response = self.app.get(reverse('admin:%s_add' % admin_app), user=self.staff_1)
self.assertEqual(response.status_code, 200)
response = self.app.get(
reverse('admin:%s_history' % admin_app, args=(obj.id,)),
user=self.staff_1
)
self.assertEqual(response.status_code, 200)
response = self.app.get(
reverse('admin:%s_delete' % admin_app, args=(obj.id,)),
user=self.staff_1
)
self.assertEqual(response.status_code, 200)
response = self.app.get(
reverse('admin:%s_change' % admin_app, args=(obj.id,)),
user=self.staff_1
)
self.assertEqual(response.status_code, 200)
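# To run this suite (a sketch; the exact test label depends on how the app is
# registered in INSTALLED_APPS, and this note is not part of the original file):
#   ./manage.py test icekit.page_types.author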
|
[
"greg@interaction.net.au"
] |
greg@interaction.net.au
|