blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
136a39269006d806aaa99a0bac22de80bf3cab16 | 5a0b6cc5f6e0c19503fb4eb713cfa5c8a6f657d1 | /apps/accounts/forms.py | a777ce3364ff9763a8e79ad60147081e419d5410 | [] | no_license | ehoversten/django_user_auth | 5069780a70c3c7ce8707b107907f45e9a98e0583 | 91f5bc75f5bcc95c714f55f6ebcb2b4b50083d47 | refs/heads/master | 2020-04-02T03:15:27.770591 | 2018-10-21T23:47:48 | 2018-10-21T23:47:48 | 153,954,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class UserCreateForm(UserCreationForm):
    """Registration form extending Django's ``UserCreationForm``.

    Adds a required email address and required first/last names on top of
    the standard username/password pair.
    """

    # Declared here (not only in Meta.fields) so extra validation such as
    # required=True runs before is_valid() succeeds.
    email = forms.EmailField(required=True)
    first_name = forms.CharField(max_length=30, required=True)
    last_name = forms.CharField(max_length=30, required=True)

    class Meta:
        model = User
        fields = ("username", "email", "password1", "password2")

    def save(self, commit=True):
        """Copy the extra cleaned fields onto the model and persist it.

        Any post-processing of the data (e.g. normalising case) could be
        added here before the instance is saved.
        """
        user = super(UserCreateForm, self).save(commit=False)
        user.email = self.cleaned_data["email"]
        user.first_name = self.cleaned_data["first_name"]
        user.last_name = self.cleaned_data["last_name"]
        if commit:
            user.save()
        return user
| [
"ehoversten@gmail.com"
] | ehoversten@gmail.com |
97e39489a1008862592dc919c505fe0a8e088228 | d346c1e694e376c303f1b55808d90429a1ad3c3a | /medium/284.Peeking_Iterator.py | 1fc3679df11e721e85b5a1d2040e23cc759c49a1 | [] | no_license | littleliona/leetcode | 3d06bc27c0ef59b863a2119cd5222dc94ed57b56 | 789d8d5c9cfd90b872be4a4c35a34a766d95f282 | refs/heads/master | 2021-01-19T11:52:11.938391 | 2018-02-19T03:01:47 | 2018-02-19T03:01:47 | 88,000,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,338 | py | # Below is the interface for Iterator, which is already defined for you.
#
class Iterator(object):
def __init__(self, nums):
"""
Initializes an iterator object to the beginning of a list.
:type nums: List[int]
"""
self.L = []
for i,num in enumerate(nums):
def hasNext(self):
"""
Returns true if the iteration has more elements.
:rtype: bool
"""
def next(self):
"""
Returns the next element in the iteration.
:rtype: int
"""
class PeekingIterator(object):
def __init__(self, iterator):
"""
Initialize your data structure here.
:type iterator: Iterator
"""
def peek(self):
"""
Returns the next element in the iteration without advancing the iterator.
:rtype: int
"""
def next(self):
"""
:rtype: int
"""
def hasNext(self):
"""
:rtype: bool
"""
# Your PeekingIterator object will be instantiated and called as such:
iter = PeekingIterator(Iterator(nums))
while iter.hasNext():
val = iter.peek() # Get the next element but not advance the iterator.
iter.next() # Should return the same value as [val].
| [
"aria@Arias-MacBook-Pro.local"
] | aria@Arias-MacBook-Pro.local |
c5a1e157aea842650f2144ec231bb7166234b266 | 4bfc3c184e736bb68dccbb6d5657f11c950df002 | /tests/operators/vector/test_minimum_ad_001.py | 7d7952f9c327d7e59295b101f0f6af6ed9cc7880 | [
"Apache-2.0",
"Zlib",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] | permissive | laekov/akg | 159aa64ef6135222b5af784c408731275dfa9bdb | 5316b8cb2340bbf71bdc724dc9d81513a67b3104 | refs/heads/master | 2022-12-01T04:09:03.548063 | 2020-08-19T08:38:57 | 2020-08-19T08:41:28 | 288,678,192 | 0 | 0 | Apache-2.0 | 2020-08-19T08:41:30 | 2020-08-19T08:36:53 | Python | UTF-8 | Python | false | false | 2,560 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from base import TestBase
from test_run.minimum_ad_run import minimum_ad_run
class TestCase(TestBase):
    """Test suite for the minimum_ad operator (gradient of elementwise minimum)."""

    def setup(self):
        """set test case """
        case_name = "test_minimum_ad_001"
        case_path = os.getcwd()

        # params init
        self.params_init(case_name, case_path)

        self.caseresult = True
        self._log.info("============= %s Setup case============", self.casename)
        # Each entry: (testflag, opfuncname, testRunArgs) where testRunArgs is
        # (shape, dtype, grad_x, grad_y) -- presumably consumed by
        # minimum_ad_run; confirm against test_run/minimum_ad_run.py.
        self.testarg = [
            # testflag,opfuncname,testRunArgs, setdimArgs
            ("minimum_ad_001", minimum_ad_run, ((2, 2, 2), "int32", True, True)),
            ("minimum_ad_002", minimum_ad_run, ((2, 2), "float16", True, False)),
            ("minimum_ad_003", minimum_ad_run, ((2, 3, 3, 4), "int32", False, True)),
        ]
        # Additional cases executed only on the rpc_cloud target.
        self.testarg_rpc_cloud = [
            # testflag,opfuncname,testRunArgs, setdimArgs
            ("minimum_ad_001", minimum_ad_run, ((2, 3, 3, 4), "float32", False, True)),
            ("minimum_ad_002", minimum_ad_run, ((2, 2, 1), "float16", True, True)),
            ("minimum_ad_003", minimum_ad_run, ((2, 3, 3, 4), "int32", False, True)),
            ("minimum_ad_004", minimum_ad_run, ((16, 16), "float16", True, False)),
            ("minimum_ad_005", minimum_ad_run, ((8, 16), "int32", True, True)),
        ]

    @pytest.mark.rpc_mini
    @pytest.mark.level1
    @pytest.mark.env_onecard
    @pytest.mark.platform_x86_ascend_training
    def test_run(self):
        """
        run case.#
        :return:
        """
        self.common_run(self.testarg)

    @pytest.mark.rpc_cloud
    @pytest.mark.env_onecard
    @pytest.mark.platform_x86_ascend_training
    def test_run_rpc_cloud(self):
        """
        run case.#
        :return:
        """
        self.common_run(self.testarg_rpc_cloud)

    def teardown(self):
        """
        clean environment
        :return:
        """
        # NOTE(review): message says "Setup" inside teardown -- looks
        # copy-pasted from setup(); left unchanged in this doc-only pass.
        self._log.info("============= %s Setup case============", self.casename)
"ckey.chengbin@huawei.com"
] | ckey.chengbin@huawei.com |
3059be9fd2c16b15f1b6fae6da39e0af08430466 | 1f7847055332e16614f5358f0ec39b39bb9a66a7 | /exercises/14_generators/test_task_14_1a.py | 4b50c494e9e7b9b19e0aae6f7a719e0deb3d277b | [] | no_license | satperm/advpyneng-examples-exercises | 6641dae31fa7f44db7e99547bc70d740988f21b9 | 6b12c320cace1d303dae38ddba9b19550a8708ec | refs/heads/master | 2022-12-14T09:28:48.255804 | 2020-09-06T14:14:42 | 2020-09-06T14:14:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,909 | py | import time
import pytest
import task_14_1a
from collections.abc import Generator
import sys
sys.path.append('..')
from common_functions import check_function_exists, check_function_params
def test_func_created():
    """The task module must define a get_intf_ip function."""
    check_function_exists(task_14_1a, 'get_intf_ip')
def test_get_intf_ip_is_generator():
    """get_intf_ip must return a generator, not a materialized collection."""
    result = task_14_1a.get_intf_ip('config_r1.txt')
    assert isinstance(result, Generator), "Надо создать генератор"
def test_get_intf_ip_yield_value():
    """A known (interface, ip, mask) tuple must be among the yielded values."""
    results = list(task_14_1a.get_intf_ip('config_r1.txt'))
    expected = ('Loopback0', '10.1.1.1', '255.255.255.255')
    assert expected in results, "Функция вернула неправильный результат"
def test_get_intf_ip_new_file(tmpdir):
    """get_intf_ip must yield (interface, ip, mask) tuples when reading a
    config written to an arbitrary (temporary) file, skipping interfaces
    without an ip address line."""
    config = (
        '!\n'
        '!\n'
        'interface Loopback0\n'
        ' ip address 192.168.10.1 255.255.255.255\n'
        '!\n'
        'interface Ethernet0/1\n'
        ' no ip address\n'
        '!\n'
        'interface Ethernet0/2\n'
        ' description To P_r9 Ethernet0/2\n'
        ' ip address 192.168.20.1 255.255.255.0\n'
        ' mpls traffic-eng tunnels\n'
        '!\n'
        'ip access-list standard LDP\n'
        ' permit 192.168.20.0 0.0.0.255\n'
        '!\n')
    correct_results = sorted([
        ('Loopback0', '192.168.10.1', '255.255.255.255'),
        ('Ethernet0/2', '192.168.20.1', '255.255.255.0')])
    # Write the config string to a temporary file
    dest_filename = tmpdir.mkdir("test_tasks").join("task_14_1a.txt")
    dest_filename.write(config)
    # Check the result
    return_value = task_14_1a.get_intf_ip(dest_filename)
    assert sorted(return_value) == correct_results, "Функция вернула неправильный результат"
| [
"nataliya.samoylenko@gmail.com"
] | nataliya.samoylenko@gmail.com |
efcca2ae4659bbb38deac84032a8a3aa80631773 | e471252b7259bdeed3b84187fbdfc95b18e6fc78 | /tests/rs_test.py | 3c274d99bf1e9b6b3e2c21a2c8278088b2797c5d | [
"MIT"
] | permissive | sibowi/e3nn | 4b98e2ec77680527cf6f15ac2f4567357cd516db | 1c568226a065a9c5c7127a22f6092ecd4cd0dc2f | refs/heads/master | 2022-08-30T17:36:17.475736 | 2020-05-23T23:04:49 | 2020-05-23T23:04:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,581 | py | # pylint: disable=not-callable, no-member, invalid-name, line-too-long, wildcard-import, unused-wildcard-import, missing-docstring, protected-access
import pytest
import torch
from e3nn import o3, rs
from e3nn.non_linearities.norm import Norm
def test_convention():
    """rs.convention expands shorthand Rs lists into (mul, L, parity) triples."""
    assert rs.convention([0]) == [(1, 0, 0)]
    assert rs.convention([0, (2, 0)]) == [(1, 0, 0), (2, 0, 0)]
def test_simplify():
    """Entries sharing (L, parity) are merged into one multiplicity."""
    assert rs.simplify([(1, 0), 0, (1, 0)]) == [(3, 0, 0)]
def test_irrep_dim():
    """irrep_dim sums 2L+1 per entry, ignoring multiplicities."""
    assert rs.irrep_dim([(1, 0), (3, 1), (2, 2)]) == 1 + 3 + 5
    assert rs.irrep_dim([(1, 0), (3, 0), (2, 0)]) == 1 + 1 + 1
def test_mul_dim():
    """mul_dim sums the multiplicities only."""
    assert rs.mul_dim([(1, 0), (3, 1), (2, 2)]) == 6
    assert rs.mul_dim([(1, 0), (3, 0), (2, 0)]) == 6
def test_dim():
    """dim sums mul * (2L+1) over all entries."""
    assert rs.dim([(1, 0), (3, 1), (2, 2)]) == 1 * 1 + 3 * 3 + 2 * 5
    assert rs.dim([(1, 0), (3, 0), (2, 0)]) == 1 * 1 + 3 * 1 + 2 * 1
def test_map_irrep_to_Rs():
    """map_irrep_to_Rs must expand irrep space into full Rs space."""
    with o3.torch_default_dtype(torch.float64):
        Rs = [(3, 0)]
        mapping_matrix = rs.map_irrep_to_Rs(Rs)
        # One scalar irrep copied into 3 multiplicities: a column of ones.
        assert torch.allclose(mapping_matrix, torch.ones(3, 1))

        Rs = [(1, 0), (1, 1), (1, 2)]
        mapping_matrix = rs.map_irrep_to_Rs(Rs)
        # Single copy of each irrep: the map is the identity.
        assert torch.allclose(mapping_matrix, torch.eye(1 + 3 + 5))
def test_map_mul_to_Rs():
    """map_mul_to_Rs must expand multiplicity space into full Rs space."""
    with o3.torch_default_dtype(torch.float64):
        Rs = [(3, 0)]
        mapping_matrix = rs.map_mul_to_Rs(Rs)
        # Three scalar multiplicities map one-to-one: identity of size 3.
        assert torch.allclose(mapping_matrix, torch.eye(3))

        Rs = [(1, 0), (1, 1), (1, 2)]
        mapping_matrix = rs.map_mul_to_Rs(Rs)
        # Each multiplicity column spreads over its irrep's 2L+1 rows.
        check_matrix = torch.zeros(1 + 3 + 5, 3)
        check_matrix[0, 0] = 1.
        check_matrix[1:4, 1] = 1.
        check_matrix[4:, 2] = 1.
        assert torch.allclose(mapping_matrix, check_matrix)
############################################################################
def test_elementwise_tensor_product():
    """ElementwiseTensorProduct must agree with the mixing matrix returned
    by rs.elementwise_tensor_product applied to the outer product."""
    torch.set_default_dtype(torch.float64)

    Rs_1 = [(3, 0), (2, 1), (5, 2)]
    Rs_2 = [(1, 0), (2, 1), (2, 2), (2, 0), (2, 1), (1, 2)]

    Rs_out, m = rs.elementwise_tensor_product(Rs_1, Rs_2)

    mul = rs.ElementwiseTensorProduct(Rs_1, Rs_2)

    x1 = torch.randn(1, rs.dim(Rs_1))
    x2 = torch.randn(1, rs.dim(Rs_2))

    y1 = mul(x1, x2)
    # Reference: outer product flattened and contracted with the matrix.
    y2 = torch.einsum('zi,zj->ijz', x1, x2)
    y2 = m @ y2.reshape(-1, y2.shape[2])
    y2 = y2.T

    assert rs.dim(Rs_out) == y1.shape[1]
    assert (y1 - y2).abs().max() < 1e-10
############################################################################
def test_tensor_square_equivariance():
    """TensorSquare must commute with rotations: sq(D_in x) == D_out sq(x)."""
    with o3.torch_default_dtype(torch.float64):
        Rs_in = [(3, 0), (2, 1), (5, 2)]

        sq = rs.TensorSquare(Rs_in, o3.selection_rule)

        x = rs.randn(Rs_in)

        # Random rotation represented on input and output spaces.
        abc = o3.rand_angles()
        D_in = rs.rep(Rs_in, *abc)
        D_out = rs.rep(sq.Rs_out, *abc)

        y1 = sq(D_in @ x)
        y2 = D_out @ sq(x)

        assert (y1 - y2).abs().max() < 1e-10
def test_tensor_square_norm():
    """The mixing matrix of tensor_square must satisfy Q Q^T = I
    under 'component' normalization."""
    for Rs_in in [[(1, 0), (1, 1)]]:
        with o3.torch_default_dtype(torch.float64):
            Rs_out, Q = rs.tensor_square(Rs_in, o3.selection_rule, normalization='component', sorted=True)

            I1 = (Q @ Q.t()).to_dense()
            I2 = torch.eye(rs.dim(Rs_out))

            # RMS deviation from the identity.
            d = (I1 - I2).pow(2).mean().sqrt()
            assert d < 1e-10
############################################################################
def test_format():
    """format_Rs renders an empty list as "" and a bare-int entry compactly."""
    assert rs.format_Rs([]) == ""
    assert rs.format_Rs([2]) == "2"
############################################################################
def test_tensor_product_equal_TensorProduct():
    """The TensorProduct module must agree with the (Rs_out, matrix) pair
    returned by rs.tensor_product applied to the outer product."""
    with o3.torch_default_dtype(torch.float64):
        Rs_1 = [(3, 0), (2, 1), (5, 2)]
        Rs_2 = [(1, 0), (2, 1), (2, 2), (2, 0), (2, 1), (1, 2)]

        Rs_out, m = rs.tensor_product(Rs_1, Rs_2, o3.selection_rule, sorted=True)
        mul = rs.TensorProduct(Rs_1, Rs_2, o3.selection_rule)

        x1 = rs.randn(1, Rs_1)
        x2 = rs.randn(1, Rs_2)

        y1 = mul(x1, x2)
        # Reference: mixing matrix applied to the flattened outer product.
        y2 = torch.einsum('zi,zj->ijz', x1, x2)
        y2 = (m @ y2.reshape(rs.dim(Rs_1) * rs.dim(Rs_2), -1)).T

        assert rs.dim(Rs_out) == y1.shape[1]
        assert (y1 - y2).abs().max() < 1e-10 * y1.abs().max()
def test_tensor_product_to_dense():
    """to_dense() must produce a (dim_out, dim_in1, dim_in2) tensor."""
    with o3.torch_default_dtype(torch.float64):
        Rs_1 = [(3, 0), (2, 1), (5, 2)]
        Rs_2 = [(1, 0), (2, 1), (2, 2), (2, 0), (2, 1), (1, 2)]

        mul = rs.TensorProduct(Rs_1, Rs_2, o3.selection_rule)
        assert mul.to_dense().shape == (rs.dim(mul.Rs_out), rs.dim(Rs_1), rs.dim(Rs_2))
def test_tensor_product_symmetry():
    """Applying the selection rule to either input slot must give the same
    result on transposed input."""
    with o3.torch_default_dtype(torch.float64):
        Rs_in = [(3, 0), (2, 1), (5, 2)]
        Rs_out = [(1, 0), (2, 1), (2, 2), (2, 0), (2, 1), (1, 2)]

        mul1 = rs.TensorProduct(Rs_in, o3.selection_rule, Rs_out)
        mul2 = rs.TensorProduct(o3.selection_rule, Rs_in, Rs_out)
        # The inferred input representations must match across slots.
        assert mul1.Rs_in2 == mul2.Rs_in1

        x = torch.randn(rs.dim(Rs_in), rs.dim(mul1.Rs_in2))
        y1 = mul1(x)
        y2 = mul2(x.T)

        assert (y1 - y2).abs().max() < 1e-10
def test_tensor_product_left_right():
    """Calling with two tensors must equal calling with their outer product,
    for every internal completion mode."""
    with o3.torch_default_dtype(torch.float64):
        Rs_1 = [(3, 0), (2, 1), (5, 2)]
        Rs_2 = [(1, 0), (2, 1), (2, 2), (2, 0), (2, 1), (1, 2)]

        mul = rs.TensorProduct(Rs_1, Rs_2, o3.selection_rule)

        x1 = rs.randn(2, Rs_1)
        x2 = rs.randn(2, Rs_2)

        y0 = mul(x1, x2)

        y1 = mul(torch.einsum('zi,zj->zij', x1, x2))
        assert (y0 - y1).abs().max() < 1e-10 * y0.abs().max()

        # Poke the private _complete attribute to exercise both internal
        # code paths and check they produce the same result.
        mul._complete = 'in1'
        y1 = mul(x1, x2)
        assert (y0 - y1).abs().max() < 1e-10 * y0.abs().max()

        mul._complete = 'in2'
        y1 = mul(x1, x2)
        assert (y0 - y1).abs().max() < 1e-10 * y0.abs().max()
@pytest.mark.parametrize('Rs_in1, Rs_in2', [([(1, 0)], [(2, 0)]), ([(3, 1), (2, 2)], [(2, 0), (1, 1), (1, 3)])])
def test_tensor_product_in_in_normalization(Rs_in1, Rs_in2):
    """The full mixing matrix must be orthogonal in both directions."""
    with o3.torch_default_dtype(torch.float64):
        Rs_out, Q = rs.tensor_product(Rs_in1, Rs_in2, o3.selection_rule)

        n = rs.dim(Rs_out)
        I = torch.eye(n)

        d = ((Q @ Q.t()).to_dense() - I).pow(2).mean().sqrt()
        assert d < 1e-10

        d = ((Q.t() @ Q).to_dense() - I).pow(2).mean().sqrt()
        assert d < 1e-10
@pytest.mark.parametrize('Rs_in1, Rs_in2', [([0], [0]), ([4, 2], [3, 4])])
def test_tensor_product_in_in_normalization_norm(Rs_in1, Rs_in2):
    """With 'norm' normalization, products of unit-norm inputs must keep
    output norms of order one (|log10| < 1)."""
    with o3.torch_default_dtype(torch.float64):
        tp = rs.TensorProduct(Rs_in1, Rs_in2, o3.selection_rule, normalization='norm')

        x1 = rs.randn(10, Rs_in1, normalization='norm')
        x2 = rs.randn(10, Rs_in2, normalization='norm')

        n = Norm(tp.Rs_out, normalization='norm')
        x = n(tp(x1, x2)).mean(0)
        assert (x.log10().abs() < 1).all()
@pytest.mark.parametrize('Rs_in1, Rs_out', [([(1, 0)], [(2, 0)]), ([(3, 1), (2, 2)], [(2, 0), (1, 1), (1, 3)])])
def test_tensor_product_in_out_normalization(Rs_in1, Rs_out):
    """Q Q^T must be the identity whichever input slot is left free."""
    with o3.torch_default_dtype(torch.float64):
        n = rs.dim(Rs_out)
        I = torch.eye(n)

        _, Q = rs.tensor_product(Rs_in1, o3.selection_rule, Rs_out)
        d = ((Q @ Q.t()).to_dense() - I).pow(2).mean().sqrt()
        assert d < 1e-10

        _, Q = rs.tensor_product(o3.selection_rule, Rs_in1, Rs_out)
        d = ((Q @ Q.t()).to_dense() - I).pow(2).mean().sqrt()
        assert d < 1e-10
############################################################################
def test_reduce_tensor_Levi_Civita_symbol():
    """A fully antisymmetric rank-3 tensor of vectors must reduce to a
    single scalar (the Levi-Civita symbol), invariant under rotation."""
    Rs, Q = rs.reduce_tensor('ijk=-ikj=-jik', i=[(1, 1)])
    assert Rs == [(1, 0, 0)]

    r = o3.rand_angles()
    D = o3.irr_repr(1, *r)
    Q = Q.reshape(3, 3, 3)
    # Rotating all three indices must leave the basis tensor unchanged.
    Q1 = torch.einsum('li,mj,nk,ijk', D, D, D, Q)
    assert (Q1 - Q).abs().max() < 1e-10
def test_reduce_tensor_antisymmetric_L2():
    """Antisymmetric rank-3 tensor of L=2 irreps: the first output must be
    an L=1 irrep whose basis is equivariant and pairwise antisymmetric."""
    Rs, Q = rs.reduce_tensor('ijk=-ikj=-jik', i=[(1, 2)])
    assert Rs[0] == (1, 1, 0)
    # First three rows of Q form the L=1 block; reshape to tensor indices.
    q = Q[:3].reshape(3, 5, 5, 5)

    r = o3.rand_angles()
    D1 = o3.irr_repr(1, *r)
    D2 = o3.irr_repr(2, *r)

    # Equivariance: rotating the three L=2 indices equals rotating L=1 index.
    Q1 = torch.einsum('il,jm,kn,zijk->zlmn', D2, D2, D2, q)
    Q2 = torch.einsum('yz,zijk->yijk', D1, q)
    assert (Q1 - Q2).abs().max() < 1e-10

    # Antisymmetry under exchange of every pair of tensor indices.
    assert (q + q.transpose(1, 2)).abs().max() < 1e-10
    assert (q + q.transpose(1, 3)).abs().max() < 1e-10
    assert (q + q.transpose(3, 2)).abs().max() < 1e-10
def test_reduce_tensor_elasticity_tensor():
    """The elasticity-tensor symmetry ijkl=jikl=klij must leave 21 dims."""
    Rs, _Q = rs.reduce_tensor('ijkl=jikl=klij', i=[(1, 1)])
    assert rs.dim(Rs) == 21
| [
"geiger.mario@gmail.com"
] | geiger.mario@gmail.com |
8d66c48f1cf17ed6e117de35119616e410c9e269 | 3b2867636844ab4b402ef091b61222a5870bae6e | /year2017/day14.py | 765292b38d049a9d76ff935743617f5776e13d1b | [] | no_license | feigaoxyz/adventofcode | f992478ff6518930a60b9d4e15e5902d1f208f06 | 18918e16709eef833544f48d8c1a46c93d950000 | refs/heads/master | 2021-06-21T18:33:45.229440 | 2021-01-12T16:10:03 | 2021-01-12T16:10:03 | 51,060,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,939 | py | from common import load_input
from day10 import knot_hash_full
PART1_DOC = """Part 1:
For i in 0..127, how many '1' in 128 knot hashes (day 10) of
strings "INPUT-$i"?
"""
PART2_DOC = """Part 2:
Return number of connected components (4-direction) of 1's
"""
def disk_grid_recover(raw: str) -> list:
    """Build the 128x128 bit grid for key string *raw* (AoC 2017 day 14).

    Row i is the 128-bit binary expansion of knot_hash_full("raw-i")
    (day 10), returned as a list of 0/1 ints.
    """
    grid = []
    for i in range(128):
        hash = knot_hash_full('{}-{}'.format(raw, i))
        # Hex digest -> int -> zero-padded 128-char binary string -> bits.
        grid.append([int(c) for c in '{:0>128b}'.format(int('0x' + hash, 16))])
    return grid
def fn_p1(raw):
    """Part 1: total number of used (1) squares in the 128x128 disk grid."""
    grid = disk_grid_recover(raw)
    return sum(sum(row) for row in grid)
def fn_p2(raw):
    """Part 2: number of 4-connected regions of used squares."""
    components = connected_components(disk_grid_recover(raw))
    return len(components)
def connected_components(grid):
    """Return the 4-connected components of the 1-cells in *grid*.

    grid: 2-D list of 0/1 ints (rows may differ in length).
    Returns a list of sets; each set holds the (row, col) coordinates of
    one component under up/down/left/right adjacency.

    Improvements over the previous revision: the write-only ``marked``
    accumulator (dead code) is removed and the initial coordinate set is
    built with a comprehension.
    """
    # All 1-cells not yet assigned to a component.
    remain = {(r, c)
              for r in range(len(grid)) for c in range(len(grid[r]))
              if grid[r][c] == 1}
    ccs = []
    while remain:
        # Flood-fill starting from an arbitrary unassigned cell.
        component = set()
        working = {remain.pop()}
        while working:
            r, c = working.pop()
            component.add((r, c))
            for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                neighbor = (r + dr, c + dc)
                if neighbor in remain:
                    remain.remove(neighbor)
                    working.add(neighbor)
        ccs.append(component)
    return ccs
def test_connected_component():
    """Sanity checks for connected_components on tiny grids."""
    diagonal = [[1, 0], [0, 1]]
    assert len(connected_components(diagonal)) == 2
    corner = [[1, 1], [0, 1]]
    assert len(connected_components(corner)) == 1
    snake = [[1, 1, 1], [0, 0, 1], [0, 1, 1]]
    assert len(connected_components(snake)) == 1
if __name__ == '__main__':
example = """flqrgnkx
""".strip()
input_data = load_input(__file__.split('.')[0] + '_in.txt').strip()
# print("Part 1 example:", fn_p1(example)) # 8108
# print("Part 1:", fn_p1(input_data)) # 8216
print("Part 2 example:", fn_p2(example)) # 1242
print("Part 2:", fn_p2(input_data)) # 1139
| [
"contact@feigao.org"
] | contact@feigao.org |
0603ccdfdf7026c83b1b6adbeb8206cdd97687ee | 69371d185a807c2754c460e7c7ccf8debe1bf384 | /src/robot/parsing/lexerwrapper.py | 97cc47e501426872e8e91563105988f3798536ef | [
"Apache-2.0",
"CC-BY-3.0"
] | permissive | eternalconcert/robotframework | bf5b8df519642fe383ba82f15f2b4c4f467a5c5e | 802f6a4986a34a6f64f7b48467d0a38f2b14fdd8 | refs/heads/master | 2020-07-17T09:38:35.126356 | 2019-09-04T20:13:47 | 2019-09-04T20:13:47 | 205,995,467 | 2 | 0 | NOASSERTION | 2019-09-03T05:25:15 | 2019-09-03T05:25:14 | null | UTF-8 | Python | false | false | 2,301 | py | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from robot.errors import DataError
from robot.output import LOGGER
from robot.utils import Utf8Reader, get_error_message
from .restreader import read_rest
PROCESS_CURDIR = True
class LexerWrapper(object):
    """Adapts a Robot Framework lexer to the interface yacc.yacc expects.

    Reads *source*, feeds its contents to *lexer*, and serves tokens one at
    a time through token(), handling error reporting, ${CURDIR}
    substitution, and resynchronisation after a lexing error.
    """

    def __init__(self, lexer, source):
        self.source = source
        # Backslashes are doubled so the substituted path survives the
        # later escape processing of token values.
        self.curdir = os.path.dirname(source).replace('\\', '\\\\')
        lexer.input(self._read(source))
        self.tokens = lexer.get_tokens()

    def _read(self, path):
        """Read *path* as UTF-8 text; .rest/.rst files go through the reST
        reader first. Raises DataError on any read failure."""
        try:
            # IronPython handles BOM incorrectly if not using binary mode:
            # https://ironpython.codeplex.com/workitem/34655
            with open(path, 'rb') as data:
                if os.path.splitext(path)[1].lower() in ('.rest', '.rst'):
                    return read_rest(data)
                return Utf8Reader(data).read()
        except:
            raise DataError(get_error_message())

    def token(self):
        """Adapter for yacc.yacc"""
        token = next(self.tokens, None)
        if token and token.type == token.ERROR:
            self._report_error(token)
            # Resynchronise: discard tokens until after the next EOS.
            return self._next_token_after_eos()
        if token and '${CURDIR}' in token.value and PROCESS_CURDIR:
            token.value = token.value.replace('${CURDIR}', self.curdir)
        return token

    def _report_error(self, token):
        # TODO: add line number
        LOGGER.error("Error in file '%s': %s" % (self.source, token.error))

    def _next_token_after_eos(self):
        """Skip forward and return the first token after the next EOS.

        Calls self.token() (not next() directly) so that nested errors are
        reported and skipped recursively as well.
        """
        while True:
            token = self.token()
            if token is None:
                return None
            if token.type == token.EOS:
                return self.token()
| [
"peke@iki.fi"
] | peke@iki.fi |
80e3a142476b97deeeaf6c8e255fab037dd909ea | c9f090a34b145450dc1a88e4b847d552cdbcdcd1 | /libre/apps/data_drivers/literals.py | 14bcde3f8591592f485f505231bee54b83862ba7 | [] | no_license | axelrivera/libre | 0006853ec8f2a23f63c25136d0bbd93ba830be0c | cd6f474946f8f09b3b93c08b32aed93ce04b5ffb | refs/heads/master | 2020-12-25T06:12:37.535782 | 2013-07-05T01:47:29 | 2013-07-05T01:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | # Row based
DEFAULT_LIMIT = 100
# Excel
DEFAULT_FIRST_ROW_NAMES = False
DEFAULT_SHEET = '0'
| [
"roberto.rosario.gonzalez@gmail.com"
] | roberto.rosario.gonzalez@gmail.com |
1ffd905a0dc4a4c67f0015ba2c5379758054c570 | f281d0d6431c1b45c6e5ebfff5856c374af4b130 | /DAY100~199/DAY148-LEET1011-CapacityToShipPackagesWithinDDays/joohyuk.py | 2fff9fa5c66fb2651695712afd4d3d1c98716ac2 | [] | no_license | tachyon83/code-rhino | ec802dc91dce20980fac401b26165a487494adb4 | b1af000f5798cd12ecdab36aeb9c7a36f91c1101 | refs/heads/master | 2022-08-13T09:10:16.369287 | 2022-07-30T11:27:34 | 2022-07-30T11:27:34 | 292,142,812 | 5 | 6 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | import sys
si = sys.stdin.readline
def solve(weights, d):
    """Print (and return) the least ship capacity that can ship all
    *weights*, in order, within *d* days (LeetCode 1011).

    Binary-searches the capacity between max(weights) (the largest package
    must fit in one day) and sum(weights) (everything shipped in one day).

    Fixes over the previous revision:
    - the inner loop variable shadowed the binary-search upper bound `e`,
      corrupting the search;
    - the day simulation checked `hold < m` instead of `hold + w <= m`,
      so a day could be overfilled;
    - the else-branch used `s = m`, which never terminates when s+1 == e;
      the correct move is `s = m + 1`;
    - the final (partial) day was never counted.
    """
    lo, hi = max(weights), sum(weights)
    while lo < hi:
        mid = (lo + hi) // 2
        # Greedy simulation: count days needed at capacity `mid`.
        days, load = 1, 0
        for w in weights:
            if load + w <= mid:
                load += w
            else:
                days += 1
                load = w
        if days <= d:
            hi = mid      # mid is feasible; try smaller capacities
        else:
            lo = mid + 1  # mid is too small
    print(lo)
    return lo
| [
"noreply@github.com"
] | tachyon83.noreply@github.com |
3e34d6908814684cc49460148b39eafc26c44e9a | 53c35060ba641c399d80ed1daf6d3b1e5c49dab4 | /enso/contrib/open/__init__.py | e6fe28def3f0708d4cba41225fd35eb9363a0997 | [
"BSD-2-Clause"
] | permissive | RendijsSmukulis/enso-launcher-continued | c85462a36637a70985938a39c2d3448def8a9af2 | 97b21f4eabbd445c33c6e93c19e7b2a712ea2bca | refs/heads/master | 2021-01-23T03:22:02.651360 | 2017-03-19T03:02:25 | 2017-03-19T03:02:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,100 | py | # -*- coding: utf-8 -*-
# vim:set tabstop=4 softtabstop=4 shiftwidth=4 expandtab:
#
# Author : Pavel Vitis "blackdaemon"
# Email : blackdaemon@seznam.cz
#
# Copyright (c) 2010, Pavel Vitis <blackdaemon@seznam.cz>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# enso.contrib.open
#
# ----------------------------------------------------------------------------
"""
An Enso plugin providing the 'open', 'open with', 'learn as open',
'unlearn open', 'undo unlearn open' commands.
This is main class implementing all basic command functionality, without
platform specific code.
For platform specific code, platform command implementation is called.
See open_command_impl initialization in load():
global open_command_impl
# Import platform specific implementation class
# This imports and initializes
# enso.contrib.open.platform.<platform_name>.OpenCommandImpl class:
open_command_impl = enso.contrib.platform.get_command_platform_impl("open")()
And then for platform specific task, methods of open_command_impl class are
called:
open_command_impl.save_shortcut()
To tweak platform-specific code, see the implementations of OpenCommandImpl
class in open-command platform directories:
enso.contrib.open.platform.win32
enso.contrib.open.platform.osx
enso.contrib.open.platform.linux
TODO:
* Implement OSX variant
* Open multiple files. Special text file .enrosun should be created
in the LEARN_AS_DIR with the list of files to open(?)
Or maybe create subdirectory in LEARN_AS_DIR and put multiple links there.
* It should be possible to unlearn even any of desktop/startmenu/quicklaunch
shortcuts. But we do not want to invasively remove items from desktop/
startmenu/quicklaunch on unlearn.
Implement this using LEARN_AS_DIR/.unlearned subdirectory to remember
such unlearned shortcuts.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
# Future imports
from __future__ import with_statement
# Imports
import logging
import os
import sys
from xml.sax.saxutils import escape as xml_escape
# Enso imports
import enso.contrib.platform
from enso.commands import CommandManager, CommandObject
from enso.commands.factories import ArbitraryPostfixFactory, GenericPrefixFactory
from enso.contrib.open import shortcuts, utils
from enso.contrib.scriptotron.ensoapi import EnsoApi
from enso.contrib.scriptotron.tracebacks import safetyNetted
from enso.events import EventManager
from enso.messages import displayMessage as display_xml_message
from enso.utils.memoize import memoized
logger = logging.getLogger('enso.contrib.open')
# Platform specific command-implementation class. This is initialized in load().
open_command_impl = None
recent_command_impl = None
ensoapi = EnsoApi()
# ----------------------------------------------------------------------------
# Utility functions
# ---------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# LearnAsOpen command
# ---------------------------------------------------------------------------
class LearnAsOpenCommand(CommandObject):
    """ Learns to open a document or application as {name} """

    def __init__(self, postfix=None):
        super(LearnAsOpenCommand, self).__init__()
        # Shortcut name typed as the command postfix; may be None, in which
        # case run() tries to derive it from the selected executable.
        self.name = postfix

    @safetyNetted
    def run(self):
        """Create an "open {name}" shortcut for the current selection."""
        seldict = ensoapi.get_selection()
        if seldict.get('files'):
            #TODO: Handle opening of multiple files
            filename = seldict['files'][0]
        elif seldict.get('text'):
            filename = seldict['text'].strip()
        else:
            ensoapi.display_message(u"No file is selected")
            return

        if self.name is None:
            # No name given: on Windows, fall back to the executable's
            # product name (lower-cased) as the shortcut name.
            try:
                from enso.contrib.open.platform.win32.utils import get_exe_name
            except ImportError:
                pass
            else:
                product_name = get_exe_name(filename)
                if product_name:
                    self.name = product_name.lower()
            if self.name is None:
                ensoapi.display_message(u"You must provide name")
                return

        if (not os.path.isfile(filename) and
                not os.path.isdir(filename) and
                not open_command_impl._is_url(filename)):
            ensoapi.display_message(
                u"Selection is neither file, folder nor URL.")
            return

        shortcut = open_command_impl.add_shortcut(self.name, filename)
        if shortcut:
            display_xml_message(
                u"<p><command>open %s</command> is now a command</p>"
                % xml_escape(self.name)
            )
        else:
            # add_shortcut() returns a falsy value when the name is taken.
            display_xml_message(
                u"<p><command>open %s</command> already exists. Please choose another name.</p>"
                % xml_escape(self.name)
            )
        return
# ----------------------------------------------------------------------------
# Open command
# ---------------------------------------------------------------------------
class OpenCommand(CommandObject):
    """ Opens application, file or folder referred by given name """

    def __init__(self, postfix=None):
        super(OpenCommand, self).__init__()
        # The learned shortcut name to open; None when no postfix was typed.
        self.target = postfix

    @safetyNetted
    def run(self):
        #TODO: Implement opening current selection if no postfix provided?
        target = self.target
        if not target:
            return
        message = u"<p>Opening <command>%s</command>...</p>" % xml_escape(target)
        display_xml_message(message)
        open_command_impl.run_shortcut(target)
# ----------------------------------------------------------------------------
# OpenWith command
# ---------------------------------------------------------------------------
class OpenWithCommand(CommandObject):
    """ Opens your currently selected file(s) or folder with the specified application """

    def __init__(self, postfix=None):
        super(OpenWithCommand, self).__init__()
        # Name of the learned application shortcut to open the files with.
        self.target = postfix

    @safetyNetted
    def run(self):
        """Collect the selected files (or paths from selected text) and
        hand them to the platform implementation."""
        seldict = ensoapi.get_selection()
        if seldict.get('files'):
            files = seldict['files']
        elif seldict.get('text'):
            # Fall back to parsing the selected text as newline-separated
            # paths; normalise line endings first.
            text = seldict['text'].strip("\r\n\t\0 ").replace("\r", "\n").replace("\n\n", "\n")
            files = (file_name for file_name in text.split("\n"))
            # Keep only entries that actually exist as files or directories.
            files = [file_name for file_name in files if os.path.isfile(file_name) or os.path.isdir(file_name)]
        else:
            files = []

        if len(files) == 0:
            ensoapi.display_message(u"No file or folder is selected")
            return

        open_command_impl.open_with_shortcut(self.target, files)
# ----------------------------------------------------------------------------
# UnlearnOpen command
# ---------------------------------------------------------------------------
class UnlearnOpenCommand(CommandObject):
    """
    Unlearn "open {name}" command
    """

    def __init__(self, postfix=None):
        super(UnlearnOpenCommand, self).__init__()
        # Name of the learned shortcut to remove.
        self.target = postfix

    @safetyNetted
    def run(self):
        try:
            open_command_impl.remove_shortcut(self.target)
        except Exception:
            # Removal failed (e.g. built-in or missing shortcut).
            display_xml_message(u"<p>This shortcut can't be unlearned</p>")
            return
        display_xml_message(u"<p>Unlearned <command>open %s</command></p>" % self.target)
# ----------------------------------------------------------------------------
# UndoUnlearnOpen command
# ---------------------------------------------------------------------------
class UndoUnlearnOpenCommand(CommandObject):
"""
The "undo unlearn open" command.
"""
NAME = "undo unlearn open"
DESCRIPTION = u"Undoes your last \u201cunlearn open\u201d command."
def __init__(self):
super(UndoUnlearnOpenCommand, self).__init__()
self.setDescription(self.DESCRIPTION)
self.setName(self.NAME)
@safetyNetted
def run(self):
sh = open_command_impl.undo_remove_shortcut()
if sh:
display_xml_message(
u"<p>Undo successful. <command>open %s</command> is now a command</p>"
% sh.name)
else:
ensoapi.display_message(u"There is nothing to undo")
# ----------------------------------------------------------------------------
# Recent command
# ---------------------------------------------------------------------------
class RecentCommand(CommandObject):
""" Opens recent application, file or folder referred by given name """
def __init__(self, postfix=None):
super(RecentCommand, self).__init__()
self.target = postfix
@safetyNetted
def run(self):
#TODO: Implement opening current selection if no postfix provided?
if not self.target:
return
display_xml_message(
u"<p>Opening <command>%s</command>...</p>"
% xml_escape(self.target)
)
recent_command_impl.run_shortcut(self.target)
# ----------------------------------------------------------------------------
# Command factories
# ---------------------------------------------------------------------------
class LearnAsOpenCommandFactory(ArbitraryPostfixFactory):
"""
Generates a "learn as open {name}" command.
"""
HELP_TEXT = "name"
PREFIX = "learn as open "
NAME = "%s{name}" % PREFIX
DESCRIPTION = "Learn to open a document or application as {name}"
def __init__(self):
super(LearnAsOpenCommandFactory, self).__init__()
def _generateCommandObj(self, postfix):
cmd = LearnAsOpenCommand(postfix)
cmd.setDescription(self.DESCRIPTION)
return cmd
class OpenCommandFactory(GenericPrefixFactory):
"""
Generates a "open {name}" command.
"""
HELP = "command"
HELP_TEXT = "command"
PREFIX = "open "
NAME = "%s{name}" % PREFIX
DESCRIPTION = "Continue typing to open an application or document"
def __init__(self):
super(OpenCommandFactory, self).__init__()
self.postfixes_updated_on = 0
def _generateCommandObj(self, parameter=None):
cmd = OpenCommand(parameter)
cmd.setDescription(self.DESCRIPTION)
return cmd
@safetyNetted
def update(self):
shortcuts_dict = open_command_impl.get_shortcuts()
if self.postfixes_updated_on >= shortcuts_dict.updated_on:
return
with utils.Timer("Setting postfixes for 'open' command."):
self.setPostfixes(shortcuts_dict.keys())
self.postfixes_updated_on = shortcuts_dict.updated_on
class OpenWithCommandFactory(GenericPrefixFactory):
"""
Generates a "open with {name}" command.
"""
HELP = "command"
HELP_TEXT = "command"
PREFIX = "open with "
NAME = "%s{name}" % PREFIX
DESCRIPTION = "Opens your currently selected file(s) or folder with the specified application"
def __init__(self):
super(OpenWithCommandFactory, self).__init__()
self.postfixes_updated_on = 0
def _generateCommandObj(self, parameter=None):
cmd = OpenWithCommand(parameter)
cmd.setDescription(self.DESCRIPTION)
return cmd
@safetyNetted
def update(self):
shortcuts_dict = open_command_impl.get_shortcuts()
if self.postfixes_updated_on >= shortcuts_dict.updated_on:
return
with utils.Timer("Setting postfixes for 'open with' command."):
self.setPostfixes(
[s.name for s in shortcuts_dict.values()
if s.type == shortcuts.SHORTCUT_TYPE_EXECUTABLE])
self.postfixes_updated_on = shortcuts_dict.updated_on
class UnlearnOpenCommandFactory(GenericPrefixFactory):
"""
Generates a "unlearn open {name}" command.
"""
HELP = "command"
HELP_TEXT = "command"
PREFIX = "unlearn open "
NAME = "%s{name}" % PREFIX
DESCRIPTION = u" Unlearn \u201copen {name}\u201d command "
def __init__(self):
super(UnlearnOpenCommandFactory, self).__init__()
self.postfixes_updated_on = 0
def _generateCommandObj(self, parameter=None):
cmd = UnlearnOpenCommand(parameter)
cmd.setDescription(self.DESCRIPTION)
return cmd
@safetyNetted
def update(self):
shortcuts_dict = open_command_impl.get_shortcuts()
if self.postfixes_updated_on >= shortcuts_dict.updated_on:
return
with utils.Timer("Setting postfixes for 'unlearn open' command."):
self.setPostfixes(shortcuts_dict.keys())
self.postfixes_updated_on = shortcuts_dict.updated_on
class RecentCommandFactory(GenericPrefixFactory):
"""
Generates a "recent {name}" command.
"""
HELP = "command"
HELP_TEXT = "command"
PREFIX = "recent "
NAME = "%s{name}" % PREFIX
DESCRIPTION = "Continue typing to open recent application or document"
def __init__(self):
super(RecentCommandFactory, self).__init__()
self.postfixes_updated_on = 0
def _generateCommandObj(self, parameter=None):
cmd = RecentCommand(parameter)
cmd.setDescription(self.DESCRIPTION)
return cmd
@safetyNetted
def update(self):
shortcuts_dict = recent_command_impl.get_shortcuts()
if self.postfixes_updated_on >= shortcuts_dict.updated_on:
return
with utils.Timer("Setting postfixes for 'recent' command."):
self.setPostfixes(shortcuts_dict.keys())
self.postfixes_updated_on = shortcuts_dict.updated_on
# ----------------------------------------------------------------------------
# Plugin initialization
# ---------------------------------------------------------------------------
def load():
global open_command_impl, recent_command_impl
# Import platform specific implementation class
# This imports enso.contrib.open.platform.<platform_name>.OpenCommandImpl class.
open_command_impl = enso.contrib.platform.get_command_platform_impl("open")()
try:
recent_command_impl = enso.contrib.platform.get_command_platform_impl("open", "RecentCommandImpl")()
except:
recent_command_impl = None
# Register commands
try:
CommandManager.get().registerCommand(
OpenCommandFactory.NAME,
OpenCommandFactory()
)
CommandManager.get().registerCommand(
OpenWithCommandFactory.NAME,
OpenWithCommandFactory()
)
CommandManager.get().registerCommand(
LearnAsOpenCommandFactory.NAME,
LearnAsOpenCommandFactory()
)
CommandManager.get().registerCommand(
UnlearnOpenCommandFactory.NAME,
UnlearnOpenCommandFactory()
)
CommandManager.get().registerCommand(
UndoUnlearnOpenCommand.NAME,
UndoUnlearnOpenCommand()
)
if recent_command_impl:
CommandManager.get().registerCommand(
RecentCommandFactory.NAME,
RecentCommandFactory()
)
except Exception, e:
logger.critical(repr(e))
# ----------------------------------------------------------------------------
# Doctests
# ---------------------------------------------------------------------------
def test_evaluate():
"""
Set up mock objects:
>>> def mockDisplayMessage( text ):
... print "message: %s" % text
>>> class MockSelection( object ):
... def set( self, seldict ):
... print "set selection: %s" % seldict
Initialize our command with the mock objects:
>>> c = OpenCommand( mockDisplayMessage, MockSelection() )
Ensure that the command works if nothing is selected:
>>> c.run( {} )
message: <p>No code to evaluate!</p>
Ensure that the command works in the general case:
>>> c.run( {'text' : u'5+3'} )
set selection: {'text': u'8'}
Ensure that the command works with syntax errors:
>>> c.run( {'text' : u'5+'} )
message: <p>Error: unexpected EOF while parsing (<selected text>, line 1)</p>
Ensure that the command doesn't allow standard Python builtins to be used:
>>> ec.run( {'text' : u'open("secretfile", "w")'} )
message: <p>Error: name 'open' is not defined</p>
"""
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"pavelvitis@gmail.com"
] | pavelvitis@gmail.com |
4e60cf2724fa2e5c06ab75f0f32b8a440d656ec1 | cd4bbecc3f713b0c25508d0c5674d9e103db5df4 | /toontown/minigame/TwoDCamera.py | 5e1022d1ec3b0d80cd7d937278f60e318d768b98 | [] | no_license | peppythegod/ToontownOnline | dce0351cfa1ad8c476e035aa3947fdf53de916a6 | 2e5a106f3027714d301f284721382cb956cd87a0 | refs/heads/master | 2020-04-20T05:05:22.934339 | 2020-01-02T18:05:28 | 2020-01-02T18:05:28 | 168,646,608 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,591 | py | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.task.Task import Task
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from toontown.minigame import ToonBlitzGlobals
import math
class TwoDCamera(DistributedObject.DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory('TwoDCamera')
def __init__(self, camera):
self.notify.debug('Constructing TwoDCamera with %s' % camera)
self.camera = camera
self.cameraSideView = ToonBlitzGlobals.CameraStartingPosition
self.threeQuarterOffset = 2
self.changeFacingInterval = None
self.ivalControllingCamera = False
self.accept('avatarOrientationChanged', self.setupChangeFacingInterval)
def onstage(self):
self.camera.reparentTo(render)
p = self.cameraSideView
self.camera.setPosHpr(render, p[0], p[1], p[2], p[3], p[4], p[5])
self.camera.setX(
render,
base.localAvatar.getX(render) + self.threeQuarterOffset)
def destroy(self):
self.ignore('avatarOrientationChanged')
p = self.cameraSideView
self.camera.setPosHpr(render, p[0], p[1], p[2], p[3], p[4], p[5])
def update(self):
if not self.ivalControllingCamera:
camX = base.localAvatar.getX(render) - math.sin(
base.localAvatar.getH(render) * math.pi /
180) * self.threeQuarterOffset
self.camera.setX(render, camX)
def clearChangeFacingInterval(self):
if self.changeFacingInterval:
self.changeFacingInterval.pause()
del self.changeFacingInterval
self.changeFacingInterval = None
def setupChangeFacingInterval(self, newHeading):
self.clearChangeFacingInterval()
self.newHeading = newHeading
self.changeFacingInterval = LerpFunc(self.myLerpPos, duration=5.0)
self.changeFacingInterval.start()
def myLerpPos(self, t):
self.ivalControllingCamera = True
finalCamX = base.localAvatar.getX(render) - math.sin(
self.newHeading * math.pi / 180) * self.threeQuarterOffset
diffX = finalCamX - self.camera.getX(render)
self.camera.setX(render, self.camera.getX(render) + diffX * t)
if math.fabs(self.camera.getX(render) - finalCamX) < 0.01:
self.notify.debug('giving up camera control')
self.camera.setX(render, finalCamX)
self.ivalControllingCamera = False
self.clearChangeFacingInterval()
| [
"47166977+peppythegod@users.noreply.github.com"
] | 47166977+peppythegod@users.noreply.github.com |
bc4ea94f6b02b9e24b146d3a2061fc53211512ef | 3ec4823d1cf7197da0fe086613383c0d2f85ba7b | /Lesson 7 function/7.4_positional_arguments.py | 4c675f46308702213181bef6274075a9463ee4dc | [] | no_license | JamCrumpet/Lesson-notes | 268f114d420cd55ec3c87c9334814a6e8398b6e6 | 501ef9687be8da4205a640fbc391444ebd65a15d | refs/heads/master | 2022-12-16T05:58:35.413156 | 2020-09-16T14:52:19 | 2020-09-16T14:52:19 | 288,780,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | # when calling a function python will match each argument in the function call with a parameter ...
# ... in the function definition
# the simplest way to do this is bases on the order of the argument provided
# values match up this way are called positional argument
# for example consider a function that displays info about pets
# the function tell use what kind of animal each pet is the the pets name
def describe_pet(animal_type, pet_name): # two types of argument the pet name and type
"""Display information about a pet"""
print("\nI have a " + animal_type + ".")
print("My " + animal_type + "'s name is " + pet_name.title() + ".")
describe_pet("Dog", "Wes") | [
"noreply@github.com"
] | JamCrumpet.noreply@github.com |
3acd80e802c20c4c87da449e8fecd10a0d1bbc5d | 1595ffcb2e59f511cabf4b9dc2b8de66862cc5a2 | /run_phase1.py | 9edf2f184e13eb252df596eb0256db2ff85e9163 | [] | no_license | littlepretty/SensorPGM | ab741064f2c9d8c5a6c8a917ef235250f80829eb | 06c8c980b0724f18247dab31a545c504f99e045b | refs/heads/master | 2021-01-14T12:44:42.769800 | 2016-05-05T14:39:09 | 2016-05-05T14:39:09 | 52,915,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,346 | py | #!/usr/bin/env python
import csv
import logging as lg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from python_log_indenter import IndentedLoggerAdapter
def learnModel(filename, n=48, m=50):
f = open(filename, 'rb')
reader = csv.reader(f)
day1 = []
day2 = []
day3 = []
for row in reader:
day1.append([float(x) for x in row[:n]])
day2.append([float(x) for x in row[n:n*2]])
day3.append([float(x) for x in row[n*2:n*3]])
"""learn parameters for m*n random variables"""
means = np.zeros((m, n))
stdevs = np.zeros((m, n))
for i in range(0, m):
for j in range(0, n):
row = [day1[i][j], day2[i][j], day3[i][j]]
means[i][j] = np.mean(row)
stdevs[i][j] = np.std(row) / np.sqrt(len(row) - 1)
log.debug(str(means[:1]))
log.debug(str(stdevs[:1]))
return means, stdevs
def windowInferenceError(day, means, b_cnt, n=96, m=50):
error = []
f = open('w%d.csv' % b_cnt, 'wb')
writer = csv.writer(f)
writer.writerow(title)
infer_data = np.zeros((m, n))
for i in range(0, n):
test_data = day[:, i]
infer_data[:, i] = means[:, i % 48]
window_start = int(i * b_cnt) % m
window_size = b_cnt
log.debug(str(range(window_start, window_start + window_size)))
"""replace inferred data with test data for these inside window"""
for k in range(window_start, window_start + window_size):
index = k % m
infer_data[index, i] = test_data[index]
"""absolute error for time i"""
error_i = np.subtract(test_data, infer_data[:, i])
error_i = np.absolute(error_i)
error.append(error_i)
for i in range(0, m):
row = [x for x in infer_data[i, :]]
row.insert(0, i)
writer.writerow(row)
return error
def findLargestK(error, budget, m=50):
max_indices = []
indices = range(0, m)
log.debug(str(error))
for index in indices:
if len(max_indices) == budget:
break
count = 0
for j in range(0, m):
if error[index] > error[j]:
count += 1
if count >= m - budget:
max_indices.append(index)
log.debug('read sensors %s' % str(max_indices))
log.debug('#sensors = %d' % len(max_indices))
return max_indices
def varianceInferenceError(day, means, stdevs, b_cnt, n=96, m=50):
error = []
f = open('v%d.csv' % b_cnt, 'wb')
writer = csv.writer(f)
writer.writerow(title)
infer_data = np.zeros((m, n))
for i in range(0, n):
test_data = day[:, i]
infer_data[:, i] = means[:, i % 48]
"""find maximum variances' index"""
variance = stdevs[:, i % 48]
max_indices = findLargestK(variance, b_cnt, m)
"""replace most variant data with test data"""
for index in max_indices:
infer_data[index, i] = test_data[index]
"""absolute error for time i"""
error_i = np.subtract(test_data, infer_data[:, i])
error_i = np.absolute(error_i)
error.append(error_i)
for i in range(0, m):
row = [x for x in infer_data[i, :]]
row.insert(0, i)
writer.writerow(row)
return error
def inferenceTest(filename, means, stdevs, n=96, m=50):
f = open(filename, 'rb')
reader = csv.reader(f)
data = np.array(list(reader)).astype('float')
win_avg_errors = []
var_avg_errors = []
for cnt in budget_cnts:
total_err = windowInferenceError(data, means, cnt)
win_avg_err = np.sum(total_err) / (len(total_err) * len(total_err[0]))
log.info('Window Inference for %.2f budget' % cnt)
log.debug('error matrix \n' + str(total_err))
log.add().info('avg error = ' + str(win_avg_err))
log.sub()
win_avg_errors.append(win_avg_err)
total_err = varianceInferenceError(data, means, stdevs, cnt)
var_avg_err = np.sum(total_err) / (len(total_err) * len(total_err[0]))
log.info('Variance Inference for %.2f budget' % cnt)
log.debug('error matrix \n' + str(total_err))
log.add().info('avg error = ' + str(var_avg_err))
log.sub()
var_avg_errors.append(var_avg_err)
return win_avg_errors, var_avg_errors
def plotAvgError(win, var):
matplotlib.rc('font', size=18)
index = np.arange(len(budget_cnts))
bar_width = 0.27
fig, ax = plt.subplots()
rect1 = ax.bar(index, win, bar_width, color='b', hatch='/')
rect2 = ax.bar(index + bar_width, var, bar_width, color='r', hatch='\\')
ax.set_xlim([-0.5, 5])
ax.set_ylabel('Mean Absolute Error')
ax.set_xlabel('Budget Count')
ax.set_xticks(index + bar_width)
ax.set_xticklabels(('0', '5', '10', '20', '25'))
ax.legend((rect1[0], rect2[0]), ('Window', 'Variance'))
plt.savefig('%s_err.eps' % topic, format='eps',
bbox_inches='tight')
# plt.show()
def main(train_file, test_file):
means, stdevs = learnModel(train_file)
win, var = inferenceTest(test_file, means, stdevs)
print win
print var
plotAvgError(win, var)
if __name__ == '__main__':
# lg.basicConfig(level=lg.DEBUG)
lg.basicConfig(level=lg.INFO)
log = IndentedLoggerAdapter(lg.getLogger(__name__))
title = ['sensors', 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0,
5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0,
11.5, 12.0, 12.5, 13.0, 13.5, 14.0, 14.5, 15.0, 15.5, 16.0,
16.5, 17.0, 17.5, 18.0, 18.5, 19.0, 19.5, 20.0, 20.5, 21.0,
21.5, 22.0, 22.5, 23.0, 23.5, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5,
3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5,
9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0, 12.5, 13.0, 13.5,
14.0, 14.5, 15.0, 15.5, 16.0, 16.5, 17.0, 17.5, 18.0, 18.5,
19.0, 19.5, 20.0, 20.5, 21.0, 21.5, 22.0, 22.5, 23.0, 23.5, 0.0]
budget_cnts = [20]
budget_cnts = [0, 5, 10, 20, 25]
log.info('Processing Temperature')
log.add()
topic = 'temperature'
main('intelTemperatureTrain.csv', 'intelTemperatureTest.csv')
log.sub()
log.info('Processing Humidity')
log.add()
topic = 'humidity'
main('intelHumidityTrain.csv', 'intelHumidityTest.csv')
log.sub()
| [
"littlepretty881203@gmail.com"
] | littlepretty881203@gmail.com |
e4e7ba162eaf4533e33f22af3e2304d322d02af4 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4150/codes/1585_1015.py | 7c3f11b65c17fa5d163e69c25b1d75d669bc1e27 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | # Teste seu codigo aos poucos.
# Nao teste tudo no final, pois fica mais dificil de identificar erros.
# Nao se intimide com as mensagens de erro. Elas ajudam a corrigir seu codigo.
a = int(input("valor 1: "))
b = int(input("valor 2: "))
c = int(input("valor 3: "))
minimo = (min(a,b,c))
maximo = (max(a,b,c))
intermediario1 = (a+b+c)
intermediario = intermediario1 - ( minimo+maximo)
print(minimo)
print(intermediario)
print(maximo)
| [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
9e09f4b3ae1d0853cbe16395aa8aa8d3699e6d1e | 306d2a92fb331aec6ddf0794b538d6e3385a0df9 | /app/model/account.py | 8f9c579c1ff38d8f743848f6c95fcc6bd5f55c7f | [] | no_license | Zarinabonu/ForceApp | f343d3a52aee08890230c5425c9e238df99c5a7f | 13f8e8613999c4850fc6f0bfcec66f897eecbe4a | refs/heads/master | 2020-12-10T08:00:25.072289 | 2020-01-20T13:14:07 | 2020-01-20T13:14:07 | 233,540,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,320 | py | from random import randint, randrange
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
class Account(models.Model):
user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
f_name = models.CharField(max_length=100, null=True, blank=True)
l_name = models.CharField(max_length=100, null=True, blank=True)
m_name = models.CharField(max_length=100, null=True, blank=True)
phone = models.IntegerField(null=True, blank=True)
photo = models.ImageField(null=True, blank=True)
address = models.CharField(max_length=200, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
@receiver(post_save, sender=Account)
def create_user(sender, instance, created, **kwargs):
if created:
l = instance.l_name
f = instance.f_name
username = l+f
value = randrange(100, 999)
u = User.objects.create(username=username)
u.set_password(value)
u.save()
instance.user = u
instance.save()
@receiver(post_save, sender=User)
def create_token(sender, instance, created, **kwargs):
if created:
token = Token.objects.create(user=instance)
| [
"zarinabonu199924@gmail.com"
] | zarinabonu199924@gmail.com |
fe65fbb3d6367aae8acb39fc61f23ca80d548b1a | aa2157e595b89c3512857e41fee16e8b11d7a657 | /Fresher Lavel Logical Programms/Between Two number print prime number.py | 94f23ce6807fc887991b4303536dbc6271acaff8 | [] | no_license | biswaranjanroul/Python-Logical-Programms | efee6276eea3eafab9ee6b6e7e0910b715a504d1 | 152dcecf2ecae7891a11769f250a4dc8d9d6b15f | refs/heads/master | 2022-12-15T07:37:45.978218 | 2020-09-17T13:24:53 | 2020-09-17T13:24:53 | 296,326,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | lower=int(input("Enter lower range number:"))
upper=int(input("Enter upper range number:"))
for num in range(lower,upper+1):
if num>1:
for i in range(2,num):
if (num % i)==0:
break
else:
print(num)
| [
"biswaranjanroul2@gmail.com"
] | biswaranjanroul2@gmail.com |
3df314f92c0af4d60757e92c6b59b97b7bd43315 | 0de67c078e00b9f43bfd6c4ddb1f4ffd153f8b7e | /clubs_website/settings.py | 88f227d2f0e998d94739536e11fb99e1cbeb5270 | [] | no_license | jreinstra/menlo-clubs | c249d754942a2a49e2ebae5914a1f81a27f845ef | ff50d07e6220a537d5de42c18ae73c845d8c35d7 | refs/heads/master | 2020-04-26T11:05:12.537802 | 2015-11-18T22:18:11 | 2015-11-18T22:18:11 | 39,816,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,076 | py | """
Django settings for clubs_website project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ip-09p46cq61uibshu7r0=+fe-1smc4&%sq9@b=%hb1k5ck039'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'import_export',
'clubs',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'clubs_website.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'clubs_website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
# Heroku settings below
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES = {}
DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
try:
from local_settings import *
except ImportError:
pass
| [
"jreinstra@gmail.com"
] | jreinstra@gmail.com |
fe7e24df2e555202d345122c183e1738d568fa4c | 3698934341cf6599637aac28ad90ba4d517d926a | /moonv4/moon_consul/moon_consul/__main__.py | 4d64288e9a240f046a55b31c64e709b494f4fd6b | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | hashnfv/hashnfv-moon | 286b61a66f5cf79d9261d2e2a065435958853299 | daaba34fa2ed4426bc0fde359e54a5e1b872208c | refs/heads/master | 2021-05-07T20:46:06.472789 | 2017-10-29T20:14:06 | 2017-10-29T20:14:06 | 108,938,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | from moon_consul.server import main
main()
| [
"thomas.duval@orange.com"
] | thomas.duval@orange.com |
e90fe2bdf3e7a6e8b87a0a61d09cbb2727f175fe | 20d9130fdc21756c4f8fe255583922352f5c5762 | /src/DIRAC/DataManagementSystem/Agent/RequestOperations/ReplicateAndRegister.py | df43cc14d25ab6cfc22a4dfb2c18869f0490d8ad | [] | no_license | bopopescu/bes3-jinr | 095314e43f41f08bd48b248fe3ca627a5c009f58 | fdfd852c92a56192b8ee9970b66f0136e6e0afff | refs/heads/master | 2022-11-26T06:01:36.718508 | 2014-03-17T06:03:50 | 2014-03-17T06:03:50 | 282,113,617 | 0 | 0 | null | 2020-07-24T03:30:10 | 2020-07-24T03:30:09 | null | UTF-8 | Python | false | false | 15,888 | py | ########################################################################
# $HeadURL $
# File: ReplicateAndRegister.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/03/13 18:49:12
########################################################################
""" :mod: ReplicateAndRegister
==========================
.. module: ReplicateAndRegister
:synopsis: ReplicateAndRegister operation handler
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
ReplicateAndRegister operation handler
"""
__RCSID__ = "$Id $"
# #
# @file ReplicateAndRegister.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/03/13 18:49:28
# @brief Definition of ReplicateAndRegister class.
# # imports
import re
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gMonitor
from DIRAC.RequestManagementSystem.private.OperationHandlerBase import OperationHandlerBase
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.DataManagementSystem.Client.FTSClient import FTSClient
from DIRAC.Resources.Storage.StorageElement import StorageElement
########################################################################
class ReplicateAndRegister( OperationHandlerBase ):
"""
.. class:: ReplicateAndRegister
ReplicateAndRegister operation handler
"""
__ftsClient = None
def __init__( self, operation = None, csPath = None ):
"""c'tor
:param self: self reference
:param Operation operation: Operation instance
:param str csPath: CS path for this handler
"""
OperationHandlerBase.__init__( self, operation, csPath )
# # own gMonitor stuff for files
gMonitor.registerActivity( "ReplicateAndRegisterAtt", "Replicate and register attempted",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "ReplicateOK", "Replications successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "ReplicateFail", "Replications failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RegisterOK", "Registrations successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RegisterFail", "Registrations failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
# # for FTS
gMonitor.registerActivity( "FTSScheduleAtt", "Files schedule attempted",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSScheduleOK", "File schedule successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSScheduleFail", "File schedule failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
# # SE cache
self.seCache = {}
@classmethod
def ftsClient( cls ):
""" facade for FTS client """
if not cls.__ftsClient:
cls.__ftsClient = FTSClient()
return cls.__ftsClient
def __call__( self ):
""" call me maybe """
# # check replicas first
checkReplicas = self.__checkReplicas()
if not checkReplicas["OK"]:
self.log.error( checkReplicas["Message"] )
if hasattr( self, "FTSMode" ) and getattr( self, "FTSMode" ):
bannedGroups = getattr( self, "FTSBannedGroups" ) if hasattr( self, "FTSBannedGroups" ) else ()
if self.request.OwnerGroup in bannedGroups:
self.log.info( "usage of FTS system is banned for request's owner" )
return self.rmTransfer()
return self.ftsTransfer()
return self.rmTransfer()
def __checkReplicas( self ):
""" check done replicas and update file states """
waitingFiles = dict( [ ( opFile.LFN, opFile ) for opFile in self.operation
if opFile.Status in ( "Waiting", "Scheduled" ) ] )
targetSESet = set( self.operation.targetSEList )
replicas = self.replicaManager().getCatalogReplicas( waitingFiles.keys() )
if not replicas["OK"]:
self.log.error( replicas["Message"] )
return replicas
reMissing = re.compile( "no such file or directory" )
for failedLFN, errStr in replicas["Value"]["Failed"].items():
waitingFiles[failedLFN].Error = errStr
if reMissing.search( errStr.lower() ):
self.log.error( "file %s does not exists" % failedLFN )
gMonitor.addMark( "ReplicateFail", len( targetSESet ) )
waitingFiles[failedLFN].Status = "Failed"
for successfulLFN, reps in replicas["Value"]["Successful"].items():
if targetSESet.issubset( set( reps ) ):
self.log.info( "file %s has been replicated to all targets" % successfulLFN )
waitingFiles[successfulLFN].Status = "Done"
return S_OK()
def _filterReplicas( self, opFile ):
""" filter out banned/invalid source SEs """
ret = { "Valid" : [], "Banned" : [], "Bad" : [] }
replicas = self.replicaManager().getActiveReplicas( opFile.LFN )
if not replicas["OK"]:
self.log.error( replicas["Message"] )
reNotExists = re.compile( "not such file or directory" )
replicas = replicas["Value"]
failed = replicas["Failed"].get( opFile.LFN , "" )
if reNotExists.match( failed.lower() ):
opFile.Status = "Failed"
opFile.Error = failed
return S_ERROR( failed )
replicas = replicas["Successful"][opFile.LFN] if opFile.LFN in replicas["Successful"] else {}
for repSEName in replicas:
seRead = self.rssSEStatus( repSEName, "ReadAccess" )
if not seRead["OK"]:
self.log.error( seRead["Message"] )
ret["Banned"].append( repSEName )
continue
if not seRead["Value"]:
self.log.error( "StorageElement '%s' is banned for reading" % ( repSEName ) )
repSE = self.seCache.get( repSEName, None )
if not repSE:
repSE = StorageElement( repSEName, "SRM2" )
self.seCache[repSE] = repSE
pfn = repSE.getPfnForLfn( opFile.LFN )
if not pfn["OK"]:
self.log.warn( "unable to create pfn for %s lfn: %s" % ( opFile.LFN, pfn["Message"] ) )
ret["Banned"].append( repSEName )
continue
pfn = pfn["Value"]
repSEMetadata = repSE.getFileMetadata( pfn, singleFile = True )
if not repSEMetadata["OK"]:
self.log.warn( repSEMetadata["Message"] )
ret["Banned"].append( repSEName )
continue
repSEMetadata = repSEMetadata["Value"]
seChecksum = repSEMetadata["Checksum"].replace( "x", "0" ).zfill( 8 ) if "Checksum" in repSEMetadata else None
if opFile.Checksum and opFile.Checksum != seChecksum:
self.log.warn( " %s checksum mismatch: %s %s:%s" % ( opFile.LFN,
opFile.Checksum,
repSE,
seChecksum ) )
ret["Bad"].append( repSEName )
continue
# # if we're here repSE is OK
ret["Valid"].append( repSEName )
return S_OK( ret )
def ftsTransfer( self ):
""" replicate and register using FTS """
self.log.info( "scheduling files..." )
targetSEs = self.operation.targetSEList
for targetSE in targetSEs:
writeStatus = self.rssSEStatus( targetSE, "WriteAccess" )
if not writeStatus["OK"]:
self.log.error( writeStatus["Message"] )
for opFile in self.operation:
opFile.Error = "unknown targetSE: %s" % targetSE
opFile.Status = "Failed"
self.operation.Error = "unknown targetSE: %s" % targetSE
return S_ERROR( self.operation.Error )
toSchedule = []
for opFile in self.getWaitingFilesList():
gMonitor.addMark( "FTSScheduleAtt", 1 )
# # check replicas
replicas = self._filterReplicas( opFile )
if not replicas["OK"]:
continue
replicas = replicas["Value"]
if not replicas["Valid"] and replicas["Banned"]:
self.log.warn( "unable to schedule '%s', replicas only at banned SEs" % opFile.LFN )
gMonitor.addMark( "FTSScheduleFail", 1 )
continue
validReplicas = replicas["Valid"]
bannedReplicas = replicas["Banned"]
if not validReplicas and bannedReplicas:
self.log.warn( "unable to schedule '%s', replicas only at banned SEs" % opFile.LFN )
gMonitor.addMark( "FTSScheduleFail", 1 )
continue
if validReplicas:
validTargets = list( set( self.operation.targetSEList ) - set( validReplicas ) )
if not validTargets:
self.log.info( "file %s is already present at all targets" % opFile.LFN )
opFile.Status = "Done"
continue
toSchedule.append( ( opFile.toJSON()["Value"], validReplicas, validTargets ) )
if toSchedule:
self.log.info( "found %s files to schedule" % len( toSchedule ) )
ftsSchedule = self.ftsClient().ftsSchedule( self.request.RequestID,
self.operation.OperationID,
toSchedule )
if not ftsSchedule["OK"]:
self.log.error( ftsSchedule["Message"] )
return ftsSchedule
ftsSchedule = ftsSchedule["Value"]
for fileID in ftsSchedule["Successful"]:
gMonitor.addMark( "FTSScheduleOK", 1 )
for opFile in self.operation:
if fileID == opFile.FileID:
opFile.Status = "Scheduled"
self.log.always( "%s has been scheduled for FTS" % opFile.LFN )
for fileID, reason in ftsSchedule["Failed"]:
gMonitor.addMark( "FTSScheduleFail", 1 )
for opFile in self.operation:
if fileID == opFile.FileID:
opFile.Error = reason
self.log.error( "unable to schedule %s for FTS: %s" % ( opFile.LFN, opFile.Error ) )
return S_OK()
  def rmTransfer( self ):
    """ replicate and register using ReplicaManager

    Loops over all target SEs and, for each waiting file, calls
    ReplicaManager.replicateAndRegister, updating file status, operation
    error and gMonitor counters along the way.

    :return: S_OK() on completion; S_ERROR (or the raw error structure)
             when the source SE is unreadable or a target SE is
             unknown/banned for writing
    """
    self.log.info( "transferring files using replica manager..." )
    # # source SE
    sourceSE = self.operation.SourceSE if self.operation.SourceSE else None
    if sourceSE:
      # # check source se for read
      sourceRead = self.rssSEStatus( sourceSE, "ReadAccess" )
      if not sourceRead["OK"]:
        # RSS lookup itself failed: fail every file and bail out
        self.log.error( sourceRead["Message"] )
        for opFile in self.operation:
          opFile.Error = sourceRead["Message"]
          opFile.Status = "Failed"
        self.operation.Error = sourceRead["Message"]
        gMonitor.addMark( "ReplicateAndRegisterAtt", len( self.operation ) )
        gMonitor.addMark( "ReplicateFail", len( self.operation ) )
        return sourceRead
      if not sourceRead["Value"]:
        self.operation.Error = "SourceSE %s is banned for reading" % sourceSE
        self.log.error( self.operation.Error )
        return S_ERROR( self.operation.Error )
    # # list of targetSEs
    targetSEs = self.operation.targetSEList
    # # check targetSEs for removal
    # NOTE(review): despite the comment above, this loop checks *write* access,
    # not removal -- presumably a stale comment.
    bannedTargets = []
    for targetSE in targetSEs:
      writeStatus = self.rssSEStatus( targetSE, "WriteAccess" )
      if not writeStatus["OK"]:
        self.log.error( writeStatus["Message"] )
        for opFile in self.operation:
          opFile.Error = "unknown targetSE: %s" % targetSE
          opFile.Status = "Failed"
        self.operation.Error = "unknown targetSE: %s" % targetSE
        return S_ERROR( self.operation.Error )
      if not writeStatus["Value"]:
        self.log.error( "TargetSE %s in banned for writing right now" % targetSE )
        bannedTargets.append( targetSE )
        # NOTE(review): += assumes self.operation.Error is already a string here
        self.operation.Error += "banned targetSE: %s;" % targetSE
    # # some targets are banned? return
    if bannedTargets:
      return S_ERROR( "%s targets are banned for writing" % ",".join( bannedTargets ) )
    # # loop over targetSE
    for targetSE in targetSEs:
      # # check target SE
      # NOTE(review): write access was already verified in the loop above;
      # this re-check only matters if the RSS answer changed in between.
      targetWrite = self.rssSEStatus( targetSE, "WriteAccess" )
      if not targetWrite["OK"]:
        self.log.error( targetWrite["Message"] )
        for opFile in self.operation:
          opFile.Error = targetWrite["Message"]
          opFile.Status = "Failed"
        self.operation.Error = targetWrite["Message"]
        return targetWrite
      if not targetWrite["Value"]:
        reason = "TargetSE %s is banned for writing" % targetSE
        self.log.error( reason )
        self.operation.Error = reason
        continue
      # # get waiting files (re-evaluated for each target SE)
      waitingFiles = self.getWaitingFilesList()
      # # loop over files
      for opFile in waitingFiles:
        gMonitor.addMark( "ReplicateAndRegisterAtt", 1 )
        lfn = opFile.LFN
        if not sourceSE:
          # no explicit source SE: pick one from this file's valid replicas
          replicas = self._filterReplicas( opFile )
          if not replicas["OK"]:
            self.log.error( replicas["Message"] )
            continue
          replicas = replicas["Value"]
          if not replicas["Valid"]:
            self.log.warn( "unable to find valid replicas for %s" % lfn )
            continue
          # # get the first one in the list
          # NOTE(review): sourceSE is assigned here and never reset, so the SE
          # chosen for the first file is reused for all subsequent files and
          # target SEs -- confirm this is intended.
          sourceSE = replicas["Valid"][0]
        # # call ReplicaManager
        res = self.replicaManager().replicateAndRegister( lfn, targetSE, sourceSE = sourceSE )
        if res["OK"]:
          if lfn in res["Value"]["Successful"]:
            if "replicate" in res["Value"]["Successful"][lfn]:
              repTime = res["Value"]["Successful"][lfn]["replicate"]
              self.log.info( "file %s replicated at %s in %s s." % ( lfn, targetSE, repTime ) )
              gMonitor.addMark( "ReplicateOK", 1 )
              if "register" in res["Value"]["Successful"][lfn]:
                gMonitor.addMark( "RegisterOK", 1 )
                regTime = res["Value"]["Successful"][lfn]["register"]
                self.log.info( "file %s registered at %s in %s s." % ( lfn, targetSE, regTime ) )
              else:
                # replication succeeded but registration did not: queue a
                # follow-up registration operation
                gMonitor.addMark( "RegisterFail", 1 )
                self.log.info( "failed to register %s at %s." % ( lfn, targetSE ) )
                opFile.Error = "Failed to register"
                opFile.Status = "Failed"
                # # add register replica operation
                self.addRegisterReplica( opFile, targetSE )
            else:
              self.log.info( "failed to replicate %s to %s." % ( lfn, targetSE ) )
              gMonitor.addMark( "ReplicateFail", 1 )
              opFile.Error = "Failed to replicate"
          else:
            gMonitor.addMark( "ReplicateFail", 1 )
            reason = res["Value"]["Failed"][lfn]
            self.log.error( "failed to replicate and register file %s at %s: %s" % ( lfn, targetSE, reason ) )
            opFile.Error = reason
        else:
          gMonitor.addMark( "ReplicateFail", 1 )
          opFile.Error = "ReplicaManager error: %s" % res["Message"]
          self.log.error( opFile.Error )
        if not opFile.Error:
          self.log.info( "file %s has been replicated to all targetSEs" % lfn )
          opFile.Status = "Done"
    return S_OK()
def addRegisterReplica( self, opFile, targetSE ):
""" add RegisterReplica operation for file
:param File opFile: operation file
:param str targetSE: target SE
"""
# # add RegisterReplica operation
registerOperation = Operation()
registerOperation.Type = "RegisterFile"
registerOperation.TargetSE = targetSE
registerFile = File()
registerFile.LFN = opFile.LFN
registerFile.PFN = opFile.PFN
registerFile.GUID = opFile.GUID
registerFile.Checksum = opFile.Checksum
registerFile.ChecksumType = opFile.ChecksumType
registerFile.Size = opFile.Size
registerOperation.addFile( registerFile )
self.request.insertAfter( registerOperation, self.operation )
return S_OK()
| [
"gavelock@gmail.com"
] | gavelock@gmail.com |
6ed5d3c73ea9d4460a02feac33d5553147dbe7f1 | 956cc6ff2b58a69292f7d1223461bc9c2b9ea6f1 | /setups/cuda9.2/setup_all.py | b015fd581d34eec93edcca09d13adfb412600491 | [
"Apache-2.0"
] | permissive | Aanisha/monk_v1 | c24279b2b461df9b3de2984bae0e2583aba48143 | c9e89b2bc0c1dbb320aa6da5cba0aa1c1526ad72 | refs/heads/master | 2022-12-29T00:37:15.320129 | 2020-10-18T09:12:13 | 2020-10-18T09:12:13 | 286,278,278 | 0 | 0 | Apache-2.0 | 2020-08-09T16:51:02 | 2020-08-09T16:51:02 | null | UTF-8 | Python | false | false | 1,358 | py | import setuptools
# Read the long description for PyPI from the project README.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="monk_cuda92",
    version="0.0.1",
    author="Tessellate Imaging",
    author_email="abhishek@tessellateimaging.com",
    description="Monk Classification Library - Cuda92 - backends - pytorch, keras, gluon",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Tessellate-Imaging/monk_v1",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Environment :: GPU :: NVIDIA CUDA :: 9.2",
    ],
    install_requires=[
        'scipy',
        'scikit-learn',
        'scikit-image',
        'opencv-python',
        'pillow==6.0.0',
        'tqdm',
        'gpustat',
        'psutil',
        'pandas',
        'GPUtil',
        'mxnet-cu92==1.5.1',
        'gluoncv==0.6',
        'torch==1.4.0',
        'torchvision==0.5.0',
        'keras==2.2.5',
        'tensorflow-gpu==1.12.0',
        # BUG FIX: removed a duplicate 'torch==1.4.0' entry that appeared here
        'tabulate',
        'netron',
        'networkx',
        'matplotlib',
        'pylg',
        'ipywidgets'
    ],
    python_requires='>=3.6',
)
| [
"abhishek4273@gmail.com"
] | abhishek4273@gmail.com |
db97c2663b86fb19553854a8de36158cd641b64b | a5638a2ff9381b1a5b804eab3d90db04e9614c4f | /Python/Easy/multiplesOfANumber.py | 3ab47faf75bd90882b94721a9d2e81973e7a609f | [] | no_license | jeffthemaximum/CodeEval | c9e1fe5a966718a627d72e3f73f9c1bddb42c0ef | e2e85e7564a711c2ae83acbcab6b5c67023b3659 | refs/heads/master | 2021-01-17T07:21:25.490894 | 2016-07-18T23:54:22 | 2016-07-18T23:54:22 | 40,488,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | import sys
with open(sys.argv[1], 'r') as input:
test_cases = input.read().strip().splitlines()
for test in test_cases:
x = test.split(',') #an array
i = 1
flag = True
while flag:
if (int(x[1]) * i) > int(x[0]):
print int(x[1]) * i
flag = False
else:
i = i + 1
continue
break | [
"frey.maxim@gmail.com"
] | frey.maxim@gmail.com |
ea33a4aa07a7a8aff2e7ee7cb5f217a354933eb0 | 34e0865fb4915390e77336e81b2c87ec2bf52df6 | /settings/__init__.py | 69a047331a5bef0788da3e0eaaef1892c43a58c9 | [] | no_license | HiPiH/local | 3702be6b140fe879188e9623ede27adfc1ce8765 | 6c3bd2c0818c780977c2081ab72906f0166625dd | refs/heads/master | 2021-01-25T04:50:29.096944 | 2011-12-24T08:21:39 | 2011-12-24T08:21:39 | 3,026,345 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39 | py | from base import *
from local import *
| [
"admin@nvk.su"
] | admin@nvk.su |
59c2d15633d53876d3a08d4b794723977d0ed9e0 | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_assets_operations.py | 2ff6391f63b470ccfca07a99a46a3e0233b964c2 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 38,428 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._assets_operations import (
build_create_or_update_request,
build_delete_request,
build_get_encryption_key_request,
build_get_request,
build_list_container_sas_request,
build_list_request,
build_list_streaming_locators_request,
build_update_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AssetsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.media.aio.AzureMediaServices`'s
:attr:`assets` attribute.
"""
models = _models
    def __init__(self, *args, **kwargs) -> None:
        # Accept (client, config, serializer, deserializer) either positionally
        # or as keyword arguments, in that fixed order, as supplied by the
        # generated service client.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self,
resource_group_name: str,
account_name: str,
filter: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.Asset"]:
"""List Assets.
List Assets in the Media Services account with optional filtering and ordering.
:param resource_group_name: The name of the resource group within the Azure subscription.
Required.
:type resource_group_name: str
:param account_name: The Media Services account name. Required.
:type account_name: str
:param filter: Restricts the set of items returned. Default value is None.
:type filter: str
:param top: Specifies a non-negative integer n that limits the number of items returned from a
collection. The service returns the number of available items up to but not greater than the
specified value n. Default value is None.
:type top: int
:param orderby: Specifies the key by which the result collection should be ordered. Default
value is None.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Asset or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.media.models.Asset]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.AssetCollection]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
orderby=orderby,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AssetCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets"} # type: ignore
    @distributed_trace_async
    async def get(self, resource_group_name: str, account_name: str, asset_name: str, **kwargs: Any) -> _models.Asset:
        """Get an Asset.

        Get the details of an Asset in the Media Services account.

        :param resource_group_name: The name of the resource group within the Azure subscription.
         Required.
        :type resource_group_name: str
        :param account_name: The Media Services account name. Required.
        :type account_name: str
        :param asset_name: The Asset name. Required.
        :type asset_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Asset or the result of cls(response)
        :rtype: ~azure.mgmt.media.models.Asset
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Maps HTTP status codes to exceptions; extendable via kwargs["error_map"].
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.Asset]

        request = build_get_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            asset_name=asset_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        # Send the request; anything but 200 is mapped to a typed error above.
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("Asset", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}"}  # type: ignore
    @overload
    async def create_or_update(
        self,
        resource_group_name: str,
        account_name: str,
        asset_name: str,
        parameters: _models.Asset,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Asset:
        """Create or update an Asset.

        Creates or updates an Asset in the Media Services account.

        :param resource_group_name: The name of the resource group within the Azure subscription.
         Required.
        :type resource_group_name: str
        :param account_name: The Media Services account name. Required.
        :type account_name: str
        :param asset_name: The Asset name. Required.
        :type asset_name: str
        :param parameters: The request parameters. Required.
        :type parameters: ~azure.mgmt.media.models.Asset
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Asset or the result of cls(response)
        :rtype: ~azure.mgmt.media.models.Asset
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def create_or_update(
        self,
        resource_group_name: str,
        account_name: str,
        asset_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Asset:
        """Create or update an Asset.

        Creates or updates an Asset in the Media Services account.

        :param resource_group_name: The name of the resource group within the Azure subscription.
         Required.
        :type resource_group_name: str
        :param account_name: The Media Services account name. Required.
        :type account_name: str
        :param asset_name: The Asset name. Required.
        :type asset_name: str
        :param parameters: The request parameters. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Asset or the result of cls(response)
        :rtype: ~azure.mgmt.media.models.Asset
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def create_or_update(
        self,
        resource_group_name: str,
        account_name: str,
        asset_name: str,
        parameters: Union[_models.Asset, IO],
        **kwargs: Any
    ) -> _models.Asset:
        """Create or update an Asset.

        Creates or updates an Asset in the Media Services account.

        :param resource_group_name: The name of the resource group within the Azure subscription.
         Required.
        :type resource_group_name: str
        :param account_name: The Media Services account name. Required.
        :type account_name: str
        :param asset_name: The Asset name. Required.
        :type asset_name: str
        :param parameters: The request parameters. Is either a model type or a IO type. Required.
        :type parameters: ~azure.mgmt.media.models.Asset or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Asset or the result of cls(response)
        :rtype: ~azure.mgmt.media.models.Asset
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.Asset]

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw streams/bytes are sent as-is; model objects are serialized to JSON.
        # NOTE(review): isinstance against typing.IO can raise at runtime on some
        # Python versions -- verify against the supported interpreter range.
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "Asset")

        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            asset_name=asset_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.create_or_update.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        # 200 = updated existing asset, 201 = created a new one.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize("Asset", pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize("Asset", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}"}  # type: ignore
    @distributed_trace_async
    async def delete(  # pylint: disable=inconsistent-return-statements
        self, resource_group_name: str, account_name: str, asset_name: str, **kwargs: Any
    ) -> None:
        """Delete an Asset.

        Deletes an Asset in the Media Services account.

        :param resource_group_name: The name of the resource group within the Azure subscription.
         Required.
        :type resource_group_name: str
        :param account_name: The Media Services account name. Required.
        :type account_name: str
        :param asset_name: The Asset name. Required.
        :type asset_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[None]

        request = build_delete_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            asset_name=asset_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.delete.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        # 200 = deleted, 204 = no content (already absent); both succeed.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}"}  # type: ignore
    @overload
    async def update(
        self,
        resource_group_name: str,
        account_name: str,
        asset_name: str,
        parameters: _models.Asset,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Asset:
        """Update an Asset.

        Updates an existing Asset in the Media Services account.

        :param resource_group_name: The name of the resource group within the Azure subscription.
         Required.
        :type resource_group_name: str
        :param account_name: The Media Services account name. Required.
        :type account_name: str
        :param asset_name: The Asset name. Required.
        :type asset_name: str
        :param parameters: The request parameters. Required.
        :type parameters: ~azure.mgmt.media.models.Asset
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Asset or the result of cls(response)
        :rtype: ~azure.mgmt.media.models.Asset
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    async def update(
        self,
        resource_group_name: str,
        account_name: str,
        asset_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Asset:
        """Update an Asset.

        Updates an existing Asset in the Media Services account.

        :param resource_group_name: The name of the resource group within the Azure subscription.
         Required.
        :type resource_group_name: str
        :param account_name: The Media Services account name. Required.
        :type account_name: str
        :param asset_name: The Asset name. Required.
        :type asset_name: str
        :param parameters: The request parameters. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Asset or the result of cls(response)
        :rtype: ~azure.mgmt.media.models.Asset
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def update(
        self,
        resource_group_name: str,
        account_name: str,
        asset_name: str,
        parameters: Union[_models.Asset, IO],
        **kwargs: Any
    ) -> _models.Asset:
        """Update an Asset.

        Updates an existing Asset in the Media Services account.

        :param resource_group_name: The name of the resource group within the Azure subscription.
         Required.
        :type resource_group_name: str
        :param account_name: The Media Services account name. Required.
        :type account_name: str
        :param asset_name: The Asset name. Required.
        :type asset_name: str
        :param parameters: The request parameters. Is either a model type or a IO type. Required.
        :type parameters: ~azure.mgmt.media.models.Asset or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Asset or the result of cls(response)
        :rtype: ~azure.mgmt.media.models.Asset
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.Asset]

        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw streams/bytes are sent as-is; model objects are serialized to JSON.
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "Asset")

        request = build_update_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            asset_name=asset_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.update.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize("Asset", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}"}  # type: ignore
    @overload
    async def list_container_sas(
        self,
        resource_group_name: str,
        account_name: str,
        asset_name: str,
        parameters: _models.ListContainerSasInput,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.AssetContainerSas:
        """List the Asset URLs.
        Lists storage container URLs with shared access signatures (SAS) for uploading and downloading
        Asset content. The signatures are derived from the storage account keys.
        :param resource_group_name: The name of the resource group within the Azure subscription.
         Required.
        :type resource_group_name: str
        :param account_name: The Media Services account name. Required.
        :type account_name: str
        :param asset_name: The Asset name. Required.
        :type asset_name: str
        :param parameters: The request parameters. Required.
        :type parameters: ~azure.mgmt.media.models.ListContainerSasInput
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AssetContainerSas or the result of cls(response)
        :rtype: ~azure.mgmt.media.models.AssetContainerSas
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typing overload for a model-typed ``parameters``; the decorated
        # implementation below handles both this and the IO variant.
    @overload
    async def list_container_sas(
        self,
        resource_group_name: str,
        account_name: str,
        asset_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.AssetContainerSas:
        """List the Asset URLs.
        Lists storage container URLs with shared access signatures (SAS) for uploading and downloading
        Asset content. The signatures are derived from the storage account keys.
        :param resource_group_name: The name of the resource group within the Azure subscription.
         Required.
        :type resource_group_name: str
        :param account_name: The Media Services account name. Required.
        :type account_name: str
        :param asset_name: The Asset name. Required.
        :type asset_name: str
        :param parameters: The request parameters. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AssetContainerSas or the result of cls(response)
        :rtype: ~azure.mgmt.media.models.AssetContainerSas
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typing overload for a raw-IO ``parameters`` body; the decorated
        # implementation below handles both this and the model variant.
    @distributed_trace_async
    async def list_container_sas(
        self,
        resource_group_name: str,
        account_name: str,
        asset_name: str,
        parameters: Union[_models.ListContainerSasInput, IO],
        **kwargs: Any
    ) -> _models.AssetContainerSas:
        """List the Asset URLs.
        Lists storage container URLs with shared access signatures (SAS) for uploading and downloading
        Asset content. The signatures are derived from the storage account keys.
        :param resource_group_name: The name of the resource group within the Azure subscription.
         Required.
        :type resource_group_name: str
        :param account_name: The Media Services account name. Required.
        :type account_name: str
        :param asset_name: The Asset name. Required.
        :type asset_name: str
        :param parameters: The request parameters. Is either a model type or a IO type. Required.
        :type parameters: ~azure.mgmt.media.models.ListContainerSasInput or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AssetContainerSas or the result of cls(response)
        :rtype: ~azure.mgmt.media.models.AssetContainerSas
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map HTTP status codes to SDK exceptions; callers may extend/override
        # the mapping via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        # HTTP header/query-parameter names are case-insensitive.
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.AssetContainerSas]
        content_type = content_type or "application/json"
        # Dispatch on the runtime type of ``parameters``: raw bytes/streams go
        # through as-is, model instances are serialized to JSON.
        _json = None
        _content = None
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "ListContainerSasInput")
        request = build_list_container_sas_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            asset_name=asset_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.list_container_sas.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        # Run the request through the client's policy pipeline (auth, retry,
        # tracing, ...).
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("AssetContainerSas", pipeline_response)
        # ``cls`` lets callers post-process the raw pipeline response.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_container_sas.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/listContainerSas"}  # type: ignore
@distributed_trace_async
async def get_encryption_key(
self, resource_group_name: str, account_name: str, asset_name: str, **kwargs: Any
) -> _models.StorageEncryptedAssetDecryptionData:
"""Gets the Asset storage key.
Gets the Asset storage encryption keys used to decrypt content created by version 2 of the
Media Services API.
:param resource_group_name: The name of the resource group within the Azure subscription.
Required.
:type resource_group_name: str
:param account_name: The Media Services account name. Required.
:type account_name: str
:param asset_name: The Asset name. Required.
:type asset_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageEncryptedAssetDecryptionData or the result of cls(response)
:rtype: ~azure.mgmt.media.models.StorageEncryptedAssetDecryptionData
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.StorageEncryptedAssetDecryptionData]
request = build_get_encryption_key_request(
resource_group_name=resource_group_name,
account_name=account_name,
asset_name=asset_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_encryption_key.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("StorageEncryptedAssetDecryptionData", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_encryption_key.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/getEncryptionKey"} # type: ignore
@distributed_trace_async
async def list_streaming_locators(
self, resource_group_name: str, account_name: str, asset_name: str, **kwargs: Any
) -> _models.ListStreamingLocatorsResponse:
"""List Streaming Locators.
Lists Streaming Locators which are associated with this asset.
:param resource_group_name: The name of the resource group within the Azure subscription.
Required.
:type resource_group_name: str
:param account_name: The Media Services account name. Required.
:type account_name: str
:param asset_name: The Asset name. Required.
:type asset_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListStreamingLocatorsResponse or the result of cls(response)
:rtype: ~azure.mgmt.media.models.ListStreamingLocatorsResponse
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.ListStreamingLocatorsResponse]
request = build_list_streaming_locators_request(
resource_group_name=resource_group_name,
account_name=account_name,
asset_name=asset_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_streaming_locators.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("ListStreamingLocatorsResponse", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_streaming_locators.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/listStreamingLocators"} # type: ignore
| [
"noreply@github.com"
] | test-repo-billy.noreply@github.com |
0cadc9640709c0c2dd78d9014603c391ed1cf5fa | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /045_functions/009_functools_module/examples/13-chain.py | d7e02c6a1ee13cce91b500f4c51f840784da80d8 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 172 | py | """Пример использования функции chain модуля itertools"""
from itertools import chain

# Walk two ranges back to back as one continuous stream: prints 0 1 0 1 2.
for value in chain(range(2), range(3)):
    print(value)
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
9a46dd5c59b0f01363d42c4eead8dcff3f0a4dbf | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/insert_20200610160638.py | 4b1c20b5c255f499b25e5a60015365369be8bac8 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # nums is a list
# find where n is to be inserted
# so, you loop through the array
# the array is sorted
# to know the position you should check whether n is greater than nums[i]
# continue the loop as you check
def Insert(nums, n):
    """Return the index at which n should be inserted to keep nums sorted.

    Scans left to right for the first element >= n (bisect_left semantics);
    if every element is smaller, n belongs at the end.  The original body
    only printed the elements and never computed the documented result.
    """
    for i, value in enumerate(nums):
        if value >= n:
            return i
    return len(nums)
# Demo call: 2 belongs at index 1 in the sorted list [1, 3, 5, 6].
Insert([1,3,5,6], 2)
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
6a7a82c9c0a99d0b647ecff1e459848d4a2ac2e3 | 41f960a830752877bf2248bb2c620491752ccfe5 | /fork/multipprocessing.py | 1fe7394082645c192a8111fbf878346eed726efa | [] | no_license | drakhero/python_base | 368617032d2d3d5388f22de2cb2ca0af81de00ec | 90848ef630ab607a1b5563f773e1b4ca7eaef08f | refs/heads/master | 2020-04-24T09:51:09.596139 | 2019-02-23T03:52:35 | 2019-02-23T03:52:35 | 171,875,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | from multiprocessing import Process
import time
# Task 1: executed in the child process.
def fun1():
    print("子进程做事件1")
# Create one child process that will run fun1.
p = Process(target=fun1)
# Start the child; it executes the code in fun1.
p.start()
# Wait for the child deterministically. The original slept a fixed second,
# which races with the child's actual runtime; join() blocks exactly until
# the child has exited.
p.join()
# Parent continues once the child has finished.
print('父进程在做事')
| [
"17504336124@163.com"
] | 17504336124@163.com |
ad80d80796527bb59b38b55a1a1ec677fb086100 | 94c4a1a14cc9e68584912340c8b2fd54686dd638 | /Day9/select_Ftp/server/server.py | e50b72d485949db91c8b08747249b21954a9e665 | [] | no_license | Anne19953/LearnPython | 33da4ae57b3aed9cb687567958cafa8a55ff2b7b | a2bcb620ed453ff802862ae31efd0e8c159d8bfe | refs/heads/master | 2020-08-09T00:25:31.839253 | 2019-10-10T02:53:45 | 2019-10-10T02:53:45 | 192,083,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,289 | py | #!/usr/bin/env python
# coding:utf-8
"""
Name : server.py
Author : anne
Time : 2019-08-27 17:28
Desc:
"""
import os
import time
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
import socket
import selectors
class selectTtpServer:
    """Single-threaded file server driven by a ``selectors`` event loop.

    Protocol: a client first sends a header ``cmd|filename|filesize``;
    for ``put`` the client then streams the file content, which is
    appended under ``upload/``.
    """
    def __init__(self):
        # Per-connection session state:
        # {conn: {'cmd': ..., 'filename': ..., 'filesize': ..., 'received': ...}}
        self.dic = {}
        # Kept for backward compatibility only. Byte counting now happens
        # per connection in self.dic: the shared counter was never reset and
        # mixed up consecutive/concurrent uploads.
        self.hasReceived = 0
        self.sel = selectors.DefaultSelector()
        self.create_socket()
        self.handle()
    # Register the listening socket with the selector.
    def create_socket(self):
        server = socket.socket()
        server.bind(('127.0.0.1',8885))
        server.listen(5)
        server.setblocking(False)  # non-blocking so the event loop never stalls
        self.sel.register(server,selectors.EVENT_READ,self.accept)
        print('服务端已开启,等待用户链接。。。')
    # Event loop: dispatch each ready file object to its registered callback.
    def handle(self):
        while True:
            events = self.sel.select()
            for key, mask in events:
                # key.data is accept() for the listener, read() for clients.
                callback = key.data
                callback(key.fileobj, mask)
    def accept(self,sock,mask):
        conn, addr = sock.accept()
        print('accepted', conn, 'from', addr)
        conn.setblocking(False)
        self.sel.register(conn, selectors.EVENT_READ, self.read)
        self.dic[conn] = {}
    def read(self,conn,mask):
        try:
            if not self.dic[conn]:
                # First packet of a session: "cmd|filename|filesize" header.
                data = conn.recv(1024)
                cmd,filename,filesize = str(data,encoding='utf-8').split('|')
                # BUG FIX: update only this connection's entry. The original
                # rebound self.dic wholesale (self.dic = {conn: ...}),
                # discarding every other connection's session state.
                self.dic[conn] = {'cmd':cmd,'filename':filename,
                                  'filesize':int(filesize),'received':0}
                if cmd == 'put':
                    conn.send(bytes('OK',encoding='utf-8'))
                if self.dic[conn]['cmd'] == 'get':
                    file = os.path.join(BASE_DIR,'download',filename)
                    if os.path.exists(file):
                        filesize = os.path.getsize(file)
                        send_info = '%s|%s'%('YES',filesize)
                        conn.send(bytes(send_info,encoding='utf-8'))
                    else:
                        send_info = '%s|%s'%('NO',0)
                        conn.send(bytes(send_info,encoding='utf-8'))
            else:
                # Later packets: dispatch to the handler named by cmd.
                # NOTE(review): getattr dispatch on client-supplied cmd lets a
                # client invoke any method of this class; restrict to an
                # explicit whitelist if this ever faces untrusted peers.
                if self.dic[conn].get('cmd',None):
                    cmd = self.dic[conn].get('cmd')
                    if hasattr(self,cmd):
                        func = getattr(self,cmd)
                        func(conn)
                    else:
                        print('error cmd!')
        except Exception as e:
            # Any failure tears the connection down and forgets its state.
            print('error',e)
            self.sel.unregister(conn)
            conn.close()
    def put(self,conn):
        info = self.dic[conn]
        fileName = info['filename']
        fileSize = info['filesize']
        path = os.path.join(BASE_DIR,'upload',fileName)
        recv_data = conn.recv(1024)
        # BUG FIX: count received bytes per connection instead of in the
        # shared self.hasReceived, which was never reset and broke any
        # upload after the first one.
        info['received'] += len(recv_data)
        with open(path,'ab') as f:
            f.write(recv_data)
        if fileSize == info['received']:
            # Session complete: reset so the connection can issue a new command.
            self.dic[conn] = {}
            print('%s 上传完毕!'%fileName)
    def get(self,conn):
        # Download transfer is not implemented; only the YES/NO header reply
        # is sent from read().
        pass
if __name__ == '__main__':
    # Construct the server; __init__ enters the (blocking) event loop.
    selectTtpServer()
| [
"anne@199534.com"
] | anne@199534.com |
b7f03924a22aa5e1a6b5585208e2c3461ac89f15 | bf64d19174ef332f39e2d8210f3eb4f783262554 | /lib/networks/Resnet18_fcn_classifier_test.py | af272fabe9f6981c3d58530213a26620b2263652 | [] | no_license | juzisedefeimao/cv | 3e4dd7deee471321e071ca996769fc3b65481993 | fb9e9292030481f5a26efde4003fb83d37a34962 | refs/heads/master | 2020-05-30T14:29:13.253563 | 2019-06-02T01:08:53 | 2019-06-02T01:08:53 | 189,791,743 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,027 | py | from lib.networks.network import Network
import numpy as np
from lib.networks.netconfig import cfg
import tensorflow as tf
cls_num = cfg.ZLRM.TRAIN.CLASSIFY_NUM
class Resnet18_fcn_classifier_test(Network):
    """ResNet-18-style backbone with a fully-convolutional classifier head.

    Test-time variant: batch-normalization layers are frozen
    (trainable=False) and the input placeholder accepts arbitrary spatial
    sizes.
    """
    def __init__(self):
        self.inputs = []
        self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='data')
        self.layers = {'data': self.data}
        self.setup()
    def setup(self):
        # BN statistics stay frozen at inference time.
        bn_trainable = False
        # Stem: 7x7/2 conv + BN/ReLU + 3x3/2 max pool.
        (self.feed('data')
             .conv(7, 7, 64, 2, 2, name='conv1', relu=False, trainable=True)
             .batch_normalization(name='bn1', relu=True, trainable=bn_trainable)
             .max_pool(3, 3, 2, 2, name='pool1', padding='VALID'))
        # 1x1 projection shortcut into stage 1.
        (self.feed('pool1')
             .conv(1, 1, 64, 1, 1, name='transform1_conv', relu=False, trainable=True)
             .batch_normalization(name='transform1_bn', relu=False, trainable=bn_trainable))
        # Stage 1, residual branch 1.
        (self.feed('pool1')
             .conv(3, 3, 64, 1, 1, name='res1_1_conv1', relu=False, trainable=True)
             .batch_normalization(name='res1_1_bn1', relu=True, trainable=bn_trainable)
             .conv(3, 3, 64, 1, 1, name='res1_1_conv2', relu=False, trainable=True)
             .batch_normalization(name='res1_1_bn2', relu=False, trainable=bn_trainable))
        # Stage 1, block 2: merge shortcut + branch, then two more convs.
        (self.feed('transform1_bn', 'res1_1_bn2')
             .add(name='res1_1_add')
             .relu(name='res1_1_relu')
             .conv(3, 3, 64, 1, 1, name='res1_2_conv1', relu=False, trainable=True)
             .batch_normalization(name='res1_2_bn1', relu=True, trainable=bn_trainable)
             .conv(3, 3, 64, 1, 1, name='res1_2_conv2', relu=False, trainable=True)
             .batch_normalization(name='res1_2_bn2', relu=False, trainable=bn_trainable))
        # Close stage 1 and project the shortcut into stage 2 (stride 2).
        (self.feed('transform1_bn', 'res1_2_bn2')
             .add(name='res1_2_add')
             .relu(name='res1_2_relu')
             .conv(1, 1, 128, 2, 2, name='transform2_conv', relu=False, trainable=True)
             .batch_normalization(name='transform2_bn', relu=False, trainable=bn_trainable))
        # Stage 2, residual branch 1 (downsampling conv).
        (self.feed('res1_2_relu')
             .conv(3, 3, 128, 2, 2, name='res2_1_conv1', relu=False, trainable=True)
             .batch_normalization(name='res2_1_bn1', relu=True, trainable=bn_trainable)
             .conv(3, 3, 128, 1, 1, name='res2_1_conv2', relu=False, trainable=True)
             .batch_normalization(name='res2_1_bn2', relu=False, trainable=bn_trainable))
        # Stage 2, block 2.
        (self.feed('transform2_bn', 'res2_1_bn2')
             .add(name='res2_1_add')
             .relu(name='res2_1_relu')
             .conv(3, 3, 128, 1, 1, name='res2_2_conv1', relu=False, trainable=True)
             .batch_normalization(name='res2_2_bn1', relu=True, trainable=bn_trainable)
             .conv(3, 3, 128, 1, 1, name='res2_2_conv2', relu=False, trainable=True)
             .batch_normalization(name='res2_2_bn2', relu=True, trainable=bn_trainable))
        # Close stage 2 and project into stage 3.
        (self.feed('transform2_bn', 'res2_2_bn2')
             .add(name='res2_2_add')
             .relu(name='res2_2_relu')
             .conv(1, 1, 256, 2, 2, name='transform3_conv', relu=False, trainable=True)
             .batch_normalization(name='transform3_bn', relu=False, trainable=bn_trainable))
        # Stage 3, residual branch 1.
        (self.feed('res2_2_relu')
             .conv(3, 3, 256, 2, 2, name='res3_1_conv1', relu=False, trainable=True)
             .batch_normalization(name='res3_1_bn1', relu=True, trainable=bn_trainable)
             .conv(3, 3, 256, 1, 1, name='res3_1_conv2', relu=False, trainable=True)
             .batch_normalization(name='res3_1_bn2', relu=True, trainable=bn_trainable))
        # Stage 3, block 2.
        (self.feed('transform3_bn', 'res3_1_bn2')
             .add(name='res3_1_add')
             .relu(name='res3_1_relu')
             .conv(3, 3, 256, 1, 1, name='res3_2_conv1', relu=False, trainable=True)
             .batch_normalization(name='res3_2_bn1', relu=True, trainable=bn_trainable)
             .conv(3, 3, 256, 1, 1, name='res3_2_conv2', relu=False, trainable=True)
             .batch_normalization(name='res3_2_bn2', relu=True, trainable=bn_trainable))
        # Close stage 3 and project into stage 4.
        (self.feed('transform3_bn', 'res3_2_bn2')
             .add(name='res3_2_add')
             .relu(name='res3_2_relu')
             .conv(1, 1, 512, 2, 2, name='transform4_conv', relu=False, trainable=True)
             .batch_normalization(name='transform4_bn', relu=False, trainable=bn_trainable))
        # Stage 4, residual branch 1.
        (self.feed('res3_2_relu')
             .conv(3, 3, 512, 2, 2, name='res4_1_conv1', relu=False, trainable=True)
             .batch_normalization(name='res4_1_bn1', relu=True, trainable=bn_trainable)
             .conv(3, 3, 512, 1, 1, name='res4_1_conv2', relu=False, trainable=True)
             .batch_normalization(name='res4_1_bn2', relu=True, trainable=bn_trainable))
        # Stage 4, block 2.
        (self.feed('transform4_bn', 'res4_1_bn2')
             .add(name='res4_1_add')
             .relu(name='res4_1_relu')
             .conv(3, 3, 512, 1, 1, name='res4_2_conv1', relu=False, trainable=True)
             .batch_normalization(name='res4_2_bn1', relu=True, trainable=bn_trainable)
             .conv(3, 3, 512, 1, 1, name='res4_2_conv2', relu=False, trainable=True)
             .batch_normalization(name='res4_2_bn2', relu=True, trainable=bn_trainable))
        # Head: close stage 4, 1x1 conv to cls_num*k*k score maps, then the
        # ps_pool / avg_pool reduction over the k x k grid and a softmax.
        (self.feed('transform4_bn', 'res4_2_bn2')
             .add(name='res4_2_add')
             .relu(name='res4_2_relu')
             .conv(1, 1, cls_num * cfg.ZLRM.PSROIPOOL * cfg.ZLRM.PSROIPOOL, 1, 1, name='fcn_cls', trainable=True)
             .ps_pool(output_dim=cls_num, group_size=cfg.ZLRM.PSROIPOOL, name='pspooled_cls_rois')
             .avg_pool(cfg.ZLRM.PSROIPOOL, cfg.ZLRM.PSROIPOOL, cfg.ZLRM.PSROIPOOL, cfg.ZLRM.PSROIPOOL, name='cls_score')
             .softmax(name='cls_prob'))
"17696272096@163.com"
] | 17696272096@163.com |
b58c2b669fb5c4e9e55d6b371358d5583c171602 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_369/ch1_2019_08_20_10_55_01_631896.py | d56e48800aac2ab498b0ca3094618d3400a6d5fc | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | def calcula_valor_devido(vp, n, j):
vf = 1
return vf | [
"you@example.com"
] | you@example.com |
05c69d8e932f1b7de373e2b187bfd9d583ee9ff4 | ac3093b2b1b37244fbd10f6eee4de22fa50911da | /links/forms.py | cecf01dc0104575fdf01dc26ebe776d62ce94999 | [
"MIT"
] | permissive | moshthepitt/product.co.ke | 7b5e7b18d0fd2673be52455dbfbcbbeecf2b9224 | 41b32c4019f30ce9483b4d84f335450f45f0e1cb | refs/heads/master | 2021-01-14T11:53:06.899243 | 2016-05-12T13:02:22 | 2016-05-12T13:02:22 | 57,992,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | # -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, HTML
from crispy_forms.bootstrap import Field, FormActions
from .models import Link
class LinkForm(forms.ModelForm):
    """Model form for creating/editing a Link, rendered with crispy-forms."""
    # Override the model field to cap the length and use a textarea.
    description = forms.CharField(
        max_length=750,
        # Fixed typo: "cahracters" -> "characters" (user-facing help text).
        help_text=_("A short description. Please limit to 750 characters."),
        widget=forms.Textarea
    )
    class Meta:
        model = Link
        fields = ['title', 'link', 'description']
    def __init__(self, *args, **kwargs):
        super(LinkForm, self).__init__(*args, **kwargs)
        # crispy-forms helper drives the template rendering of this form.
        self.helper = FormHelper()
        self.helper.form_id = 'link-form'
        self.helper.form_method = 'post'
        self.helper.layout = Layout(
            Field('title'),
            Field('link'),
            Field('description'),
            FormActions(
                Submit('submit', _('Save'), css_class='btn-success'),
                HTML(
                    "<a class='btn btn-default' href='{% url \"home\" %}'>Cancel</a>")
            )
        )
| [
"kelvin@jayanoris.com"
] | kelvin@jayanoris.com |
201c93a9c10fd1270ca83cbee6395c9d6ab7dd48 | 1b2407f35191917818ea7f276079aa8f62429770 | /nova/pci/manager.py | 34b088fe928bb5a8c4841d2d54a7c682e25055f3 | [
"Apache-2.0"
] | permissive | ISCAS-VDI/nova-base | 67838b54230d250b71fd1067c4a754afbc258883 | dbb6bba94f8a3eae5ed420d8af3431ab116c3fa7 | refs/heads/master | 2021-01-20T19:08:51.403722 | 2016-06-07T06:46:54 | 2016-06-07T06:46:54 | 60,588,545 | 0 | 1 | Apache-2.0 | 2020-07-24T00:41:15 | 2016-06-07T06:38:23 | Python | UTF-8 | Python | false | false | 14,207 | py | # Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova import exception
from nova.i18n import _LW
from nova import objects
from nova.objects import fields
from nova.pci import stats
from nova.pci import whitelist
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class PciDevTracker(object):
"""Manage pci devices in a compute node.
This class fetches pci passthrough information from hypervisor
and tracks the usage of these devices.
It's called by compute node resource tracker to allocate and free
devices to/from instances, and to update the available pci passthrough
devices information from hypervisor periodically.
`pci_devs` attribute of this class is the in-memory "master copy" of all
devices on each compute host, and all data changes that happen when
claiming/allocating/freeing
devices HAVE TO be made against instances contained in `pci_devs` list,
because they are periodically flushed to the DB when the save()
method is called.
It is unsafe to fetch PciDevice objects elsewhere in the code for update
purposes as those changes will end up being overwritten when the `pci_devs`
are saved.
"""
    def __init__(self, context, node_id=None):
        """Create a pci device tracker.
        If a node_id is passed in, it will fetch pci devices information
        from database, otherwise, it will create an empty devices list
        and the resource tracker will update the node_id information later.
        """
        super(PciDevTracker, self).__init__()
        # Hypervisor-reported property updates deferred while a device is
        # assigned; applied when the device is freed (see _set_hvdevs).
        self.stale = {}
        self.node_id = node_id
        self.dev_filter = whitelist.Whitelist(CONF.pci_passthrough_whitelist)
        self.stats = stats.PciDeviceStats(dev_filter=self.dev_filter)
        self._context = context
        if node_id:
            self.pci_devs = objects.PciDeviceList.get_by_compute_node(
                context, node_id)
        else:
            self.pci_devs = objects.PciDeviceList(objects=[])
        # Wire up PF/VF parent-child references, then rebuild the
        # per-instance claim/allocation maps from the persisted state.
        self._build_device_tree(self.pci_devs)
        self._initial_instance_usage()
def _initial_instance_usage(self):
self.allocations = collections.defaultdict(list)
self.claims = collections.defaultdict(list)
for dev in self.pci_devs:
uuid = dev.instance_uuid
if dev.status == fields.PciDeviceStatus.CLAIMED:
self.claims[uuid].append(dev)
elif dev.status == fields.PciDeviceStatus.ALLOCATED:
self.allocations[uuid].append(dev)
elif dev.status == fields.PciDeviceStatus.AVAILABLE:
self.stats.add_device(dev)
    @property
    def all_devs(self):
        # In-memory "master copy" of every PCI device on this compute node.
        return self.pci_devs
def save(self, context):
for dev in self.pci_devs:
if dev.obj_what_changed():
with dev.obj_alternate_context(context):
dev.save()
if dev.status == fields.PciDeviceStatus.DELETED:
self.pci_devs.objects.remove(dev)
    @property
    def pci_stats(self):
        # Aggregated pool-level availability stats for the tracked devices.
        return self.stats
def update_devices_from_hypervisor_resources(self, devices_json):
"""Sync the pci device tracker with hypervisor information.
To support pci device hot plug, we sync with the hypervisor
periodically, fetching all devices information from hypervisor,
update the tracker and sync the DB information.
Devices should not be hot-plugged when assigned to a guest,
but possibly the hypervisor has no such guarantee. The best
we can do is to give a warning if a device is changed
or removed while assigned.
:param devices_json: The JSON-ified string of device information
that is returned from the virt driver's
get_available_resource() call in the
pci_passthrough_devices key.
"""
devices = []
for dev in jsonutils.loads(devices_json):
if self.dev_filter.device_assignable(dev):
devices.append(dev)
self._set_hvdevs(devices)
@staticmethod
def _build_device_tree(all_devs):
"""Build a tree of devices that represents parent-child relationships.
We need to have the relationships set up so that we can easily make
all the necessary changes to parent/child devices without having to
figure it out at each call site.
This method just adds references to relevant instances already found
in `pci_devs` to `child_devices` and `parent_device` fields of each
one.
Currently relationships are considered for SR-IOV PFs/VFs only.
"""
# Ensures that devices are ordered in ASC so VFs will come
# after their PFs.
all_devs.sort(key=lambda x: x.address)
parents = {}
for dev in all_devs:
if dev.status in (fields.PciDeviceStatus.REMOVED,
fields.PciDeviceStatus.DELETED):
# NOTE(ndipanov): Removed devs are pruned from
# self.pci_devs on save() so we need to make sure we
# are not looking at removed ones as we may build up
# the tree sooner than they are pruned.
continue
if dev.dev_type == fields.PciDeviceType.SRIOV_PF:
dev.child_devices = []
parents[dev.address] = dev
elif dev.dev_type == fields.PciDeviceType.SRIOV_VF:
dev.parent_device = parents.get(dev.parent_addr)
if dev.parent_device:
parents[dev.parent_addr].child_devices.append(dev)
    def _set_hvdevs(self, devices):
        """Reconcile tracked devices against the hypervisor-reported list.
        Devices that disappeared are removed (forcibly marked REMOVED if
        still assigned), devices present on both sides are updated (deferred
        via self.stale while assigned), and new devices are created, added
        to pci_devs and to the stats pools.
        """
        exist_addrs = set([dev.address for dev in self.pci_devs])
        new_addrs = set([dev['address'] for dev in devices])
        for existed in self.pci_devs:
            if existed.address in exist_addrs - new_addrs:
                # Device vanished from the hypervisor view: hot-unplugged.
                try:
                    existed.remove()
                except exception.PciDeviceInvalidStatus as e:
                    LOG.warning(_LW("Trying to remove device with %(status)s "
                                    "ownership %(instance_uuid)s because of "
                                    "%(pci_exception)s"),
                                {'status': existed.status,
                                 'instance_uuid': existed.instance_uuid,
                                 'pci_exception': e.format_message()})
                    # Note(yjiang5): remove the device by force so that
                    # db entry is cleaned in next sync.
                    existed.status = fields.PciDeviceStatus.REMOVED
                else:
                    # Note(yjiang5): no need to update stats if an assigned
                    # device is hot removed.
                    self.stats.remove_device(existed)
            else:
                # Device still present: pick up the fresh hypervisor view.
                new_value = next((dev for dev in devices if
                                  dev['address'] == existed.address))
                new_value['compute_node_id'] = self.node_id
                if existed.status in (fields.PciDeviceStatus.CLAIMED,
                                      fields.PciDeviceStatus.ALLOCATED):
                    # Pci properties may change while assigned because of
                    # hotplug or config changes. Although normally this should
                    # not happen.
                    # As the devices have been assigned to an instance,
                    # we defer the change till the instance is destroyed.
                    # We will not sync the new properties with database
                    # before that.
                    # TODO(yjiang5): Not sure if this is a right policy, but
                    # at least it avoids some confusion and, if needed,
                    # we can add more action like killing the instance
                    # by force in future.
                    self.stale[new_value['address']] = new_value
                else:
                    existed.update_device(new_value)
        # Brand-new addresses: create, track and pool the devices.
        for dev in [dev for dev in devices if
                    dev['address'] in new_addrs - exist_addrs]:
            dev['compute_node_id'] = self.node_id
            dev_obj = objects.PciDevice.create(self._context, dev)
            self.pci_devs.objects.append(dev_obj)
            self.stats.add_device(dev_obj)
        # Refresh PF/VF parent-child references after membership changed.
        self._build_device_tree(self.pci_devs)
def _claim_instance(self, context, pci_requests, instance_numa_topology):
instance_cells = None
if instance_numa_topology:
instance_cells = instance_numa_topology.cells
devs = self.stats.consume_requests(pci_requests.requests,
instance_cells)
if not devs:
return None
instance_uuid = pci_requests.instance_uuid
for dev in devs:
dev.claim(instance_uuid)
if instance_numa_topology and any(
dev.numa_node is None for dev in devs):
LOG.warning(_LW("Assigning a pci device without numa affinity to"
"instance %(instance)s which has numa topology"),
{'instance': instance_uuid})
return devs
def _allocate_instance(self, instance, devs):
for dev in devs:
dev.allocate(instance)
def allocate_instance(self, instance):
devs = self.claims.pop(instance['uuid'], [])
self._allocate_instance(instance, devs)
if devs:
self.allocations[instance['uuid']] += devs
def claim_instance(self, context, pci_requests, instance_numa_topology):
devs = []
if self.pci_devs and pci_requests.requests:
instance_uuid = pci_requests.instance_uuid
devs = self._claim_instance(context, pci_requests,
instance_numa_topology)
if devs:
self.claims[instance_uuid] = devs
return devs
def free_device(self, dev, instance):
"""Free device from pci resource tracker
:param dev: cloned pci device object that needs to be free
:param instance: the instance that this pci device
is allocated to
"""
for pci_dev in self.pci_devs:
# find the matching pci device in the pci resource tracker
# pci device. Once found one free it.
if dev == pci_dev and dev.instance_uuid == instance['uuid']:
self._free_device(pci_dev)
def _free_device(self, dev, instance=None):
freed_devs = dev.free(instance)
stale = self.stale.pop(dev.address, None)
if stale:
dev.update_device(stale)
for dev in freed_devs:
self.stats.add_device(dev)
def _free_instance(self, instance):
# Note(yjiang5): When an instance is resized, the devices in the
# destination node are claimed to the instance in prep_resize stage.
# However, the instance contains only allocated devices
# information, not the claimed one. So we can't use
# instance['pci_devices'] to check the devices to be freed.
for dev in self.pci_devs:
if dev.status in (fields.PciDeviceStatus.CLAIMED,
fields.PciDeviceStatus.ALLOCATED):
if dev.instance_uuid == instance['uuid']:
self._free_device(dev)
def free_instance(self, context, instance):
if self.allocations.pop(instance['uuid'], None):
self._free_instance(instance)
elif self.claims.pop(instance['uuid'], None):
self._free_instance(instance)
def update_pci_for_instance(self, context, instance, sign):
"""Update PCI usage information if devices are de/allocated.
"""
if not self.pci_devs:
return
if sign == -1:
self.free_instance(context, instance)
if sign == 1:
self.allocate_instance(instance)
def clean_usage(self, instances, migrations, orphans):
"""Remove all usages for instances not passed in the parameter.
The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock
"""
existed = set(inst['uuid'] for inst in instances)
existed |= set(mig['instance_uuid'] for mig in migrations)
existed |= set(inst['uuid'] for inst in orphans)
for uuid in self.claims.keys():
if uuid not in existed:
devs = self.claims.pop(uuid, [])
for dev in devs:
self._free_device(dev)
for uuid in self.allocations.keys():
if uuid not in existed:
devs = self.allocations.pop(uuid, [])
for dev in devs:
self._free_device(dev)
def get_instance_pci_devs(inst, request_id=None):
    """Get the devices allocated to one or all requests for an instance.

    - For generic PCI request, the request id is None.
    - For sr-iov networking, the request id is a valid uuid
    - There are a couple of cases where all the PCI devices allocated to an
      instance need to be returned. Refer to libvirt driver that handles
      soft_reboot and hard_boot of 'xen' instances.
    """
    return [dev for dev in inst.pci_devices
            if request_id == 'all' or dev.request_id == request_id]
| [
"wangfeng@nfs.iscas.ac.cn"
] | wangfeng@nfs.iscas.ac.cn |
e73be0af28c5ac96600a3a75f96a175162ec3cc0 | 2031771d8c226806a0b35c3579af990dd0747e64 | /pyobjc-framework-CoreMedia/PyObjCTest/test_cmformatdescription.py | 6183d9f1b41bedaf86c0bde06ab3a474ff318afc | [
"MIT"
] | permissive | GreatFruitOmsk/pyobjc-mirror | a146b5363a5e39181f09761087fd854127c07c86 | 4f4cf0e4416ea67240633077e5665f5ed9724140 | refs/heads/master | 2018-12-22T12:38:52.382389 | 2018-11-12T09:54:18 | 2018-11-12T09:54:18 | 109,211,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,902 | py | from PyObjCTools.TestSupport import *
import CoreMedia
class TestCMFormatDescription (TestCase):
    """Bridge-metadata tests for the CoreMedia CMFormatDescription API.

    Checks that the PyObjC bindings expose the framework's constants with
    the expected values (four-char codes, error codes, flag bits), that
    string constants are loaded, and that function signatures carry the
    correct in/out/retained argument metadata. Methods are gated by
    ``@min_os_level`` to match the macOS release each symbol appeared in.
    """
    def test_constants(self):
        # Numeric and four-char-code constants (available on all versions).
        self.assertEqual(CoreMedia.kCMFormatDescriptionError_InvalidParameter, -12710)
        self.assertEqual(CoreMedia.kCMFormatDescriptionError_AllocationFailed, -12711)
        self.assertEqual(CoreMedia.kCMFormatDescriptionError_ValueNotAvailable, -12718)
        self.assertEqual(CoreMedia.kCMMediaType_Video, fourcc(b'vide'))
        self.assertEqual(CoreMedia.kCMMediaType_Audio, fourcc(b'soun'))
        self.assertEqual(CoreMedia.kCMMediaType_Muxed, fourcc(b'muxx'))
        self.assertEqual(CoreMedia.kCMMediaType_Text, fourcc(b'text'))
        self.assertEqual(CoreMedia.kCMMediaType_ClosedCaption, fourcc(b'clcp'))
        self.assertEqual(CoreMedia.kCMMediaType_Subtitle, fourcc(b'sbtl'))
        self.assertEqual(CoreMedia.kCMMediaType_TimeCode, fourcc(b'tmcd'))
        self.assertEqual(CoreMedia.kCMMediaType_Metadata, fourcc(b'meta'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_AppleProRes422, fourcc(b'apcn'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_AppleProRes422LT, fourcc(b'apcs'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_AppleProRes422Proxy, fourcc(b'apco'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_AppleProResRAW, fourcc(b'aprn'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_AppleProResRAWHQ, fourcc(b'aprh'))
        self.assertEqual(CoreMedia.kCMAudioCodecType_AAC_LCProtected, fourcc(b'paac'))
        self.assertEqual(CoreMedia.kCMAudioCodecType_AAC_AudibleProtected, fourcc(b'aaac'))
        self.assertEqual(CoreMedia.kCMAudioFormatDescriptionMask_StreamBasicDescription, 1<<0)
        self.assertEqual(CoreMedia.kCMAudioFormatDescriptionMask_MagicCookie, 1<<1)
        self.assertEqual(CoreMedia.kCMAudioFormatDescriptionMask_ChannelLayout, 1<<2)
        self.assertEqual(CoreMedia.kCMAudioFormatDescriptionMask_Extensions, 1<<3)
        self.assertEqual(CoreMedia.kCMAudioFormatDescriptionMask_All, CoreMedia.kCMAudioFormatDescriptionMask_StreamBasicDescription
                | CoreMedia.kCMAudioFormatDescriptionMask_MagicCookie
                | CoreMedia.kCMAudioFormatDescriptionMask_ChannelLayout
                | CoreMedia.kCMAudioFormatDescriptionMask_Extensions)
        self.assertEqual(CoreMedia.kCMPixelFormat_32ARGB, 32)
        self.assertEqual(CoreMedia.kCMPixelFormat_32BGRA, fourcc(b'BGRA'))
        self.assertEqual(CoreMedia.kCMPixelFormat_24RGB, 24)
        self.assertEqual(CoreMedia.kCMPixelFormat_16BE555, 16)
        self.assertEqual(CoreMedia.kCMPixelFormat_16BE565, fourcc(b'B565'))
        self.assertEqual(CoreMedia.kCMPixelFormat_16LE555, fourcc(b'L555'))
        self.assertEqual(CoreMedia.kCMPixelFormat_16LE565, fourcc(b'L565'))
        self.assertEqual(CoreMedia.kCMPixelFormat_16LE5551, fourcc(b'5551'))
        self.assertEqual(CoreMedia.kCMPixelFormat_422YpCbCr8, fourcc(b'2vuy'))
        self.assertEqual(CoreMedia.kCMPixelFormat_422YpCbCr8_yuvs, fourcc(b'yuvs'))
        self.assertEqual(CoreMedia.kCMPixelFormat_444YpCbCr8, fourcc(b'v308'))
        self.assertEqual(CoreMedia.kCMPixelFormat_4444YpCbCrA8, fourcc(b'v408'))
        self.assertEqual(CoreMedia.kCMPixelFormat_422YpCbCr16, fourcc(b'v216'))
        self.assertEqual(CoreMedia.kCMPixelFormat_422YpCbCr10, fourcc(b'v210'))
        self.assertEqual(CoreMedia.kCMPixelFormat_444YpCbCr10, fourcc(b'v410'))
        self.assertEqual(CoreMedia.kCMPixelFormat_8IndexedGray_WhiteIsZero, 0x00000028)
        self.assertEqual(CoreMedia.kCMVideoCodecType_422YpCbCr8,CoreMedia.kCMPixelFormat_422YpCbCr8)
        self.assertEqual(CoreMedia.kCMVideoCodecType_Animation, fourcc(b'rle '))
        self.assertEqual(CoreMedia.kCMVideoCodecType_Cinepak, fourcc(b'cvid'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_JPEG, fourcc(b'jpeg'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_JPEG_OpenDML, fourcc(b'dmb1'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_SorensonVideo, fourcc(b'SVQ1'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_SorensonVideo3, fourcc(b'SVQ3'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_H263, fourcc(b'h263'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_H264, fourcc(b'avc1'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_HEVC, fourcc(b'hvc1'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_MPEG4Video, fourcc(b'mp4v'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_MPEG2Video, fourcc(b'mp2v'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_MPEG1Video, fourcc(b'mp1v'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_DVCNTSC, fourcc(b'dvc '))
        self.assertEqual(CoreMedia.kCMVideoCodecType_DVCPAL, fourcc(b'dvcp'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_DVCProPAL, fourcc(b'dvpp'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_DVCPro50NTSC, fourcc(b'dv5n'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_DVCPro50PAL, fourcc(b'dv5p'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_DVCPROHD720p60, fourcc(b'dvhp'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_DVCPROHD720p50, fourcc(b'dvhq'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_DVCPROHD1080i60, fourcc(b'dvh6'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_DVCPROHD1080i50, fourcc(b'dvh5'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_DVCPROHD1080p30, fourcc(b'dvh3'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_DVCPROHD1080p25, fourcc(b'dvh2'))
        # NOTE: the AppleProRes constants below repeat some assertions from
        # the top of this method; kept verbatim for parity with the header.
        self.assertEqual(CoreMedia.kCMVideoCodecType_AppleProRes4444XQ, fourcc(b'ap4x'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_AppleProRes4444, fourcc(b'ap4h'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_AppleProRes422HQ, fourcc(b'apch'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_AppleProRes422, fourcc(b'apcn'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_AppleProRes422LT, fourcc(b'apcs'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_AppleProRes422Proxy, fourcc(b'apco'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_AppleProResRAW, fourcc(b'aprn'))
        self.assertEqual(CoreMedia.kCMVideoCodecType_AppleProResRAWHQ, fourcc(b'aprh'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_HDV_720p30, fourcc(b'hdv1'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_HDV_1080i60, fourcc(b'hdv2'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_HDV_1080i50, fourcc(b'hdv3'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_HDV_720p24, fourcc(b'hdv4'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_HDV_720p25, fourcc(b'hdv5'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_HDV_1080p24, fourcc(b'hdv6'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_HDV_1080p25, fourcc(b'hdv7'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_HDV_1080p30, fourcc(b'hdv8'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_HDV_720p60, fourcc(b'hdv9'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_HDV_720p50, fourcc(b'hdva'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_HD_1080i60_VBR35, fourcc(b'xdv2'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_HD_1080i50_VBR35, fourcc(b'xdv3'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_HD_1080p24_VBR35, fourcc(b'xdv6'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_HD_1080p25_VBR35, fourcc(b'xdv7'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_HD_1080p30_VBR35, fourcc(b'xdv8'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_EX_720p24_VBR35, fourcc(b'xdv4'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_EX_720p25_VBR35, fourcc(b'xdv5'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_EX_720p30_VBR35, fourcc(b'xdv1'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_EX_720p50_VBR35, fourcc(b'xdva'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_EX_720p60_VBR35, fourcc(b'xdv9'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_EX_1080i60_VBR35, fourcc(b'xdvb'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_EX_1080i50_VBR35, fourcc(b'xdvc'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_EX_1080p24_VBR35, fourcc(b'xdvd'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_EX_1080p25_VBR35, fourcc(b'xdve'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_EX_1080p30_VBR35, fourcc(b'xdvf'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_HD422_720p50_CBR50, fourcc(b'xd5a'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_HD422_720p60_CBR50, fourcc(b'xd59'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_HD422_1080i60_CBR50, fourcc(b'xd5b'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_HD422_1080i50_CBR50, fourcc(b'xd5c'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_HD422_1080p24_CBR50, fourcc(b'xd5d'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_HD422_1080p25_CBR50, fourcc(b'xd5e'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_HD422_1080p30_CBR50, fourcc(b'xd5f'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_HD_540p, fourcc(b'xdhd'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_HD422_540p, fourcc(b'xdh2'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_HD422_720p24_CBR50, fourcc(b'xd54'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_HD422_720p25_CBR50, fourcc(b'xd55'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XDCAM_HD422_720p30_CBR50, fourcc(b'xd51'))
        self.assertEqual(CoreMedia.kCMMPEG2VideoProfile_XF, fourcc(b'xfz1'))
        self.assertEqual(CoreMedia.kCMMuxedStreamType_MPEG1System, fourcc(b'mp1s'))
        self.assertEqual(CoreMedia.kCMMuxedStreamType_MPEG2Transport, fourcc(b'mp2t'))
        self.assertEqual(CoreMedia.kCMMuxedStreamType_MPEG2Program, fourcc(b'mp2p'))
        self.assertEqual(CoreMedia.kCMMuxedStreamType_DV, fourcc(b'dv  '))
        self.assertEqual(CoreMedia.kCMClosedCaptionFormatType_CEA608, fourcc(b'c608'))
        self.assertEqual(CoreMedia.kCMClosedCaptionFormatType_CEA708, fourcc(b'c708'))
        self.assertEqual(CoreMedia.kCMClosedCaptionFormatType_ATSC, fourcc(b'atcc'))
        self.assertEqual(CoreMedia.kCMTextFormatType_QTText, fourcc(b'text'))
        self.assertEqual(CoreMedia.kCMTextFormatType_3GText, fourcc(b'tx3g'))
        self.assertEqual(CoreMedia.kCMTextDisplayFlag_scrollIn, 0x00000020)
        self.assertEqual(CoreMedia.kCMTextDisplayFlag_scrollOut, 0x00000040)
        self.assertEqual(CoreMedia.kCMTextDisplayFlag_scrollDirectionMask, 0x00000180)
        self.assertEqual(CoreMedia.kCMTextDisplayFlag_scrollDirection_bottomToTop, 0x00000000)
        self.assertEqual(CoreMedia.kCMTextDisplayFlag_scrollDirection_rightToLeft, 0x00000080)
        self.assertEqual(CoreMedia.kCMTextDisplayFlag_scrollDirection_topToBottom, 0x00000100)
        self.assertEqual(CoreMedia.kCMTextDisplayFlag_scrollDirection_leftToRight, 0x00000180)
        self.assertEqual(CoreMedia.kCMTextDisplayFlag_continuousKaraoke, 0x00000800)
        self.assertEqual(CoreMedia.kCMTextDisplayFlag_writeTextVertically, 0x00020000)
        self.assertEqual(CoreMedia.kCMTextDisplayFlag_fillTextRegion, 0x00040000)
        self.assertEqual(CoreMedia.kCMTextDisplayFlag_obeySubtitleFormatting, 0x20000000)
        self.assertEqual(CoreMedia.kCMTextDisplayFlag_forcedSubtitlesPresent, 0x40000000)
        self.assertEqual(CoreMedia.kCMTextDisplayFlag_allSubtitlesForced, 0x80000000)
        self.assertEqual(CoreMedia.kCMTextJustification_left_top, 0)
        self.assertEqual(CoreMedia.kCMTextJustification_centered, 1)
        self.assertEqual(CoreMedia.kCMTextJustification_bottom_right, -1)
        self.assertEqual(CoreMedia.kCMSubtitleFormatType_3GText, fourcc(b'tx3g'))
        self.assertEqual(CoreMedia.kCMSubtitleFormatType_WebVTT, fourcc(b'wvtt'))
        self.assertEqual(CoreMedia.kCMTimeCodeFormatType_TimeCode32, fourcc(b'tmcd'))
        self.assertEqual(CoreMedia.kCMTimeCodeFormatType_TimeCode64, fourcc(b'tc64'))
        self.assertEqual(CoreMedia.kCMTimeCodeFormatType_Counter32, fourcc(b'cn32'))
        self.assertEqual(CoreMedia.kCMTimeCodeFormatType_Counter64, fourcc(b'cn64'))
        self.assertEqual(CoreMedia.kCMTimeCodeFlag_DropFrame, 1 << 0)
        self.assertEqual(CoreMedia.kCMTimeCodeFlag_24HourMax, 1 << 1)
        self.assertEqual(CoreMedia.kCMTimeCodeFlag_NegTimesOK, 1 << 2)
        self.assertEqual(CoreMedia.kCMMetadataFormatType_ICY, fourcc(b'icy '))
        self.assertEqual(CoreMedia.kCMMetadataFormatType_ID3, fourcc(b'id3 '))
        self.assertEqual(CoreMedia.kCMMetadataFormatType_Boxed, fourcc(b'mebx'))
    @min_os_level('10.7')
    def test_constants10_7(self):
        # String constants introduced in macOS 10.7.
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_OriginalCompressionSettings, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_VerbatimSampleDescription, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_VerbatimISOSampleEntry, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_FormatName, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_Depth, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionKey_CleanApertureWidthRational, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionKey_CleanApertureHeightRational, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionKey_CleanApertureHorizontalOffsetRational, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionKey_CleanApertureVerticalOffsetRational, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_FullRangeVideo, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_ICCProfile, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_BytesPerRow, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionConformsToMPEG2VideoProfile, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_TemporalQuality, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_SpatialQuality, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_Version, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_RevisionLevel, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_Vendor, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionVendor_Apple, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionExtension_DisplayFlags, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionExtension_BackgroundColor, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionColor_Red, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionColor_Green, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionColor_Blue, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionColor_Alpha, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionExtension_DefaultTextBox, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionRect_Top, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionRect_Left, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionRect_Bottom, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionRect_Right, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionExtension_DefaultStyle, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionStyle_StartChar, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionStyle_Font, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionStyle_FontFace, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionStyle_ForegroundColor, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionStyle_FontSize, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionExtension_HorizontalJustification, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionExtension_VerticalJustification, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionStyle_EndChar, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionExtension_FontTable, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionExtension_TextJustification, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionStyle_Height, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionStyle_Ascent, unicode)
        self.assertIsInstance(CoreMedia.kCMTextFormatDescriptionExtension_DefaultFontName, unicode)
        self.assertIsInstance(CoreMedia.kCMTimeCodeFormatDescriptionExtension_SourceReferenceName, unicode)
        self.assertIsInstance(CoreMedia.kCMTimeCodeFormatDescriptionKey_Value, unicode)
        self.assertIsInstance(CoreMedia.kCMTimeCodeFormatDescriptionKey_LangCode, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtensionKey_MetadataKeyTable, unicode)
        self.assertIsInstance(CoreMedia.kCMMetadataFormatDescriptionKey_Namespace, unicode)
        self.assertIsInstance(CoreMedia.kCMMetadataFormatDescriptionKey_Value, unicode)
        self.assertIsInstance(CoreMedia.kCMMetadataFormatDescriptionKey_LocalID, unicode)
    @min_os_level('10.8')
    def test_constants10_8(self):
        # String constants introduced in macOS 10.8.
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionColorPrimaries_P22, unicode)
    @min_os_level('10.10')
    def test_constants10_10(self):
        # String constants introduced in macOS 10.10.
        self.assertIsInstance(CoreMedia.kCMMetadataFormatDescriptionKey_DataType, unicode)
        self.assertIsInstance(CoreMedia.kCMMetadataFormatDescriptionKey_DataTypeNamespace, unicode)
        self.assertIsInstance(CoreMedia.kCMMetadataFormatDescriptionKey_ConformingDataTypes, unicode)
        self.assertIsInstance(CoreMedia.kCMMetadataFormatDescriptionKey_LanguageTag, unicode)
        self.assertIsInstance(CoreMedia.kCMMetadataFormatDescriptionMetadataSpecificationKey_Identifier, unicode)
        self.assertIsInstance(CoreMedia.kCMMetadataFormatDescriptionMetadataSpecificationKey_DataType, unicode)
        self.assertIsInstance(CoreMedia.kCMMetadataFormatDescriptionMetadataSpecificationKey_ExtendedLanguageTag, unicode)
    @min_os_level('10.11')
    def test_constants10_11(self):
        # String constants introduced in macOS 10.11.
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_VerbatimImageDescription, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_CleanAperture, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionKey_CleanApertureWidth, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionKey_CleanApertureHeight, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionKey_CleanApertureHorizontalOffset, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionKey_CleanApertureVerticalOffset, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_FieldCount, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_FieldDetail, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionFieldDetail_TemporalTopFirst, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionFieldDetail_TemporalBottomFirst, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionFieldDetail_SpatialFirstLineEarly, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionFieldDetail_SpatialFirstLineLate, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_PixelAspectRatio, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_ColorPrimaries, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionColorPrimaries_ITU_R_709_2, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionColorPrimaries_EBU_3213, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionColorPrimaries_SMPTE_C, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionColorPrimaries_DCI_P3, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionColorPrimaries_P3_D65, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionColorPrimaries_ITU_R_2020, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_TransferFunction, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionTransferFunction_ITU_R_709_2, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionTransferFunction_SMPTE_240M_1995, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionTransferFunction_UseGamma, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionTransferFunction_ITU_R_2020, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_GammaLevel, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_YCbCrMatrix, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionYCbCrMatrix_ITU_R_709_2, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionYCbCrMatrix_ITU_R_601_4, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionYCbCrMatrix_SMPTE_240M_1995, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionYCbCrMatrix_ITU_R_2020, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_ChromaLocationTopField, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_ChromaLocationBottomField, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionChromaLocation_Left, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionChromaLocation_Center, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionChromaLocation_TopLeft, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionChromaLocation_Top, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionChromaLocation_BottomLeft, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionChromaLocation_Bottom, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionChromaLocation_DV420, unicode)
        self.assertIsInstance(CoreMedia.kCMMetadataFormatDescriptionKey_StructuralDependency, unicode)
        self.assertIsInstance(CoreMedia.kCMMetadataFormatDescriptionKey_SetupData, unicode)
        self.assertIsInstance(CoreMedia.kCMMetadataFormatDescription_StructuralDependencyKey_DependencyIsInvalidFlag, unicode)
        self.assertIsInstance(CoreMedia.kCMMetadataFormatDescriptionMetadataSpecificationKey_StructuralDependency, unicode)
        self.assertIsInstance(CoreMedia.kCMMetadataFormatDescriptionMetadataSpecificationKey_SetupData, unicode)
    @min_os_level('10.12')
    def test_constants10_12(self):
        # String constants introduced in macOS 10.12.
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionTransferFunction_SMPTE_ST_428_1, unicode)
    @min_os_level('10.13')
    def test_constants10_13(self):
        # String constants introduced in macOS 10.13 (HDR metadata keys).
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionTransferFunction_SMPTE_ST_2084_PQ, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionTransferFunction_ITU_R_2100_HLG, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_MasteringDisplayColorVolume, unicode)
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionExtension_ContentLightLevelInfo, unicode)
    @min_os_level('10.14')
    def test_constants10_14(self):
        # String constants introduced in macOS 10.14.
        self.assertIsInstance(CoreMedia.kCMFormatDescriptionTransferFunction_Linear, unicode)
    def test_structs(self):
        # Struct wrapper defaults.
        v = CoreMedia.CMVideoDimensions()
        self.assertEqual(v.width, 0)
        self.assertEqual(v.height, 0)
    def test_types(self):
        # CMFormatDescriptionRef must be exposed as a CoreFoundation type.
        self.assertIsCFType(CoreMedia.CMFormatDescriptionRef)
    @expectedFailure
    @min_os_level('10.7')
    def test_functions_manual(self):
        # Functions that require hand-written wrappers; expected to fail
        # until those wrappers exist.
        self.assertIsNotInstance(CoreMedia.CMVideoFormatDescriptionCreateFromH264ParameterSets, objc.function)
        self.assertIsNotInstance(CoreMedia.CMVideoFormatDescriptionCreateFromHEVCParameterSets, objc.function)
        self.fail("CMVideoFormatDescriptionGetH264ParameterSetAtIndex") # Needs manual wrapper
        self.fail("CMVideoFormatDescriptionGetHEVCParameterSetAtIndex") # Needs manual wrapper
    @min_os_level('10.7')
    def test_functions(self):
        # Verify bridge metadata (out args, CF-retained results, BOOL
        # results, in/out buffer sizing) for the 10.7 function set.
        self.assertArgIsOut(CoreMedia.CMFormatDescriptionCreate, 4)
        self.assertArgIsCFRetained(CoreMedia.CMFormatDescriptionCreate, 4)
        self.assertIsInstance(CoreMedia.CMFormatDescriptionGetTypeID(), (int, long))
        self.assertResultIsBOOL(CoreMedia.CMFormatDescriptionEqual)
        self.assertResultIsBOOL(CoreMedia.CMFormatDescriptionEqualIgnoringExtensionKeys)
        CoreMedia.CMFormatDescriptionGetMediaType
        CoreMedia.CMFormatDescriptionGetMediaSubType
        CoreMedia.CMFormatDescriptionGetExtensions
        CoreMedia.CMFormatDescriptionGetExtension
        self.assertArgIsIn(CoreMedia.CMAudioFormatDescriptionCreate, 3)
        self.assertArgIsIn(CoreMedia.CMAudioFormatDescriptionCreate, 5)
        self.assertArgSizeInArg(CoreMedia.CMAudioFormatDescriptionCreate, 5, 4)
        self.assertArgIsOut(CoreMedia.CMAudioFormatDescriptionCreate, 7)
        self.assertArgIsCFRetained(CoreMedia.CMAudioFormatDescriptionCreate, 7)
        CoreMedia.CMAudioFormatDescriptionGetStreamBasicDescription
        self.assertArgIsOut(CoreMedia.CMAudioFormatDescriptionGetMagicCookie, 1)
        self.assertResultSizeInArg(CoreMedia.CMAudioFormatDescriptionGetMagicCookie, 1)
        self.assertArgIsOut(CoreMedia.CMAudioFormatDescriptionGetChannelLayout, 1)
        self.assertResultSizeInArg(CoreMedia.CMAudioFormatDescriptionGetChannelLayout, 1)
        self.assertArgIsOut(CoreMedia.CMAudioFormatDescriptionGetFormatList, 1)
        self.assertResultSizeInArg(CoreMedia.CMAudioFormatDescriptionGetFormatList, 1)
        # XXX: Need to derefence pointer
        CoreMedia.CMAudioFormatDescriptionGetRichestDecodableFormat
        CoreMedia.CMAudioFormatDescriptionGetMostCompatibleFormat
        self.assertArgIsOut(CoreMedia.CMAudioFormatDescriptionCreateSummary, 3)
        self.assertArgIsCFRetained(CoreMedia.CMAudioFormatDescriptionCreateSummary, 3)
        self.assertResultIsBOOL(CoreMedia.CMAudioFormatDescriptionEqual)
        self.assertArgIsOut(CoreMedia.CMAudioFormatDescriptionEqual, 3)
        self.assertArgIsOut(CoreMedia.CMVideoFormatDescriptionCreate, 5)
        self.assertArgIsCFRetained(CoreMedia.CMVideoFormatDescriptionCreate, 5)
        self.assertArgIsOut(CoreMedia.CMVideoFormatDescriptionCreateForImageBuffer, 2)
        self.assertArgIsCFRetained(CoreMedia.CMVideoFormatDescriptionCreateForImageBuffer, 2)
        self.assertIs(CoreMedia.CMVideoFormatDescriptionGetCodecType, CoreMedia.CMFormatDescriptionGetMediaSubType)
        CoreMedia.CMVideoFormatDescriptionGetDimensions
        CoreMedia.CMVideoFormatDescriptionGetPresentationDimensions
        CoreMedia.CMVideoFormatDescriptionGetCleanAperture
        CoreMedia.CMVideoFormatDescriptionGetExtensionKeysCommonWithImageBuffers
        self.assertResultIsBOOL(CoreMedia.CMVideoFormatDescriptionMatchesImageBuffer)
        self.assertArgIsOut(CoreMedia.CMMuxedFormatDescriptionCreate, 3)
        self.assertArgIsCFRetained(CoreMedia.CMMuxedFormatDescriptionCreate, 3)
        self.assertArgIsOut(CoreMedia.CMTextFormatDescriptionGetDisplayFlags, 1)
        self.assertArgIsOut(CoreMedia.CMTextFormatDescriptionGetJustification, 1)
        self.assertArgIsOut(CoreMedia.CMTextFormatDescriptionGetJustification, 2)
        self.assertArgIsBOOL(CoreMedia.CMTextFormatDescriptionGetDefaultTextBox, 1)
        self.assertArgIsOut(CoreMedia.CMTextFormatDescriptionGetDefaultTextBox, 3)
        self.assertArgIsOut(CoreMedia.CMTextFormatDescriptionGetDefaultStyle, 1)
        self.assertArgHasType(CoreMedia.CMTextFormatDescriptionGetDefaultStyle, 2, b'o^Z')
        self.assertArgHasType(CoreMedia.CMTextFormatDescriptionGetDefaultStyle, 3, b'o^Z')
        self.assertArgHasType(CoreMedia.CMTextFormatDescriptionGetDefaultStyle, 4, b'o^Z')
        self.assertArgHasType(CoreMedia.CMTextFormatDescriptionGetDefaultStyle, 5, b'o^' + objc._C_CGFloat)
        self.assertArgHasType(CoreMedia.CMTextFormatDescriptionGetDefaultStyle, 6, b'o^' + objc._C_CGFloat)
        self.assertArgIsFixedSize(CoreMedia.CMTextFormatDescriptionGetDefaultStyle, 6, 4)
        self.assertArgIsOut(CoreMedia.CMTextFormatDescriptionGetFontName, 2)
        self.assertArgIsCFRetained(CoreMedia.CMTextFormatDescriptionGetFontName, 2)
        self.assertIs(CoreMedia.CMSubtitleFormatDescriptionGetFormatType, CoreMedia.CMFormatDescriptionGetMediaSubType)
        self.assertArgIsOut(CoreMedia.CMTimeCodeFormatDescriptionCreate, 6)
        self.assertArgIsCFRetained(CoreMedia.CMTimeCodeFormatDescriptionCreate, 6)
        CoreMedia.CMTimeCodeFormatDescriptionGetFrameDuration
        CoreMedia.CMTimeCodeFormatDescriptionGetFrameQuanta
        CoreMedia.CMTimeCodeFormatDescriptionGetTimeCodeFlags
        self.assertArgIsOut(CoreMedia.CMMetadataFormatDescriptionCreateWithKeys, 3)
        self.assertArgIsCFRetained(CoreMedia.CMMetadataFormatDescriptionCreateWithKeys, 3)
        CoreMedia.CMMetadataFormatDescriptionGetKeyWithLocalID
    @min_os_level('10.10')
    def test_functions10_10(self):
        # Metadata-format-description constructors added in macOS 10.10.
        self.assertArgIsOut(CoreMedia.CMMetadataFormatDescriptionCreateWithMetadataSpecifications, 3)
        self.assertArgIsCFRetained(CoreMedia.CMMetadataFormatDescriptionCreateWithMetadataSpecifications, 3)
        self.assertArgIsOut(CoreMedia.CMMetadataFormatDescriptionCreateWithMetadataFormatDescriptionAndMetadataSpecifications, 3)
        self.assertArgIsCFRetained(CoreMedia.CMMetadataFormatDescriptionCreateWithMetadataFormatDescriptionAndMetadataSpecifications, 3)
        self.assertArgIsOut(CoreMedia.CMMetadataFormatDescriptionCreateByMergingMetadataFormatDescriptions, 3)
        self.assertArgIsCFRetained(CoreMedia.CMMetadataFormatDescriptionCreateByMergingMetadataFormatDescriptions, 3)
        CoreMedia.CMMetadataFormatDescriptionGetIdentifiers
if __name__ == "__main__":
main()
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
d3494680cb985e08a8f4a2443295f2c1ab1e7ee4 | d30d96db433f64c846c3af9dde767cbab9ad4527 | /Basics/eight.py | 06708fc6846ad7a394d0d47aaec975b2c0bde571 | [] | no_license | narasimhareddyprostack/Ramesh-CloudDevOps | 6999a3007624ba3e5bba140d52c3fdb8ab4533d2 | fe348afbe33090f619ccde8d8f2de15b0ca15c5f | refs/heads/master | 2023-03-12T23:18:04.281750 | 2021-03-05T10:22:41 | 2021-03-05T10:22:41 | 280,594,773 | 1 | 1 | null | 2020-07-18T14:26:37 | 2020-07-18T06:12:25 | Python | UTF-8 | Python | false | false | 87 | py | a = 10
b = 20
# Compare the module-level `a` (defined above) with b and report which is larger.
# Fix: the printed messages were misspelled ("greather" / "greaterh").
if a > b:
    print("A is greater")
else:
    print("B is greater")
| [
"narasimhareddyk18@gmail.com"
] | narasimhareddyk18@gmail.com |
5175579f392fdd01f9d697c958ad775828bc60e6 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_None/trend_Lag1Trend/cycle_0/ar_/test_artificial_1024_None_Lag1Trend_0__20.py | df4b1542088823a0e850d7fad698d78d1c3a2ab6 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 265 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 0, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 0); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
5670329fa6b2b9d2c32ff04c5c850e4875756d9a | cfa2417f07259e512a1bbface4f1f4ccd66502c6 | /test/test_Util/test_ifu_util.py | e96fc21970bd9cf8f6e5e013a2f6847d8ecdd5c7 | [
"BSD-3-Clause"
] | permissive | jiwoncpark/hierArc | 3779439533d3c9c5fe2e687f4bdf737dfc7673e8 | 3f31c0ae7540387fe98f778035d415c3cff38756 | refs/heads/master | 2021-05-18T21:32:45.590675 | 2020-12-23T00:01:01 | 2020-12-23T00:01:01 | 251,431,028 | 0 | 0 | NOASSERTION | 2020-03-30T21:20:08 | 2020-03-30T21:20:08 | null | UTF-8 | Python | false | false | 788 | py | import numpy as np
import numpy.testing as npt
import pytest
from hierarc.Util import ifu_util
class TestIFUUtil(object):
    """Unit tests for :mod:`hierarc.Util.ifu_util`."""

    def setup(self):
        pass

    def test_radial_dispersion(self):
        """binned_total() on uniform maps yields one value per radial bin, all ~1."""
        size = 10
        dispersion_map = np.zeros((size, size))
        weight_map_disp = np.ones((size, size))
        velocity_map = np.ones((size, size))
        weight_map_v = np.ones((size, size))
        flux_map = np.ones((size, size))
        r_bins = np.linspace(0, 5, 5)
        fiber_scale = 1
        disp_r, error_r = ifu_util.binned_total(
            dispersion_map, weight_map_disp, velocity_map, weight_map_v,
            flux_map, fiber_scale, r_bins)
        # One dispersion value per bin interval.
        assert len(disp_r) == len(r_bins) - 1
        npt.assert_almost_equal(disp_r, 1, decimal=6)
if __name__ == '__main__':
pytest.main()
| [
"sibirrer@gmail.com"
] | sibirrer@gmail.com |
9c5cf35ff34488e229c78f5ead98107df7ee0731 | 2352bc07e12b0256913559cf3485a360569ccd5e | /Practice/code_class/Crossin-practices/python_weekly_question/capitalize_words.py | ec65131874c754c9acc1776b29306e73cfbd2694 | [] | no_license | Dis-count/Python_practice | 166ae563be7f6d99a12bdc0e221c550ef37bd4fd | fa0cae54e853157a1d2d78bf90408c68ce617c1a | refs/heads/master | 2022-12-12T03:38:24.091529 | 2021-12-22T09:51:59 | 2021-12-22T09:51:59 | 224,171,833 | 2 | 1 | null | 2022-12-08T05:29:38 | 2019-11-26T11:07:00 | Jupyter Notebook | UTF-8 | Python | false | false | 292 | py | #-*- coding:utf-8 -*-
quote = "How can mirrors be real if our eyes aren't real"
def fuc(string):
    """Capitalize each whitespace-separated word of *string*.

    ``str.capitalize`` upper-cases the first character of each word and
    lower-cases the rest, e.g. ``"how ARE you"`` -> ``"How Are You"``.

    Bug fix: the original joined an undefined name ``c_n_s`` (NameError);
    it must join the computed word list instead.
    """
    capitalized_words = [word.capitalize() for word in string.split()]
    return ' '.join(capitalized_words)
print(fuc(quote))
| [
"492193947@qq.com"
] | 492193947@qq.com |
93a7ca5e3dd067ae8ba260b3d7caf5fabfba15e3 | d62e0bf740c8b9ee96dd161d6f1ed2e6a01616fc | /examples/twisted/wamp/pubsub/simple/example1/server.py | f9f14248acac91914594c4f5e85446bc83aa22fc | [
"Python-2.0",
"Apache-2.0"
] | permissive | devbazy/AutobahnPython | 4ff867e84811fb1c43083a139f4184824e1df8d2 | f14ca62fd59e15e078796e88153c26cb2f54a35a | refs/heads/master | 2020-12-31T05:39:39.824777 | 2014-01-19T10:02:17 | 2014-01-19T10:02:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,887 | py | ###############################################################################
##
## Copyright (C) 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.python import log
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import listenWS
from autobahn.wamp import WampServerFactory, \
WampServerProtocol
class MyPubSubServerProtocol(WampServerProtocol):
    """
    Protocol class for our simple demo WAMP server.
    """

    def onSessionOpen(self):
        # Once the WAMP session with a client is established, register a
        # single fixed URI as a PubSub topic for the built-in message
        # broker to handle.
        self.registerForPubSub("http://example.com/myEvent1")
if __name__ == '__main__':
log.startLogging(sys.stdout)
## our WAMP/WebSocket server
##
wampFactory = WampServerFactory("ws://localhost:9000", debugWamp = True)
wampFactory.protocol = MyPubSubServerProtocol
listenWS(wampFactory)
## our Web server (for static Web content)
##
webFactory = Site(File("."))
reactor.listenTCP(8080, webFactory)
## run the Twisted network reactor
##
reactor.run()
| [
"tobias.oberstein@tavendo.de"
] | tobias.oberstein@tavendo.de |
a1103dcb4e7eb7b56f15ff380ba3acb5cd6f83bb | 521c1beeb2776161ae6d550be35cd0c180887129 | /elvis/utils/xmlparse.py | 2b88d726b7ac85815abba0ae6279ea0aa4354d38 | [] | no_license | elvis2workspace/CustomLibrary | 601b552792ac2c33beeb709474f857c82793ac7e | 6449eea8aa99ca1172f54b669d97703d36132ce3 | refs/heads/master | 2021-01-23T21:33:05.617871 | 2017-09-26T01:57:48 | 2017-09-26T01:57:48 | 58,983,388 | 0 | 1 | null | 2016-12-06T09:56:14 | 2016-05-17T02:22:14 | Python | UTF-8 | Python | false | false | 1,199 | py | #!/usr/bin/env python
#coding=utf-8
'''
Created on 2016年1月27日
@author: zhang.xiuhai
'''
import re
import urllib
local = 'D:\\testdir\\'
def getHtml(url):
    """Download *url* and return the response body as a string.

    ``urllib.urlopen`` (Python 2) returns a file-like object wrapping the
    remote resource; read it in one go, as with a local file.
    """
    return urllib.urlopen(url).read()
def getImg(html):
    """Download every ``.jpg`` referenced by a ``res="..."`` attribute in *html*.

    Files are written into the ``local`` directory as ``0.jpg``, ``1.jpg``, ...
    and per-file download progress is reported through ``callbackfunc``.

    NOTE(review): matching on ``res=`` is unusual -- confirm the target page
    really uses that attribute (``src=`` is far more common).
    """
    jpg_pattern = re.compile(r'res="(.+?\.jpg)"')
    for index, imgurl in enumerate(jpg_pattern.findall(html)):
        # urlretrieve saves the remote file locally and invokes
        # callbackfunc(blocknum, blocksize, totalsize) as data arrives.
        urllib.urlretrieve(imgurl, local + '%s.jpg' % index, callbackfunc)
def callbackfunc(blocknum, blocksize, totalsize):
    """Progress hook for ``urllib.urlretrieve``.

    :param blocknum: number of data blocks transferred so far
    :param blocksize: size of each block in bytes
    :param totalsize: total size of the remote file in bytes

    Prints the download progress as a percentage, capped at 100%.
    """
    percent = 100.0 * blocknum * blocksize / totalsize
    if percent > 100:
        # The final block may overshoot the exact file size.
        percent = 100
    # Parenthesized print behaves identically under Python 2 and Python 3;
    # the original bare ``print`` statement is a SyntaxError on Python 3.
    print("%.2f%%" % percent)
if __name__ == '__main__':
html = getHtml("http://image.baidu.com/")
print html
# for item in getImg(html):
# print item
print getImg(html) | [
"xiuhai5052@hotmail.com"
] | xiuhai5052@hotmail.com |
3c3bdc089a625d916f6ef5bf84198778bd7e5cb4 | 9a1aad53af7e772d0aa290d231007957f38ef904 | /signup/admin.py | aa338d073f70bf9274059378e56f81d1666bb7fa | [] | no_license | vikasjoshis001/BioData | fc82145c16d05a0ad58aaaa84f53338037db56f4 | a0dd6e0dedda85842a47a584a88fd99b6d6633bd | refs/heads/master | 2023-02-07T17:43:18.056425 | 2020-12-18T06:53:19 | 2020-12-18T06:53:19 | 322,514,167 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | from django.contrib import admin
from .models import SignUp
admin.site.register(SignUp)
# Register your models here.
| [
"vikasjoshis001@gmail.com"
] | vikasjoshis001@gmail.com |
09f2f4cbd1def8e671fdebdfccf32af887d19f4a | eb6f6e4fd2b358805d8b41c883a27c80067f748c | /chapter8/material.py | c8635923273822c0e8dd71b349921c2a7273c105 | [] | no_license | candyer/Ray-tracing | a4b5637ccb63eec0bddf5533abf54c6d0b164c57 | d121380f9f35ad4ad596bec1d58b4021ba022f58 | refs/heads/master | 2021-07-13T03:04:37.938321 | 2017-10-18T18:41:20 | 2017-10-18T18:41:20 | 105,039,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py |
from abc import ABCMeta, abstractmethod
class Material:
    """Abstract base class for ray-tracing materials (Python 2 ABC style)."""

    __metaclass__ = ABCMeta

    @abstractmethod
    def scatter(self, ray_in, rec, attenuation, scattered):
        """Scatter an incoming ray; concrete materials must implement this."""
        pass

    def reflect(self, v, n):
        """Mirror-reflect vector *v* about the normal *n*: v - 2(v.n)n."""
        twice_projection = 2 * v.dot(n)
        return v - (n * twice_projection)
| [
"candyer@users.noreply.github.com"
] | candyer@users.noreply.github.com |
158e10f3fc5f523e30714eeeb904b4f920555b5a | 25bb4e760769cc483a20f27b6312698891dce034 | /python/Sets/py-set-difference-operation-English.py | c13712f5d892bff5d1b28e0d29f3bfb1088ffb44 | [] | no_license | rangaeeeee/codes-hackerrank | e13d22adff1ef74974e34251d9bfac6cfd36f2b0 | ce7fdf7f336c10164fd2f779d4ed3713849d7c2b | refs/heads/master | 2021-01-19T17:07:28.451983 | 2017-09-01T18:05:33 | 2017-09-01T18:05:33 | 101,049,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | n = int(input())
nSet = set(map(int,input().split()))  # the first set's elements (its size n was read above)
b = int(input())  # size of the second set (consumed but not otherwise used)
bSet = set(map(int,input().split()))  # the second set's elements
print(len(nSet.difference(bSet))) | [
"rrangarajan.85@gmail.com"
] | rrangarajan.85@gmail.com |
5d532060d925ffa94c8657a4ad75368b5d37e3ac | 1b3ed8b5b474e8346cf19279e3cec33ea5dc9c94 | /quick_start2/new_task.py | b8d4c3e9a85c91feeb7f6ed040d23c5d50a61f90 | [] | no_license | valerydmitrieva/rabbitmq_quick-start | 4b1d94b1fbe521b80adfd17dfdaf8afb7935548a | 8d9e46b96379b0957d9b85097d028d9162d1595e | refs/heads/master | 2020-04-19T04:46:45.405471 | 2019-01-28T13:57:45 | 2019-01-28T13:57:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | import sys
import pika

# Producer for the RabbitMQ "work queues" pattern: publish one task message.
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
# durable=True: the queue definition itself survives a broker restart.
channel.queue_declare(queue='task_queue', durable=True)
# Task text comes from the command line; fall back to a default message.
message = ' '.join(sys.argv[1:]) or "Hello World!"
channel.basic_publish(exchange='',
                      routing_key='task_queue',
                      body=message,
                      # delivery_mode=2 marks the message itself as persistent.
                      properties=pika.BasicProperties(delivery_mode=2,))
print("[x] Sent %r" % (message,))
connection.close()
"test@example.com"
] | test@example.com |
188ff9f607f4cc10669d5574597fff902ae9f35b | 50402cc4388dfee3a9dbe9e121ef217759ebdba8 | /etc/MOPSO3/Swarm1d.py | 83dfc1d59f2c3d296ed01a3eb63fb8e5fa5e2df8 | [] | no_license | dqyi11/SVNBackup | bd46a69ec55e3a4f981a9bca4c8340944d8d5886 | 9ad38e38453ef8539011cf4d9a9c0a363e668759 | refs/heads/master | 2020-03-26T12:15:01.155873 | 2015-12-10T01:11:36 | 2015-12-10T01:11:36 | 144,883,382 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,483 | py | '''
Created on 2013-12-5
@author: Walter
'''
from Swarm import *;
class Swarm1D(Swarm):
def initReferenceSet(self, loadFromFile=False, nondomSetFile=None, domSetFile=None):
self.referenceSet = [];
idxCnt = 0;
for x in np.arange(-self.worldrange[0]/2, self.worldrange[0]/2, 0.05):
ref = Reference(self.particleDimension, idxCnt);
ref.pos[0,0] = x;
self.referenceSet.append(ref);
idxCnt += 1;
self.categorizeRefSet(loadFromFile, nondomSetFile, domSetFile);
self.initDomFit();
self.initNondomFit();
def initDomFit(self):
for domPos in self.dominatedSet:
domPos.fit = [];
domPos.fit.append(self.calcObjFunc(domPos.pos, 0));
domPos.fit.append(self.calcObjFunc(domPos.pos, 1));
def initNondomFit(self):
for nondomPos in self.nondominatedSet:
nondomPos.fit = [];
nondomPos.fit.append(self.calcObjFunc(nondomPos.pos, 0));
nondomPos.fit.append(self.calcObjFunc(nondomPos.pos, 1));
def getDomFit(self):
fit1 = [];
fit2 = [];
for domPos in self.dominatedSet:
fit1.append(domPos.fit[0]);
fit2.append(domPos.fit[1]);
return fit1, fit2;
def getNondomFit(self):
fit1 = [];
fit2 = [];
for nondomPos in self.nondominatedSet:
fit1.append(nondomPos.fit[0]);
fit2.append(nondomPos.fit[1]);
return fit1, fit2;
def getXDominate(self):
xPos = [];
for a in self.dominatedSet:
xPos.append(a.pos[0,0]);
return xPos;
def getXNondominate(self):
xPos = [];
for a in self.nondominatedSet:
xPos.append(a.pos[0,0]);
return xPos;
def getDominatedParticlePos(self):
xDomParPos = [];
xDomParIdx = [];
for p in self.dominatedParticles:
assert p.nondominated == False;
xDomParPos.append(p.pos[0,0]);
xDomParIdx.append(p.index);
return xDomParPos, xDomParIdx;
def getNondominatedParticlePos(self):
xNondomParPos = [];
xNondomParIdx = [];
for p in self.nondominatedParticles:
assert p.nondominated == True;
xNondomParPos.append(p.pos[0,0]);
xNondomParIdx.append(p.index);
return xNondomParPos, xNondomParIdx;
def plot(self,count,path=None):
fig1 = plt.figure();
ax1 = fig1.add_subplot(111);
midValIdx = self.particleNum/2;
midLine = [];
for nondom in self.nondominatedSet:
midLine.append(midValIdx);
nondomPosX = self.getXNondominate();
ax1.plot(nondomPosX, midLine, 's', color='#7a7a7a');
'''
posX = [];
posY = [];
for p in self.particles:
posX.append(p.pos[0,0]);
posY.append(p.index);
ax1.plot(posX, posY, 'or');
'''
domPX, domPIdx = self.getDominatedParticlePos();
nondomPX, nondomPIdx = self.getNondominatedParticlePos();
ax1.plot(domPX, domPIdx, 'o', color='#0000ff');
ax1.plot(nondomPX, nondomPIdx, 'o', color='#ff0000');
domx_fit = [];
domy_fit = [];
for p in self.dominatedParticles:
domx_line = [p.pos[0,0], p.pos[0,0] + p.vel[0,0] * self.interval];
domy_line = [p.index, p.index];
pos = [p.pos[0,0]];
domx_fit.append(self.objfuncs[0](pos));
domy_fit.append(self.objfuncs[1](pos));
ax1.plot(domx_line, domy_line, '-b');
nondomx_fit = [];
nondomy_fit = [];
for p in self.nondominatedParticles:
nondomx_line = [p.pos[0,0], p.pos[0,0] + p.vel[0,0] * self.interval];
nondomy_line = [p.index, p.index];
pos = [p.pos[0,0]];
nondomx_fit.append(self.objfuncs[0](pos));
nondomy_fit.append(self.objfuncs[1](pos));
ax1.plot(nondomx_line, nondomy_line, '-b');
globalbest = self.particles[self.globalbestAgentIdx];
ax1.plot(globalbest.localbestPos[0,0], self.globalbestAgentIdx, 'ob');
ax1.plot(self.swarm_centroid[0,0], int(self.particleNum/2), 's', color='orange');
ax1.set_xlabel("particle position");
ax1.set_ylabel("particle index");
title1 = "1D solution space @ " + str(count);
ax1.set_title(title1);
filename1 = title1 + ".png";
if path != None:
filename1 = path + "\\" + filename1;
plt.savefig(filename1);
fig2 = plt.figure();
ax2 = fig2.add_subplot(111);
'''
x_range = [];
y_range = [];
for x in np.arange(-self.worldrange[0]/2, self.worldrange[0]/2, 0.05):
pos = [x];
x_range.append(self.objfuncs[0](pos));
y_range.append(self.objfuncs[1](pos));
ax2.plot(x_range, y_range, '.r');
'''
domfit1, domfit2 = self.getDomFit();
nondomfit1, nondomfit2 = self.getNondomFit();
#print str(len(self.dominatedSet)) + " " + str(len(self.nondominatedSet));
ax2.plot(domfit1, domfit2, '.', color='#aaaaaa');
ax2.plot(nondomfit1, nondomfit2, '.', color='#7a7a7a');
ax2.legend(["dominant", "nondominant"])
ax2.plot(domx_fit, domy_fit, 'ob');
ax2.plot(nondomx_fit, nondomy_fit, 'og');
ax2.plot(self.swarm_centroid_fitness[0,0], self.swarm_centroid_fitness[0,1], 's', color='orange');
ax2.plot(self.average_fitness[0,0], self.average_fitness[0,1], 'x', color='brown');
ax2.set_xlabel("Fitness 1");
ax2.set_ylabel("Fitness 2");
title2 = "1D fitness space @ " + str(count);
ax2.set_title(title2);
filename2 = title2 + ".png";
if path != None:
filename2 = path + "\\" + filename2;
plt.savefig(filename2);
if len(self.histCentroid) > 0 and self.showCentroidHist==True:
fig3 = plt.figure();
ax3 = fig3.add_subplot(111);
idx3 = [];
ctX = [];
for i in range(len(self.histCentroid)):
idx3.append(i);
ctX.append(self.histCentroid[i][0,0]);
ax3.plot(idx3, ctX);
ax3.set_xlabel("Iteration");
ax3.set_ylabel("Position");
ax3.legend(["Position X"]);
title3 = "1D Centroid of " + str(count) + " run";
ax3.set_title(title3);
filename3 = title3 + ".png";
if path != None:
filename3 = path + "\\" + filename3;
plt.savefig(filename3);
if len(self.histAvgFitness) > 0 and self.showAverageFitness==True:
fig4 = plt.figure();
ax4 = fig4.add_subplot(111);
idx4 = []
avX = [];
avY = [];
for i in range(len(self.histAvgFitness)):
idx4.append(i);
avX.append(self.histAvgFitness[i][0,0]);
avY.append(self.histAvgFitness[i][0,1]);
ax4.plot(idx4, avX);
ax4.plot(idx4, avY);
ax4.set_xlabel("Iteration");
ax4.set_ylabel("Value");
ax4.legend(["Function 1","Function 2"]);
title4 = "1D Average Fitness of " + str(count) + " run";
ax4.set_title(title4);
filename4 = title4 + ".png";
if path != None:
filename4 = path + "\\" + filename4;
plt.savefig(filename4);
if len(self.histCentroidMaximin) > 0 and self.showMaximinOfCentroid==True:
fig5 = plt.figure();
ax5 = fig5.add_subplot(111);
idx5 = np.arange(len(self.histCentroidMaximin));
ax5.plot(idx4, self.histCentroidMaximin);
ax5.set_xlabel("Iteration");
ax5.set_ylabel("Value");
title5 = "1D Maximin Value of Centroid in " + str(count) + " run";
ax5.set_title(title5);
filename5 = title5 + ".png";
if path != None:
filename5 = path + "\\" + filename5;
plt.savefig(filename5);
if len(self.histGlobalbestPos) > 0 and self.showGlobalBestPosition==True:
fig6 = plt.figure();
ax6 = fig6.add_subplot(111);
idx6 = [];
gbX = [];
for i in range(len(self.histCentroid)):
idx6.append(i);
gbX.append(self.histGlobalbestPos[i][0]);
ax6.plot(idx6, gbX);
ax6.set_xlabel("Iteration");
ax6.set_ylabel("Position");
ax6.legend(["Position X"]);
title6 = "1D Global Best Position of " + str(count) + " run";
ax6.set_title(title6);
filename6 = title6 + ".png";
if path != None:
filename6 = path + "\\" + filename6;
plt.savefig(filename6);
if len(self.histPercentOfNondominance) > 0 and self.showPercentOfNondominance == True:
fig7 = plt.figure();
ax7 = fig7.add_subplot(111);
idx7 = np.arange(len(self.histPercentOfNondominance));
ax7.plot(idx7, self.histPercentOfNondominance);
ax7.set_xlabel("Iteration");
ax7.set_ylabel("Percentage");
ax7.legend(["Percentage"]);
title7 = "1D Percentage of nondominance " + str(count) + " run";
ax7.set_title(title7);
filename7 = title7 + ".png";
if path != None:
filename7 = path + "\\" + filename7;
plt.savefig(filename7);
if len(self.histPosVariance) > 0 and self.showPosVariance == True:
fig8 = plt.figure();
ax8 = fig8.add_subplot(111);
idx8 = np.arange(len(self.histPosVariance));
paretoVariance = self.paretoVar[0] * np.ones(len(self.histPosVariance));
ax8.plot(idx8, paretoVariance);
ax8.plot(idx8, self.histPosVariance);
ax8.set_xlabel("Iteration");
ax8.set_ylabel("Variance");
ax8.legend(["Pareto Set Variance", "Variance"]);
title8 = "1D Pos Variance " + str(count) + " run";
ax8.set_title(title8);
filename8 = title8 + ".png";
if path != None:
filename8 = path + "\\" + filename8;
plt.savefig(filename8);
if len(self.histFitVariance) > 0 and self.showFitVariance == True:
fig9 = plt.figure();
ax9 = fig9.add_subplot(111);
idx9 = np.arange(len(self.histFitVariance));
ax9.plot(idx9, self.histFitVariance);
ax9.set_xlabel("Iteration");
ax9.set_ylabel("Variance");
ax9.legend(["Variance"]);
title9 = "1D Fit Variance " + str(count) + " run";
ax9.set_title(title9);
filename9 = title9 + ".png";
if path != None:
filename9 = path + "\\" + filename9;
plt.savefig(filename9);
if len(self.histHausdorffDist) > 0 and self.showHausdorffDist == True:
fig10 = plt.figure();
ax10 = fig10.add_subplot(111);
idx10 = np.arange(len(self.histHausdorffDist));
ax10.plot(idx10, self.histHausdorffDist);
ax10.set_xlabel("Iteration");
ax10.set_ylabel("Distance");
title10 = "1D Hausdorff Distance " + str(count) + "run";
ax10.set_title(title10);
filename10 = title10 + ".png";
if path != None:
filename10 = path + "\\" + filename10;
plt.savefig(filename10);
#plt.show(); | [
"walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39"
] | walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39 |
6ceda38c5b9c5ecefdcce21508c0ceb213a35c2e | 517600ba1ab1e7781b26df1439227f4569746d5a | /blog/migrations/0010_auto_20200521_2035.py | 76f4c00cec504ac5b01278e71bcabf23473394ae | [] | no_license | Smartybrainy/myworldgist | 885d86c838881ace6bced2492a46169dbd33b19d | 592f404c1047eccbbc8dad4b83032ffafb8d797a | refs/heads/master | 2023-01-21T19:29:47.208445 | 2020-12-05T00:52:10 | 2020-12-05T00:52:10 | 315,742,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | # Generated by Django 3.0.5 on 2020-05-21 19:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: alter Comment.added_date to use
    # auto_now_add, so the timestamp is set once, when the row is created.

    # Must be applied after migration 0009 of the 'blog' app.
    dependencies = [
        ('blog', '0009_auto_20200520_1119'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comment',
            name='added_date',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
| [
"smartyleey@gmail.com"
] | smartyleey@gmail.com |
588a1a3d27e75aaae0ccf3b96f5c76030132b90f | e3887534dffc1b5d1d97d4a6a030b22d46ae3a5c | /shah_entp_erpnext/config/docs.py | e89464f41a4e1144fe21c69dd17ae27bad91116f | [
"MIT"
] | permissive | anandpdoshi/shah_entp_erpnext | 5da4ba98a6e668b5ec970e0c361e081fa14ebdf8 | 7c19c629188b8f1e3449fd6f3a5a0ee371d3158c | refs/heads/master | 2016-08-12T05:24:51.194469 | 2016-04-24T17:46:57 | 2016-04-24T17:46:57 | 55,857,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | """
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/shah_entp_erpnext"
# docs_base_url = "https://[org_name].github.io/shah_entp_erpnext"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
    """Docs-generation hook: set the brand shown on the generated doc pages."""
    context.brand_html = "Shah Enterprises ERPNext Extension"
| [
"anand@erpnext.com"
] | anand@erpnext.com |
ceb1b4e0137321f1a236b1f2b9cd803efd2087b6 | 77fa2374a6d119a5a1890857ff16fc9f15004882 | /clishdeonly_15466/settings.py | 4a31aac7d924862912ff89cc47a756534ff99607 | [] | no_license | crowdbotics-apps/clishdeonly-15466 | b8ec985c6d819a3296c1763c0f38efacca2f6c7c | 5d136cbfab7f37755d2eae2ac0e54c9fcb912623 | refs/heads/master | 2022-12-10T12:37:08.378715 | 2020-04-05T16:16:50 | 2020-04-05T16:16:50 | 253,275,964 | 0 | 0 | null | 2022-12-08T09:29:33 | 2020-04-05T16:09:50 | Python | UTF-8 | Python | false | false | 5,487 | py | """
Django settings for clishdeonly_15466 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "clishdeonly_15466.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "clishdeonly_15466.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
f67048f66378a43afb2fb6a7f34a413967c6126e | c67f2d0677f8870bc1d970891bbe31345ea55ce2 | /zippy/benchmarks/src/benchmarks/whoosh/src/whoosh/collectors.py | b8b899de17a5c9bcd22d50e7ed5f113e935d3ea2 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"BSD-2-Clause-Views",
"BSD-3-Clause"
] | permissive | securesystemslab/zippy | a5a1ecf5c688504d8d16128ce901406ffd6f32c2 | ff0e84ac99442c2c55fe1d285332cfd4e185e089 | refs/heads/master | 2022-07-05T23:45:36.330407 | 2018-07-10T22:17:32 | 2018-07-10T22:17:32 | 67,824,983 | 324 | 27 | null | null | null | null | UTF-8 | Python | false | false | 42,165 | py | # Copyright 2012 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""
This module contains "collector" objects. Collectors provide a way to gather
"raw" results from a :class:`whoosh.matching.Matcher` object, implement
sorting, filtering, collation, etc., and produce a
:class:`whoosh.searching.Results` object.
The basic collectors are:
TopCollector
Returns the top N matching results sorted by score, using block-quality
optimizations to skip blocks of documents that can't contribute to the top
N. The :meth:`whoosh.searching.Searcher.search` method uses this type of
collector by default or when you specify a ``limit``.
UnlimitedCollector
Returns all matching results sorted by score. The
:meth:`whoosh.searching.Searcher.search` method uses this type of collector
when you specify ``limit=None`` or you specify a limit equal to or greater
than the number of documents in the searcher.
SortingCollector
Returns all matching results sorted by a :class:`whoosh.sorting.Facet`
object. The :meth:`whoosh.searching.Searcher.search` method uses this type
of collector when you use the ``sortedby`` parameter.
Here's an example of a simple collector that instead of remembering the matched
documents just counts up the number of matches::
class CountingCollector(Collector):
def prepare(self, top_searcher, q, context):
# Always call super method in prepare
Collector.prepare(self, top_searcher, q, context)
self.count = 0
def collect(self, sub_docnum):
self.count += 1
c = CountingCollector()
mysearcher.search_with_collector(myquery, c)
print(c.count)
There are also several wrapping collectors that extend or modify the
functionality of other collectors. The meth:`whoosh.searching.Searcher.search`
method uses many of these when you specify various parameters.
NOTE: collectors are not designed to be reentrant or thread-safe. It is
generally a good idea to create a new collector for each search.
"""
import os
import threading
from array import array
from bisect import insort
from collections import defaultdict
from heapq import heapify, heappush, heapreplace
from whoosh import sorting
from whoosh.compat import abstractmethod, iteritems, itervalues, xrange
from whoosh.searching import Results, TimeLimit
from whoosh.util import now
# Functions
def ilen(iterator):
    """Consume *iterator* and return the number of items it yielded."""
    return sum(1 for _ in iterator)
# Base class
class Collector(object):
    """Base class for collectors.
    A collector consumes matches from a :class:`whoosh.matching.Matcher`
    and knows how to turn them into a :class:`whoosh.searching.Results`
    object. See the module docstring for the available concrete collectors.
    """
    def prepare(self, top_searcher, q, context):
        """This method is called before a search.
        Subclasses can override this to perform set-up work, but
        they should still call the superclass's method because it sets several
        necessary attributes on the collector object:
        self.top_searcher
            The top-level searcher.
        self.q
            The query object
        self.context
            ``context.needs_current`` controls whether a wrapping collector
            requires that this collector's matcher be in a valid state at every
            call to ``collect()``. If this is ``False``, the collector is free
            to use faster methods that don't necessarily keep the matcher
            updated, such as ``matcher.all_ids()``.
        :param top_searcher: the top-level :class:`whoosh.searching.Searcher`
            object.
        :param q: the :class:`whoosh.query.Query` object being searched for.
        :param context: a :class:`whoosh.searching.SearchContext` object
            containing information about the search.
        """
        self.top_searcher = top_searcher
        self.q = q
        self.context = context
        # Record the wall-clock start time; finish() uses it to set runtime
        self.starttime = now()
        self.runtime = None
        # Set of matched top-level document numbers. Subclasses that do not
        # keep this up to date must override all_ids() and count()
        self.docset = set()
    def run(self):
        """Runs the collection loop: iterates over the leaf (segment)
        searchers of the top-level searcher and collects the matches in each.
        """
        # Collect matches for each sub-searcher
        try:
            for subsearcher, offset in self.top_searcher.leaf_searchers():
                self.set_subsearcher(subsearcher, offset)
                self.collect_matches()
        finally:
            # Always record timing information, even if collection raised
            self.finish()
    def set_subsearcher(self, subsearcher, offset):
        """This method is called each time the collector starts on a new
        sub-searcher.
        Subclasses can override this to perform set-up work, but
        they should still call the superclass's method because it sets several
        necessary attributes on the collector object:
        self.subsearcher
            The current sub-searcher. If the top-level searcher is atomic, this
            is the same as the top-level searcher.
        self.offset
            The document number offset of the current searcher. You must add
            this number to the document number passed to
            :meth:`Collector.collect` to get the top-level document number
            for use in results.
        self.matcher
            A :class:`whoosh.matching.Matcher` object representing the matches
            for the query in the current sub-searcher.
        """
        self.subsearcher = subsearcher
        self.offset = offset
        # Build a fresh matcher for the query against this segment
        self.matcher = self.q.matcher(subsearcher, self.context)
    def computes_count(self):
        """Returns True if the collector naturally computes the exact number of
        matching documents. Collectors that use block optimizations will return
        False since they might skip blocks containing matching documents.
        Note that if this method returns False you can still call :meth:`count`,
        but it means that method might have to do more work to calculate the
        number of matching documents.
        """
        return True
    def all_ids(self):
        """Returns a sequence of docnums matched in this collector. (Only valid
        after the collector is run.)
        The default implementation is based on the docset. If a collector does
        not maintain the docset, it will need to override this method.
        """
        return self.docset
    def count(self):
        """Returns the total number of documents matched in this collector.
        (Only valid after the collector is run.)
        The default implementation is based on the docset. If a collector does
        not maintain the docset, it will need to override this method.
        """
        return len(self.docset)
    def collect_matches(self):
        """This method calls :meth:`Collector.matches` and then for each
        matched document calls :meth:`Collector.collect`. Sub-classes that
        want to intervene between finding matches and adding them to the
        collection (for example, to filter out certain documents) can override
        this method.
        """
        # Bind the bound method to a local name for speed in the hot loop
        collect = self.collect
        for sub_docnum in self.matches():
            collect(sub_docnum)
    @abstractmethod
    def collect(self, sub_docnum):
        """This method is called for every matched document. It should do the
        work of adding a matched document to the results, and it should return
        an object to use as a "sorting key" for the given document (such as the
        document's score, a key generated by a facet, or just None). Subclasses
        must implement this method.
        If you want the score for the current document, use
        ``self.matcher.score()``.
        Overriding methods should add the current document offset
        (``self.offset``) to the ``sub_docnum`` to get the top-level document
        number for the matching document to add to results.
        :param sub_docnum: the document number of the current match within the
            current sub-searcher. You must add ``self.offset`` to this number
            to get the document's top-level document number.
        """
        raise NotImplementedError
    @abstractmethod
    def sort_key(self, sub_docnum):
        """Returns a sorting key for the current match. This should return the
        same value returned by :meth:`Collector.collect`, but without the side
        effect of adding the current document to the results.
        If the collector has been prepared with ``context.needs_current=True``,
        this method can use ``self.matcher`` to get information, for example
        the score. Otherwise, it should only use the provided ``sub_docnum``,
        since the matcher may be in an inconsistent state.
        Subclasses must implement this method.
        """
        raise NotImplementedError
    def remove(self, global_docnum):
        """Removes a document from the collector. Note that this method uses
        the global document number as opposed to :meth:`Collector.collect`
        which takes a segment-relative docnum.
        :raises KeyError: if the document was not collected.
        """
        # NOTE: relies on self.items, which is created by subclasses (e.g.
        # ScoredCollector.prepare) as a list of (sortkey, global_docnum) pairs
        items = self.items
        for i in xrange(len(items)):
            if items[i][1] == global_docnum:
                items.pop(i)
                return
        raise KeyError(global_docnum)
    def _step_through_matches(self):
        # Generator that advances the matcher one document at a time so that
        # self.matcher is in a valid state for each yielded docnum
        matcher = self.matcher
        while matcher.is_active():
            yield matcher.id()
            matcher.next()
    def matches(self):
        """Yields a series of relative document numbers for matches
        in the current subsearcher.
        """
        # We jump through a lot of hoops to avoid stepping through the matcher
        # "manually" if we can because all_ids() is MUCH faster
        if self.context.needs_current:
            return self._step_through_matches()
        else:
            return self.matcher.all_ids()
    def finish(self):
        """This method is called after a search.
        Subclasses can override this to perform set-up work, but
        they should still call the superclass's method because it sets several
        necessary attributes on the collector object:
        self.runtime
            The time (in seconds) the search took.
        """
        self.runtime = now() - self.starttime
    def _results(self, items, **kwargs):
        # Fills in a Results object with the invariant information and the
        # given "items" (a list of (score, docnum) tuples)
        r = Results(self.top_searcher, self.q, items, **kwargs)
        r.runtime = self.runtime
        # Keep a back-reference so callers can inspect collector statistics
        r.collector = self
        return r
    @abstractmethod
    def results(self):
        """Returns a :class:`~whoosh.searching.Results` object containing the
        results of the search. Subclasses must implement this method
        """
        raise NotImplementedError
# Scored collectors
class ScoredCollector(Collector):
    """Base class for collectors that sort the results based on document score.
    Subclasses implement :meth:`_collect` (what to do with each scored match)
    and may implement :meth:`_use_block_quality`.
    """
    def __init__(self, replace=10):
        """
        :param replace: Number of matches between attempts to replace the
            matcher with a more efficient version.
        """
        Collector.__init__(self)
        self.replace = replace
    def prepare(self, top_searcher, q, context):
        # This collector requires a valid matcher at each step
        Collector.prepare(self, top_searcher, q, context)
        # Cache the optional "final" scoring function from the weighting model
        if top_searcher.weighting.use_final:
            self.final_fn = top_searcher.weighting.final
        else:
            self.final_fn = None
        # Heap containing top N (score, 0-docnum) pairs
        self.items = []
        # Minimum score a document must have to make it into the top N. This is
        # used by the block-quality optimizations
        self.minscore = 0
        # Number of times the matcher was replaced (for debugging)
        self.replaced_times = 0
        # Number of blocks skipped by quality optimizations (for debugging)
        self.skipped_times = 0
    def sort_key(self, sub_docnum):
        # The score is negated so that an ascending sort puts higher-scoring
        # documents first
        return 0 - self.matcher.score()
    def _collect(self, global_docnum, score):
        # Concrete subclasses should override this method to collect matching
        # documents
        raise NotImplementedError
    def _use_block_quality(self):
        # Concrete subclasses should override this method to return True if the
        # collector should use block quality optimizations
        return False
    def collect(self, sub_docnum):
        # Do common work to calculate score and top-level document number
        global_docnum = self.offset + sub_docnum
        score = self.matcher.score()
        if self.final_fn:
            score = self.final_fn(self.top_searcher, global_docnum, score)
        # Call specialized method on subclass
        return self._collect(global_docnum, score)
    def matches(self):
        # Local aliases for attributes read in the hot loop
        minscore = self.minscore
        matcher = self.matcher
        usequality = self._use_block_quality()
        replace = self.replace
        replacecounter = 0
        # A flag to indicate whether we should check block quality at the start
        # of the next loop
        checkquality = True
        while matcher.is_active():
            # If the replacement counter has reached 0, try replacing the
            # matcher with a more efficient version
            if replace:
                if replacecounter == 0 or self.minscore != minscore:
                    self.matcher = matcher = matcher.replace(minscore or 0)
                    self.replaced_times += 1
                    if not matcher.is_active():
                        break
                    # The replacement matcher may have different capabilities
                    usequality = self._use_block_quality()
                    replacecounter = self.replace
                    if self.minscore != minscore:
                        checkquality = True
                        minscore = self.minscore
                replacecounter -= 1
            # If we're using block quality optimizations, and the checkquality
            # flag is true, try to skip ahead to the next block with the
            # minimum required quality
            if usequality and checkquality and minscore is not None:
                self.skipped_times += matcher.skip_to_quality(minscore)
                # Skipping ahead might have moved the matcher to the end of the
                # posting list
                if not matcher.is_active():
                    break
            yield matcher.id()
            # Move to the next document. This method returns True if the
            # matcher has entered a new block, so we should check block quality
            # again.
            checkquality = matcher.next()
class TopCollector(ScoredCollector):
    """A collector that only returns the top "N" scored results.
    """
    def __init__(self, limit=10, usequality=True, **kwargs):
        """
        :param limit: the maximum number of results to return.
        :param usequality: whether to use block-quality optimizations. This may
            be useful for debugging.
        """
        ScoredCollector.__init__(self, **kwargs)
        self.limit = limit
        self.usequality = usequality
        # Running count of documents passed to _collect (only exact when
        # block-quality optimizations are off)
        self.total = 0
    def _use_block_quality(self):
        # Block quality can only be used when the user allows it, the
        # weighting model has no "final" adjustment, and the matcher
        # supports it
        return (self.usequality
                and not self.top_searcher.weighting.use_final
                and self.matcher.supports_block_quality())
    def computes_count(self):
        # If we skip blocks, self.total is not an exact match count
        return not self._use_block_quality()
    def all_ids(self):
        # Since this collector can skip blocks, it doesn't track the total
        # number of matching documents, so if the user asks for all matched
        # docs we need to re-run the search using docs_for_query
        return self.top_searcher.docs_for_query(self.q)
    def count(self):
        if self.computes_count():
            return self.total
        else:
            # Fall back to re-running the query and counting the results
            return ilen(self.all_ids())
    # ScoredCollector.collect calls this
    def _collect(self, global_docnum, score):
        items = self.items
        self.total += 1
        # Document numbers are negated before putting them in the heap so that
        # higher document numbers have lower "priority" in the queue. Lower
        # document numbers should always come before higher document numbers
        # with the same score to keep the order stable.
        if len(items) < self.limit:
            # The heap isn't full, so add this document
            heappush(items, (score, 0 - global_docnum))
            # Negate score to act as sort key so higher scores appear first
            return 0 - score
        elif score > items[0][0]:
            # The heap is full, but if this document has a high enough
            # score to make the top N, add it to the heap
            heapreplace(items, (score, 0 - global_docnum))
            self.minscore = items[0][0]
            # Negate score to act as sort key so higher scores appear first
            return 0 - score
        else:
            return 0
    def remove(self, global_docnum):
        negated = 0 - global_docnum
        items = self.items
        # Remove the document if it's on the list (it may not be since
        # TopCollector forgets documents that don't make the top N list)
        for i in xrange(len(items)):
            if items[i][1] == negated:
                items.pop(i)
                # Restore the heap invariant
                heapify(items)
                self.minscore = items[0][0] if items else 0
                return
    def results(self):
        # The items are stored (positive score, negative docnum) so the heap
        # keeps the highest scores and lowest docnums, in order from lowest to
        # highest. Since for the results we want the highest scores first,
        # sort the heap in reverse order
        items = self.items
        items.sort(reverse=True)
        # De-negate the docnums for presentation to the user
        items = [(score, 0 - docnum) for score, docnum in items]
        return self._results(items)
class UnlimitedCollector(ScoredCollector):
    """Scored collector that keeps every matched document instead of only
    the top N.
    """
    def __init__(self, reverse=False):
        """
        :param reverse: if True, present the results in reversed order
            (lowest scores first).
        """
        ScoredCollector.__init__(self)
        self.reverse = reverse
    def _collect(self, global_docnum, score):
        # Called by ScoredCollector.collect() with the top-level docnum and
        # the (possibly "final"-adjusted) score
        self.docset.add(global_docnum)
        self.items.append((score, global_docnum))
        # The sort key is the negated score so better matches sort first
        return 0 - score
    def results(self):
        # Order by descending score, breaking ties with ascending document
        # number so equal-scoring documents keep a stable order
        keyfn = lambda item: (0 - item[0], item[1])
        self.items.sort(key=keyfn, reverse=self.reverse)
        return self._results(self.items, docset=self.docset)
# Sorting collector
class SortingCollector(Collector):
    """Collects all matches and orders them by a
    :class:`whoosh.sorting.Facet` instead of by score. See :doc:`/facets`
    for more information.
    """
    def __init__(self, sortedby, limit=10, reverse=False):
        """
        :param sortedby: see :doc:`/facets`.
        :param limit: the maximum number of results to return, or a falsy
            value for no limit.
        :param reverse: If True, reverse the overall results. Note that you
            can reverse individual facets in a multi-facet sort key as well.
        """
        Collector.__init__(self)
        self.sortfacet = sorting.MultiFacet.from_sortedby(sortedby)
        self.limit = limit
        self.reverse = reverse
    def prepare(self, top_searcher, q, context):
        # Build the categorizer that turns documents into sort keys
        self.categorizer = self.sortfacet.categorizer(top_searcher)
        # If the categorizer requires a live matcher, propagate that
        # requirement through the search context
        needs_current = context.needs_current or self.categorizer.needs_current
        Collector.prepare(self, top_searcher, q,
                          context.set(needs_current=needs_current))
        # Accumulates (sortkey, global_docnum) pairs
        self.items = []
    def set_subsearcher(self, subsearcher, offset):
        Collector.set_subsearcher(self, subsearcher, offset)
        # Let the categorizer know we moved to a new segment
        self.categorizer.set_searcher(subsearcher, offset)
    def sort_key(self, sub_docnum):
        return self.categorizer.key_for(self.matcher, sub_docnum)
    def collect(self, sub_docnum):
        global_docnum = self.offset + sub_docnum
        key = self.sort_key(sub_docnum)
        self.docset.add(global_docnum)
        self.items.append((key, global_docnum))
        return key
    def results(self):
        collected = self.items
        collected.sort(reverse=self.reverse)
        # Apply the limit (if any) only after sorting
        if self.limit:
            collected = collected[:self.limit]
        return self._results(collected, docset=self.docset)
class UnsortedCollector(Collector):
    """Collects matched documents without scoring or sorting them."""
    def prepare(self, top_searcher, q, context):
        # Scores are never needed, so always search with a boolean context
        # regardless of the context the caller passed in
        Collector.prepare(self, top_searcher, q,
                          top_searcher.boolean_context())
        # List of (None, global_docnum) pairs; None stands in for the sort
        # key expected by the Results object
        self.items = []
    def collect(self, sub_docnum):
        global_docnum = self.offset + sub_docnum
        self.docset.add(global_docnum)
        self.items.append((None, global_docnum))
    def results(self):
        return self._results(self.items, docset=self.docset)
# Wrapping collectors
class WrappingCollector(Collector):
    """Base class for collectors that delegate to a wrapped ("child")
    collector. Subclasses override individual methods to modify or extend
    the child's behavior; everything else is forwarded unchanged.
    """
    def __init__(self, child):
        """
        :param child: the collector this collector wraps.
        """
        self.child = child
    @property
    def top_searcher(self):
        # Expose the child's top-level searcher as our own
        return self.child.top_searcher
    def prepare(self, top_searcher, q, context):
        self.child.prepare(top_searcher, q, context)
    def set_subsearcher(self, subsearcher, offset):
        child = self.child
        child.set_subsearcher(subsearcher, offset)
        # Mirror the child's per-segment state so wrapping code can use it
        self.subsearcher = subsearcher
        self.matcher = child.matcher
        self.offset = child.offset
    def all_ids(self):
        return self.child.all_ids()
    def count(self):
        return self.child.count()
    def collect_matches(self):
        # Note: uses self.matches()/self.collect() (not the child's
        # directly) so subclass overrides take effect
        collect = self.collect
        for sub_docnum in self.matches():
            collect(sub_docnum)
    def sort_key(self, sub_docnum):
        return self.child.sort_key(sub_docnum)
    def collect(self, sub_docnum):
        return self.child.collect(sub_docnum)
    def matches(self):
        return self.child.matches()
    def finish(self):
        self.child.finish()
    def results(self):
        return self.child.results()
# Allow and disallow collector
class FilterCollector(WrappingCollector):
    """A collector that lets you allow and/or restrict certain document numbers
    in the results::
        uc = collectors.UnlimitedCollector()
        ins = query.Term("chapter", "rendering")
        outs = query.Term("status", "restricted")
        fc = FilterCollector(uc, allow=ins, restrict=outs)
        mysearcher.search_with_collector(myquery, fc)
        print(fc.results())
    This collector discards a document if:
    * The allowed set is not None and a document number is not in the set, or
    * The restrict set is not None and a document number is in the set.
    (So, if the same document number is in both sets, that document will be
    discarded.)
    If you have a reference to the collector, you can use
    ``FilterCollector.filtered_count`` to get the number of matching documents
    filtered out of the results by the collector.
    """
    def __init__(self, child, allow=None, restrict=None):
        """
        :param child: the collector to wrap.
        :param allow: a query, Results object, or set-like object containing
            document numbers that are allowed in the results, or None (meaning
            everything is allowed).
        :param restrict: a query, Results object, or set-like object containing
            document numbers to disallow from the results, or None (meaning
            nothing is disallowed).
        """
        self.child = child
        self.allow = allow
        self.restrict = restrict
    def prepare(self, top_searcher, q, context):
        self.child.prepare(top_searcher, q, context)
        allow = self.allow
        restrict = self.restrict
        # Convert the allow/restrict objects (queries, Results, or sets)
        # into comb sets of document numbers
        ftc = top_searcher._filter_to_comb
        self._allow = ftc(allow) if allow else None
        self._restrict = ftc(restrict) if restrict else None
        # Number of matches discarded by this collector during the search
        self.filtered_count = 0
    def all_ids(self):
        child = self.child
        _allow = self._allow
        _restrict = self._restrict
        for global_docnum in child.all_ids():
            # Compare against None explicitly (not truthiness) so that an
            # *empty* allow set filters out everything, consistently with
            # collect_matches() below. (Previously this used truthiness, so
            # an empty allow set was ignored here but filtered everything
            # during collection.)
            if ((_allow is not None and global_docnum not in _allow)
                or (_restrict is not None and global_docnum in _restrict)):
                continue
            yield global_docnum
    def count(self):
        child = self.child
        if child.computes_count():
            # NOTE(review): child.collect() is only called for documents that
            # pass the filter, so whether subtracting filtered_count here is
            # correct depends on what the child counts -- verify per child.
            return child.count() - self.filtered_count
        else:
            return ilen(self.all_ids())
    def collect_matches(self):
        child = self.child
        _allow = self._allow
        _restrict = self._restrict
        if _allow is not None or _restrict is not None:
            filtered_count = self.filtered_count
            for sub_docnum in child.matches():
                global_docnum = self.offset + sub_docnum
                if ((_allow is not None and global_docnum not in _allow)
                    or (_restrict is not None and global_docnum in _restrict)):
                    # The document is filtered out; count it but don't pass
                    # it on to the child collector
                    filtered_count += 1
                    continue
                child.collect(sub_docnum)
            self.filtered_count = filtered_count
        else:
            # If there was no allow or restrict set, don't do anything special,
            # just forward the call to the child collector
            child.collect_matches()
    def results(self):
        # Annotate the child's Results object with filtering information
        r = self.child.results()
        r.filtered_count = self.filtered_count
        r.allowed = self.allow
        r.restricted = self.restrict
        return r
# Facet grouping collector
class FacetCollector(WrappingCollector):
    """A collector that creates groups of documents based on
    :class:`whoosh.sorting.Facet` objects. See :doc:`/facets` for more
    information.
    This collector is used if you specify a ``groupedby`` parameter in the
    :meth:`whoosh.searching.Searcher.search` method. You can use the
    :meth:`whoosh.searching.Results.groups` method to access the facet groups.
    If you have a reference to the collector, you can also use
    ``FacetCollector.facetmaps`` to access the groups directly::
        uc = collectors.UnlimitedCollector()
        fc = FacetCollector(uc, sorting.FieldFacet("category"))
        mysearcher.search_with_collector(myquery, fc)
        print(fc.facetmaps)
    """
    def __init__(self, child, groupedby, maptype=None):
        """
        :param child: the collector to wrap.
        :param groupedby: see :doc:`/facets`.
        :param maptype: a :class:`whoosh.sorting.FacetMap` type to use for any
            facets that don't specify their own.
        """
        self.child = child
        self.facets = sorting.Facets.from_groupedby(groupedby)
        self.maptype = maptype
    def prepare(self, top_searcher, q, context):
        facets = self.facets
        # For each facet we're grouping by:
        # - Create a facetmap (to hold the groups)
        # - Create a categorizer (to generate document keys)
        self.facetmaps = {}
        self.categorizers = {}
        # Set needs_current to True if any of the categorizers require the
        # current document to work
        needs_current = context.needs_current
        for facetname, facet in facets.items():
            self.facetmaps[facetname] = facet.map(self.maptype)
            ctr = facet.categorizer(top_searcher)
            self.categorizers[facetname] = ctr
            needs_current = needs_current or ctr.needs_current
        context = context.set(needs_current=needs_current)
        self.child.prepare(top_searcher, q, context)
    def set_subsearcher(self, subsearcher, offset):
        WrappingCollector.set_subsearcher(self, subsearcher, offset)
        # Tell each categorizer about the new subsearcher and offset
        for categorizer in itervalues(self.categorizers):
            categorizer.set_searcher(self.child.subsearcher, self.child.offset)
    def collect(self, sub_docnum):
        matcher = self.child.matcher
        global_docnum = sub_docnum + self.child.offset
        # We want the sort key for the document so we can (by default) sort
        # the facet groups
        sortkey = self.child.collect(sub_docnum)
        # For each facet we're grouping by
        for name, categorizer in iteritems(self.categorizers):
            add = self.facetmaps[name].add
            # We have to do more work if the facet allows overlapping groups
            if categorizer.allow_overlap:
                for key in categorizer.keys_for(matcher, sub_docnum):
                    add(categorizer.key_to_name(key), global_docnum, sortkey)
            else:
                key = categorizer.key_for(matcher, sub_docnum)
                key = categorizer.key_to_name(key)
                add(key, global_docnum, sortkey)
        return sortkey
    def results(self):
        # Attach the collected facet maps to the child's Results object
        r = self.child.results()
        r._facetmaps = self.facetmaps
        return r
# Collapsing collector
class CollapseCollector(WrappingCollector):
    """A collector that collapses results based on a facet. That is, it
    eliminates all but the top N results that share the same facet key.
    Documents with an empty key for the facet are never eliminated.
    The "top" results within each group is determined by the result ordering
    (e.g. highest score in a scored search) or an optional second "ordering"
    facet.
    If you have a reference to the collector you can use
    ``CollapseCollector.collapsed_counts`` to access the number of documents
    eliminated based on each key::
        tc = TopCollector(limit=20)
        cc = CollapseCollector(tc, "group", limit=3)
        mysearcher.search_with_collector(myquery, cc)
        print(cc.collapsed_counts)
    See :ref:`collapsing` for more information.
    """
    def __init__(self, child, keyfacet, limit=1, order=None):
        """
        :param child: the collector to wrap.
        :param keyfacet: a :class:`whoosh.sorting.Facet` to use for collapsing.
            All but the top N documents that share a key will be eliminated
            from the results.
        :param limit: the maximum number of documents to keep for each key.
        :param order: an optional :class:`whoosh.sorting.Facet` to use
            to determine the "top" document(s) to keep when collapsing. The
            default (``order=None``) uses the results order (e.g. the
            highest score in a scored search).
        """
        self.child = child
        self.keyfacet = sorting.MultiFacet.from_sortedby(keyfacet)
        self.limit = limit
        if order:
            self.orderfacet = sorting.MultiFacet.from_sortedby(order)
        else:
            self.orderfacet = None
    def prepare(self, top_searcher, q, context):
        # Categorizer for getting the collapse key of a document
        self.keyer = self.keyfacet.categorizer(top_searcher)
        # Categorizer for getting the collapse order of a document
        self.orderer = None
        if self.orderfacet:
            self.orderer = self.orderfacet.categorizer(top_searcher)
        # Dictionary mapping keys to lists of (sortkey, global_docnum) pairs
        # representing the best docs for that key
        self.lists = defaultdict(list)
        # Dictionary mapping keys to the number of documents that have been
        # filtered out with that key
        self.collapsed_counts = defaultdict(int)
        # Total number of documents filtered out by collapsing
        self.collapsed_total = 0
        # If the keyer or orderer require a valid matcher, tell the child
        # collector we need it
        needs_current = (context.needs_current
                         or self.keyer.needs_current
                         or (self.orderer and self.orderer.needs_current))
        self.child.prepare(top_searcher, q,
                           context.set(needs_current=needs_current))
    def set_subsearcher(self, subsearcher, offset):
        WrappingCollector.set_subsearcher(self, subsearcher, offset)
        # Tell the keyer and (optional) orderer about the new subsearcher
        self.keyer.set_searcher(subsearcher, offset)
        if self.orderer:
            self.orderer.set_searcher(subsearcher, offset)
    def all_ids(self):
        child = self.child
        limit = self.limit
        counters = defaultdict(int)
        # Fix: collectors have no ``subsearchers()`` method (the previous
        # code called ``child.subsearchers()`` and raised AttributeError).
        # Iterate the leaf searchers the same way Collector.run() does.
        for subsearcher, offset in child.top_searcher.leaf_searchers():
            self.set_subsearcher(subsearcher, offset)
            matcher = child.matcher
            keyer = self.keyer
            for sub_docnum in child.matches():
                ckey = keyer.key_for(matcher, sub_docnum)
                if ckey is not None:
                    # Skip documents beyond the per-key limit
                    if ckey in counters and counters[ckey] >= limit:
                        continue
                    else:
                        counters[ckey] += 1
                yield offset + sub_docnum
    def count(self):
        if self.child.computes_count():
            # NOTE(review): assumes the child's count includes the collapsed
            # documents -- verify against each child's counting behavior
            return self.child.count() - self.collapsed_total
        else:
            return ilen(self.all_ids())
    def collect_matches(self):
        lists = self.lists
        limit = self.limit
        keyer = self.keyer
        orderer = self.orderer
        collapsed_counts = self.collapsed_counts
        child = self.child
        matcher = child.matcher
        offset = child.offset
        for sub_docnum in child.matches():
            # Collapsing category key
            ckey = keyer.key_to_name(keyer.key_for(matcher, sub_docnum))
            if not ckey:
                # If the document isn't in a collapsing category, just add it
                child.collect(sub_docnum)
            else:
                global_docnum = offset + sub_docnum
                if orderer:
                    # If user specified a collapse order, use it
                    sortkey = orderer.key_for(child.matcher, sub_docnum)
                else:
                    # Otherwise, use the results order
                    sortkey = child.sort_key(sub_docnum)
                # Current list of best docs for this collapse key, kept
                # sorted ascending by sortkey (best first)
                best = lists[ckey]
                add = False
                if len(best) < limit:
                    # If the list is not full yet, just add this document
                    add = True
                elif sortkey < best[-1][0]:
                    # If the list is full but this document has a lower sort
                    # key than the worst currently kept document, replace
                    # the "least-best" document
                    # Tell the child collector to remove the document
                    child.remove(best.pop()[1])
                    add = True
                if add:
                    insort(best, (sortkey, global_docnum))
                    child.collect(sub_docnum)
                else:
                    # Remember that a document was filtered
                    collapsed_counts[ckey] += 1
                    self.collapsed_total += 1
    def results(self):
        # Annotate the child's Results object with the collapse statistics
        r = self.child.results()
        r.collapsed_counts = self.collapsed_counts
        return r
# Time limit collector
class TimeLimitCollector(WrappingCollector):
    """A collector that raises a :class:`TimeLimit` exception if the search
    does not complete within a certain number of seconds::
        uc = collectors.UnlimitedCollector()
        tlc = TimeLimitedCollector(uc, timelimit=5.8)
        try:
            mysearcher.search_with_collector(myquery, tlc)
        except collectors.TimeLimit:
            print("The search ran out of time!")
        # We can still get partial results from the collector
        print(tlc.results())
    IMPORTANT: On Unix systems (systems where signal.SIGALRM is defined), the
    code uses signals to stop searching immediately when the time limit is
    reached. On Windows, the OS does not support this functionality, so the
    search only checks the time between each found document, so if a matcher
    is slow the search could exceed the time limit.
    """
    def __init__(self, child, timelimit, greedy=False, use_alarm=True):
        """
        :param child: the collector to wrap.
        :param timelimit: the maximum amount of time (in seconds) to
            allow for searching. If the search takes longer than this, it will
            raise a ``TimeLimit`` exception.
        :param greedy: if ``True``, the collector will finish adding the most
            recent hit before raising the ``TimeLimit`` exception.
        :param use_alarm: if ``True`` (the default), the collector will try to
            use signal.SIGALRM (on UNIX).
        """
        self.child = child
        self.timelimit = timelimit
        self.greedy = greedy
        if use_alarm:
            # Only use the alarm if this platform actually has SIGALRM
            import signal
            self.use_alarm = use_alarm and hasattr(signal, "SIGALRM")
        else:
            self.use_alarm = False
        self.timer = None
        self.timedout = False
    def prepare(self, top_searcher, q, context):
        self.child.prepare(top_searcher, q, context)
        self.timedout = False
        if self.use_alarm:
            import signal
            # Install the handler that will abort the search via TimeLimit
            signal.signal(signal.SIGALRM, self._was_signaled)
        # Start a timer thread. If the timer fires, it will call this object's
        # _timestop() method
        self.timer = threading.Timer(self.timelimit, self._timestop)
        self.timer.start()
    def _timestop(self):
        # Called when the timer expires
        self.timer = None
        # Set an attribute that will be noticed in the collect_matches() loop
        self.timedout = True
        if self.use_alarm:
            import signal
            # Interrupt the search immediately by signaling our own process
            os.kill(os.getpid(), signal.SIGALRM)
    def _was_signaled(self, signum, frame):
        # SIGALRM handler: abort the search right away
        raise TimeLimit
    def collect_matches(self):
        child = self.child
        greedy = self.greedy
        for sub_docnum in child.matches():
            # If the timer fired since the last loop and we're not greedy,
            # raise the exception
            if self.timedout and not greedy:
                raise TimeLimit
            child.collect(sub_docnum)
        # If the timer fired since we entered the loop or it fired earlier
        # but we were greedy, raise now
        if self.timedout:
            raise TimeLimit
    def finish(self):
        # Cancel the pending timer (if it hasn't fired) before finishing
        if self.timer:
            self.timer.cancel()
            self.timer = None
        self.child.finish()
# Matched terms collector
class TermsCollector(WrappingCollector):
    """A collector that remembers which terms appeared in each matched
    document.
    This collector is used if you specify ``terms=True`` in the
    :meth:`whoosh.searching.Searcher.search` method.
    If you have a reference to the collector, you can also use
    ``TermsCollector.termdocs`` and ``TermsCollector.docterms`` to access
    the mappings directly::
        uc = collectors.UnlimitedCollector()
        tc = TermsCollector(uc)
        mysearcher.search_with_collector(myquery, tc)
        # tc.termdocs is a dictionary mapping (fieldname, text) tuples to
        # sets of document numbers
        print(tc.termdocs)
        # tc.docterms is a dictionary mapping docnums to lists of
        # (fieldname, text) tuples
        print(tc.docterms)
    """
    def __init__(self, child, settype=set):
        """
        :param child: the collector to wrap.
        :param settype: collection type for accumulating per-term docnums.
        """
        self.child = child
        # NOTE(review): settype is stored but never used below -- termdocs
        # accumulates into array("I") instead; confirm intended behavior
        self.settype = settype
    def prepare(self, top_searcher, q, context):
        # This collector requires a valid matcher at each step
        self.child.prepare(top_searcher, q, context.set(needs_current=True))
        # A dictionary mapping (fieldname, text) pairs to arrays of docnums
        self.termdocs = defaultdict(lambda: array("I"))
        # A dictionary mapping docnums to lists of (fieldname, text) pairs
        self.docterms = defaultdict(list)
    def set_subsearcher(self, subsearcher, offset):
        WrappingCollector.set_subsearcher(self, subsearcher, offset)
        # Store a list of all the term matchers in the matcher tree
        self.termmatchers = list(self.child.matcher.term_matchers())
    def collect(self, sub_docnum):
        child = self.child
        termdocs = self.termdocs
        docterms = self.docterms
        child.collect(sub_docnum)
        global_docnum = child.offset + sub_docnum
        # For each term matcher...
        for tm in self.termmatchers:
            # If the term matcher is matching the current document...
            if tm.is_active() and tm.id() == sub_docnum:
                # Add it to the list of matching documents for the term
                term = tm.term()
                termdocs[term].append(global_docnum)
                docterms[global_docnum].append(term)
    def results(self):
        # Expose plain dicts (not defaultdicts) on the Results object
        r = self.child.results()
        r.termdocs = dict(self.termdocs)
        r.docterms = dict(self.docterms)
        return r
| [
"ndrzmansn@gmail.com"
] | ndrzmansn@gmail.com |
fa9a1379d28d85b2fc26b90fae36448da8f77891 | 18f8abb90efece37949f5b5758c7752b1602fb12 | /py/django_tools/django-haystack/haystack/management/commands/clear_index.py | d99fb5bb36e64bb0017632e707949d2afb342da0 | [
"BSD-3-Clause",
"MIT"
] | permissive | marceltoben/evandrix.github.com | caa7d4c2ef84ba8c5a9a6ace2126e8fd6db1a516 | abc3fbfb34f791f84e9a9d4dc522966421778ab2 | refs/heads/master | 2021-08-02T06:18:12.953567 | 2011-08-23T16:49:33 | 2011-08-23T16:49:33 | 2,267,457 | 3 | 5 | null | 2021-07-28T11:39:25 | 2011-08-25T11:18:56 | C | UTF-8 | Python | false | false | 1,778 | py | from optparse import make_option
import sys
from django.core.management.base import BaseCommand
from haystack.constants import DEFAULT_ALIAS
class Command(BaseCommand):
help = "Clears out the search index completely."
base_options = (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='If provided, no prompts will be issued to the user and the data will be wiped out.'
),
make_option("-u", "--using", action="store", type="string", dest="using", default=DEFAULT_ALIAS,
help='If provided, chooses a connection to work with.'
),
)
option_list = BaseCommand.option_list + base_options
def handle(self, **options):
"""Clears out the search index completely."""
from haystack import connections
self.verbosity = int(options.get('verbosity', 1))
self.using = options.get('using')
if options.get('interactive', True):
print
print "WARNING: This will irreparably remove EVERYTHING from your search index in connection '%s'." % self.using
print "Your choices after this are to restore from backups or rebuild via the `rebuild_index` command."
yes_or_no = raw_input("Are you sure you wish to continue? [y/N] ")
print
if not yes_or_no.lower().startswith('y'):
print "No action taken."
sys.exit()
if self.verbosity >= 1:
print "Removing all documents from your index because you said so."
backend = connections[self.using].get_backend()
backend.clear()
if self.verbosity >= 1:
print "All documents removed."
| [
"evandrix@gmail.com"
] | evandrix@gmail.com |
853bbc1268691dba67496264c74d9d41b009b1e5 | a84bc49dba142c4047a23d85917f411f42e196aa | /doc/src/web4sa/src-web4sa/apps/flask_apps/vib5/generate.py | 7abac724856713c9b47a14879f7a847d3b93ceb1 | [] | no_license | TZer0/web4sciapps | 40297ec4efd7bcad58514bfd891b6090eff7ff1f | cdcda068bbb44929a48d221410c635fa4d73da9c | refs/heads/master | 2021-01-22T08:59:27.682367 | 2014-09-23T11:45:05 | 2014-09-23T11:45:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | from parampool.generator.flask import generate
from compute import compute_gamma
generate(compute_gamma, default_field='FloatField', enable_login=True)
| [
"hpl@simula.no"
] | hpl@simula.no |
1ac68e78d93c953c0944986d901463430077c9d2 | bcc4390952e8ddf257c1daa417bc06f1565e2346 | /source/ch01/sum2.py | 5109147e9a5e38a453c3b7a46c62401b2f56e02e | [
"MIT",
"GPL-3.0-only",
"GPL-1.0-or-later",
"LGPL-2.0-or-later"
] | permissive | AngelLiang/programming-in-python3-2nd-edition | 0ef80d4ba2cd096de1bb589dddf294c9d27c320c | 8f9a6ab6768a10e94daef641009288de6845245f | refs/heads/master | 2022-08-05T19:52:40.072130 | 2019-12-31T08:10:01 | 2019-12-31T08:10:01 | 230,700,866 | 1 | 0 | MIT | 2022-07-29T23:04:40 | 2019-12-29T04:09:05 | Python | UTF-8 | Python | false | false | 1,028 | py | #!/usr/bin/env python3
# Copyright (c) 2008-11 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. It is provided for educational
# purposes and is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
print("Type integers, each followed by Enter; or ^D or ^Z to finish")
total = 0
count = 0
while True:
try:
line = input()
if line:
number = int(line)
total += number
count += 1
except ValueError as err:
print(err)
continue
except EOFError:
break
if count:
print("count =", count, "total =", total, "mean =", total / count)
| [
"pl01665077@163.com"
] | pl01665077@163.com |
14ee85625cc4b9860990a7d60e105505d91307b6 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_262/ch40_2020_04_06_14_18_08_199463.py | 7f129ea01af99b351af41da08835bf5433118189 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | lista=[]
i=0
def soma_valores(lista):
while i<leng(lista):
i+=1
lista+=lista | [
"you@example.com"
] | you@example.com |
2efcc17f3e10fb0decca68df806443b7a7f44f08 | bd72c02af0bbd8e3fc0d0b131e3fb9a2aaa93e75 | /Stack/largest_rectangle_in_histogram.py | 95c663a2b334ed2257f15635bc5dd2630ed6f5a4 | [] | no_license | harvi7/Leetcode-Problems-Python | d3a5e8898aceb11abc4cae12e1da50061c1d352c | 73adc00f6853e821592c68f5dddf0a823cce5d87 | refs/heads/master | 2023-05-11T09:03:03.181590 | 2023-04-29T22:03:41 | 2023-04-29T22:03:41 | 222,657,838 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | class Solution:
def largestRectangleArea(self, heights: List[int]) -> int:
if not heights or len(heights) == 0:return 0
hist_len = len(heights)
stack = []
maxArea = 0
i = 0
while i <= hist_len:
h = 0 if i == hist_len else heights[i]
if not stack or h >= heights[stack[-1]]:
stack.append(i)
else:
currMax = stack.pop()
maxArea = max(maxArea, heights[currMax] * (i if not stack else (i - 1 - stack[-1])))
i -= 1
i += 1
return maxArea | [
"iamharshvirani7@gmail.com"
] | iamharshvirani7@gmail.com |
98399b23e71e050447773e5b3aafe81bf176c63a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03722/s716342934.py | e7c9f2de48ff64e451b266c19f1a41933248d364 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | import sys
input=sys.stdin.readline
n,m=map(int,input().split())
graph=[]
for _ in range(m):
a,b,c=map(int,input().split())
graph.append([a-1,b-1,-c])
def BellmanFord(n,m,graph):
costs=[float("inf")]*n
costs[0]=0
for _ in range(n-1):
for i in range(m):
if costs[graph[i][1]]>costs[graph[i][0]]+graph[i][2]:
costs[graph[i][1]]=costs[graph[i][0]]+graph[i][2]
newcosts=[]
for i in costs:
newcosts.append(i)
for _ in range(n):
for i in range(m):
if newcosts[graph[i][1]]>newcosts[graph[i][0]]+graph[i][2]:
newcosts[graph[i][1]]=newcosts[graph[i][0]]+graph[i][2]
if newcosts[n-1]!=costs[n-1]:
return "inf"
else:
return -costs[n-1]
print(BellmanFord(n,m,graph)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2428731975a9ea83206a64765f3ef4a9c41eb485 | 4472e40c53ca3e1df4f9e477a6268133309b7597 | /_unittests/ut_notebooks/test_LONG_2A_notebook_3B_correction.py | bdd5996c9425a33da74130e2fd6738e43642c400 | [
"MIT"
] | permissive | amoussoubaruch/ensae_teaching_cs | 289729742608da064f07a79b10cf6cce48de1b51 | 313a6ccb8756dbaa4c52724839b69af8a5f4476e | refs/heads/master | 2021-01-16T19:31:49.734583 | 2016-09-09T08:29:58 | 2016-09-09T08:29:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,859 | py | """
@brief test log(time=620s)
notebook test
"""
import sys
import os
import unittest
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
try:
import pyquickhelper as skip_
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..",
"..",
"pyquickhelper",
"src")))
if path not in sys.path:
sys.path.append(path)
import pyquickhelper as skip_
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, add_missing_development_version
class TestNotebookRunner2a_3B_correction (unittest.TestCase):
def setUp(self):
add_missing_development_version(["pymyinstall", "pyensae", "pymmails"],
__file__, hide=True)
def test_notebook_runner_correction(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
from src.ensae_teaching_cs.automation.notebook_test_helper import ls_notebooks, execute_notebooks, unittest_raise_exception_notebook, clean_function_1a
temp = get_temp_folder(__file__, "temp_notebook2a_3B_correction")
keepnote = ls_notebooks("td2a")
assert len(keepnote) > 0
res = execute_notebooks(
temp, keepnote, lambda i, n: "_3B" in n and "correction" in n,
clean_function=clean_function_1a)
unittest_raise_exception_notebook(res, fLOG)
if __name__ == "__main__":
unittest.main()
| [
"xavier.dupre@ensae.fr"
] | xavier.dupre@ensae.fr |
b248edbd3bfea1ed54561ee19f126b3ef7302301 | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/twentyPercent/rank_1e96_D.py | 9dc7a74058ecda9aad5e9d179b6bcbbf0bea7a90 | [] | no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,392 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '1e96.csv'
identifier = 'D'
coefFrac = 0.2
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/twentyPercent/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/twentyPercent/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
#df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Keep coefficients within the given fraction when ordered by decreasing order of coefficient magnitude
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs['absVal'] = np.abs(coefs['coefficients'])
coefs.sort_values(by = 'absVal', ascending = False, inplace = True)
coefs = coefs[:int(14028 * coefFrac + 0.5)]
keepList = list(coefs.index)
del coefs
df1 = df1[keepList]
df1 = df1.reindex(sorted(df1.columns), axis = 1)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
scaler = pickle.load(g)
for i in range(len(df1)):
# subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
# Standardize inut DF using the standard scaler used for training data.
df2 = scaler.transform(df2)
# Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
predictions = clf.predict(df2)
score = sum(predictions)
scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
result.to_csv(h)
| [
"tanemur1@msu.edu"
] | tanemur1@msu.edu |
661f8b888e79a6f44694634cfd74115ed4dae3e8 | 3821860187e15a4235d541e7db510732c18212b0 | /tasks/views.py | fab3922931a2e2c9190ef6eba0db6b38369398da | [] | no_license | memadd/todo | 15fb5901a87b162bb793c0b9f4c73674e38bab8f | 3ed0acc15596964c50eca863b01fdddff7f5586d | refs/heads/master | 2021-04-02T18:33:27.582092 | 2020-03-31T22:18:29 | 2020-03-31T22:18:29 | 248,308,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import *
from .forms import *
# Create your views here.
def index(request):
tasks = Task.objects.all()
form = TaskForm()
if request.method == 'POST':
form = TaskForm(request.POST)
if form.is_valid():
form.save()
return redirect('/')
context = {'tasks':tasks, 'form':form}
return render (request, 'tasks/list.html', context)
def update_task(request, pk):
task = Task.objects.get(id=pk)
form = TaskForm(instance=task)
if request.method == 'POST':
form = TaskForm(request.POST, instance=task)
if form.is_valid():
form.save()
return redirect('/')
context = {'form':form}
return render (request, 'tasks/update_task.html', context)
def delete_task(request, pk):
item = Task.objects.get(id=pk)
if request.method == 'POST':
item.delete()
return redirect('/')
context = {'item': item}
return render(request, 'tasks/delete.html',context) | [
"memad632@gmail.com"
] | memad632@gmail.com |
f9169b2aac3ad5c32eb0cb07c38f5fe71fefbb5f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_205/ch84_2020_04_07_23_54_12_724713.py | 4c822726ed79558cc3825e318a52fc3229b76091 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | def inverte_dicionario (dic):
inverte = {}
for chave in dic.keys():
for valores in dic.values():
inverte[valores]=dic[valores]
return inverte | [
"you@example.com"
] | you@example.com |
76459a2392101ae49b4a1efd11c83b37c7c40025 | ef3ac1664accfe2f4f28800cb3dde383d04e2636 | /max possible score.py | 65c014f4f77dfe2708045c18bbc0c6484be1475d | [] | no_license | Shamabanu/python | 2466b253ead7249147844e22ede9017a2ffb299a | 76350525586b285773edb58912c1ba8eee35d1a6 | refs/heads/master | 2020-03-27T15:45:09.838053 | 2019-08-14T15:06:18 | 2019-08-14T15:06:18 | 146,736,750 | 3 | 6 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | def fac(c1,c2):
k=1
for m in range(c2+1,c1+1):
k*=m
return k
t=int(input())
ab=[]
for m in range(t):
ab.append(list(map(int,input().split())))
for j in ab:
n=fac(j[0],j[1])
c=0
while n>1:
x=2
while x<n+1:
if n%x==0:
n=n/x
c+=1
break
x+=1
print(c)
| [
"noreply@github.com"
] | Shamabanu.noreply@github.com |
32bc769627d10487455c81d6d3378ced34473b01 | ba694353a3cb1cfd02a6773b40f693386d0dba39 | /sdk/python/pulumi_google_native/networksecurity/v1beta1/tls_inspection_policy.py | 5ddbde955427cf65f6378ab8c8e4a45b0d9f96d9 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | pulumi/pulumi-google-native | cc57af8bd3d1d6b76f1f48333ed1f1b31d56f92b | 124d255e5b7f5440d1ef63c9a71e4cc1d661cd10 | refs/heads/master | 2023-08-25T00:18:00.300230 | 2023-07-20T04:25:48 | 2023-07-20T04:25:48 | 323,680,373 | 69 | 16 | Apache-2.0 | 2023-09-13T00:28:04 | 2020-12-22T16:39:01 | Python | UTF-8 | Python | false | false | 12,904 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['TlsInspectionPolicyArgs', 'TlsInspectionPolicy']
@pulumi.input_type
class TlsInspectionPolicyArgs:
def __init__(__self__, *,
ca_pool: pulumi.Input[str],
tls_inspection_policy_id: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a TlsInspectionPolicy resource.
:param pulumi.Input[str] ca_pool: A CA pool resource used to issue interception certificates. The CA pool string has a relative resource path following the form "projects/{project}/locations/{location}/caPools/{ca_pool}".
:param pulumi.Input[str] tls_inspection_policy_id: Required. Short name of the TlsInspectionPolicy resource to be created. This value should be 1-63 characters long, containing only letters, numbers, hyphens, and underscores, and should not start with a number. E.g. "tls_inspection_policy1".
:param pulumi.Input[str] description: Optional. Free-text description of the resource.
:param pulumi.Input[str] name: Name of the resource. Name is of the form projects/{project}/locations/{location}/tlsInspectionPolicies/{tls_inspection_policy} tls_inspection_policy should match the pattern:(^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$).
"""
pulumi.set(__self__, "ca_pool", ca_pool)
pulumi.set(__self__, "tls_inspection_policy_id", tls_inspection_policy_id)
if description is not None:
pulumi.set(__self__, "description", description)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="caPool")
def ca_pool(self) -> pulumi.Input[str]:
"""
A CA pool resource used to issue interception certificates. The CA pool string has a relative resource path following the form "projects/{project}/locations/{location}/caPools/{ca_pool}".
"""
return pulumi.get(self, "ca_pool")
@ca_pool.setter
def ca_pool(self, value: pulumi.Input[str]):
pulumi.set(self, "ca_pool", value)
@property
@pulumi.getter(name="tlsInspectionPolicyId")
def tls_inspection_policy_id(self) -> pulumi.Input[str]:
"""
Required. Short name of the TlsInspectionPolicy resource to be created. This value should be 1-63 characters long, containing only letters, numbers, hyphens, and underscores, and should not start with a number. E.g. "tls_inspection_policy1".
"""
return pulumi.get(self, "tls_inspection_policy_id")
@tls_inspection_policy_id.setter
def tls_inspection_policy_id(self, value: pulumi.Input[str]):
pulumi.set(self, "tls_inspection_policy_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Optional. Free-text description of the resource.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the resource. Name is of the form projects/{project}/locations/{location}/tlsInspectionPolicies/{tls_inspection_policy} tls_inspection_policy should match the pattern:(^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$).
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
class TlsInspectionPolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
ca_pool: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
tls_inspection_policy_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Creates a new TlsInspectionPolicy in a given project and location.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] ca_pool: A CA pool resource used to issue interception certificates. The CA pool string has a relative resource path following the form "projects/{project}/locations/{location}/caPools/{ca_pool}".
:param pulumi.Input[str] description: Optional. Free-text description of the resource.
:param pulumi.Input[str] name: Name of the resource. Name is of the form projects/{project}/locations/{location}/tlsInspectionPolicies/{tls_inspection_policy} tls_inspection_policy should match the pattern:(^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$).
:param pulumi.Input[str] tls_inspection_policy_id: Required. Short name of the TlsInspectionPolicy resource to be created. This value should be 1-63 characters long, containing only letters, numbers, hyphens, and underscores, and should not start with a number. E.g. "tls_inspection_policy1".
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TlsInspectionPolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a new TlsInspectionPolicy in a given project and location.
:param str resource_name: The name of the resource.
:param TlsInspectionPolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TlsInspectionPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
ca_pool: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
tls_inspection_policy_id: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TlsInspectionPolicyArgs.__new__(TlsInspectionPolicyArgs)
if ca_pool is None and not opts.urn:
raise TypeError("Missing required property 'ca_pool'")
__props__.__dict__["ca_pool"] = ca_pool
__props__.__dict__["description"] = description
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
if tls_inspection_policy_id is None and not opts.urn:
raise TypeError("Missing required property 'tls_inspection_policy_id'")
__props__.__dict__["tls_inspection_policy_id"] = tls_inspection_policy_id
__props__.__dict__["create_time"] = None
__props__.__dict__["update_time"] = None
replace_on_changes = pulumi.ResourceOptions(replace_on_changes=["location", "project", "tls_inspection_policy_id"])
opts = pulumi.ResourceOptions.merge(opts, replace_on_changes)
super(TlsInspectionPolicy, __self__).__init__(
'google-native:networksecurity/v1beta1:TlsInspectionPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'TlsInspectionPolicy':
"""
Get an existing TlsInspectionPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = TlsInspectionPolicyArgs.__new__(TlsInspectionPolicyArgs)
__props__.__dict__["ca_pool"] = None
__props__.__dict__["create_time"] = None
__props__.__dict__["description"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["project"] = None
__props__.__dict__["tls_inspection_policy_id"] = None
__props__.__dict__["update_time"] = None
return TlsInspectionPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="caPool")
def ca_pool(self) -> pulumi.Output[str]:
"""
A CA pool resource used to issue interception certificates. The CA pool string has a relative resource path following the form "projects/{project}/locations/{location}/caPools/{ca_pool}".
"""
return pulumi.get(self, "ca_pool")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> pulumi.Output[str]:
"""
The timestamp when the resource was created.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
"""
Optional. Free-text description of the resource.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the resource. Name is of the form projects/{project}/locations/{location}/tlsInspectionPolicies/{tls_inspection_policy} tls_inspection_policy should match the pattern:(^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$).
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
return pulumi.get(self, "project")
@property
@pulumi.getter(name="tlsInspectionPolicyId")
def tls_inspection_policy_id(self) -> pulumi.Output[str]:
"""
Required. Short name of the TlsInspectionPolicy resource to be created. This value should be 1-63 characters long, containing only letters, numbers, hyphens, and underscores, and should not start with a number. E.g. "tls_inspection_policy1".
"""
return pulumi.get(self, "tls_inspection_policy_id")
@property
@pulumi.getter(name="updateTime")
def update_time(self) -> pulumi.Output[str]:
"""
The timestamp when the resource was updated.
"""
return pulumi.get(self, "update_time")
| [
"noreply@github.com"
] | pulumi.noreply@github.com |
540ddc515614afa96a6c6e81efdcad3a4d539484 | 4feaf520374804d6f3feebe3700fb448692a44ba | /pullenti/ner/org/internal/OrgItemNumberToken.py | 9a6de49d739833dc981c94e92e6055a792890f6b | [] | no_license | MihaJjDa/APCLtask | f7be3fb6b0f31801196bf779f6a7e62ce245493b | 4745b45e199887d433ab256bb2e2ebf5dbe3f7cd | refs/heads/master | 2020-04-16T17:15:10.846647 | 2020-02-24T16:06:43 | 2020-02-24T16:06:43 | 165,769,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,200 | py | # Copyright (c) 2013, Pullenti. All rights reserved. Non-Commercial Freeware.
# This class is generated using the converter UniSharping (www.unisharping.ru) from Pullenti C#.NET project (www.pullenti.ru).
# See www.pullenti.ru/downloadpage.aspx.
from pullenti.unisharp.Utils import Utils
from pullenti.ner.Token import Token
from pullenti.ner.MetaToken import MetaToken
from pullenti.ner.NumberToken import NumberToken
from pullenti.ner.TextToken import TextToken
from pullenti.ner.core.NumberHelper import NumberHelper
from pullenti.ner.core.MiscHelper import MiscHelper
class OrgItemNumberToken(MetaToken):
def __init__(self, begin : 'Token', end : 'Token') -> None:
super().__init__(begin, end, None)
self.number = None;
def __str__(self) -> str:
return "№ {0}".format(Utils.ifNotNull(self.number, "?"))
@staticmethod
def tryAttach(t : 'Token', can_be_pure_number : bool=False, typ : 'OrgItemTypeToken'=None) -> 'OrgItemNumberToken':
if (t is None):
return None
tt = Utils.asObjectOrNull(t, TextToken)
if (tt is not None):
t1 = MiscHelper.checkNumberPrefix(tt)
if ((isinstance(t1, NumberToken)) and not t1.is_newline_before):
return OrgItemNumberToken._new1704(tt, t1, str((t1).value))
if ((t.is_hiphen and (isinstance(t.next0_, NumberToken)) and not t.is_whitespace_before) and not t.is_whitespace_after):
if (NumberHelper.tryParseAge(t.next0_) is None):
return OrgItemNumberToken._new1704(t, t.next0_, str((t.next0_).value))
if (isinstance(t, NumberToken)):
if ((not t.is_whitespace_before and t.previous is not None and t.previous.is_hiphen)):
return OrgItemNumberToken._new1704(t, t, str((t).value))
if (typ is not None and typ.typ is not None and (((typ.typ == "войсковая часть" or typ.typ == "військова частина" or "колония" in typ.typ) or "колонія" in typ.typ))):
if (t.length_char >= 4 or t.length_char <= 6):
res = OrgItemNumberToken._new1704(t, t, str((t).value))
if (t.next0_ is not None and ((t.next0_.is_hiphen or t.next0_.isCharOf("\\/"))) and not t.next0_.is_whitespace_after):
if ((isinstance(t.next0_.next0_, NumberToken)) and ((t.length_char + t.next0_.next0_.length_char) < 9)):
res.end_token = t.next0_.next0_
res.number = "{0}-{1}".format(res.number, (res.end_token).value)
elif ((isinstance(t.next0_.next0_, TextToken)) and t.next0_.next0_.length_char == 1 and t.next0_.next0_.chars.is_letter):
res.end_token = t.next0_.next0_
res.number = "{0}{1}".format(res.number, (res.end_token).term)
elif ((isinstance(t.next0_, TextToken)) and t.next0_.length_char == 1 and t.next0_.chars.is_letter):
res.end_token = t.next0_
res.number = "{0}{1}".format(res.number, (res.end_token).term)
return res
if (((isinstance(t, TextToken)) and t.length_char == 1 and t.chars.is_letter) and not t.is_whitespace_after):
if (typ is not None and typ.typ is not None and (((typ.typ == "войсковая часть" or typ.typ == "військова частина" or "колония" in typ.typ) or "колонія" in typ.typ))):
tt1 = t.next0_
if (tt1 is not None and tt1.is_hiphen):
tt1 = tt1.next0_
if ((isinstance(tt1, NumberToken)) and not tt1.is_whitespace_before):
res = OrgItemNumberToken(t, tt1)
res.number = "{0}{1}".format((t).term, (tt1).value)
return res
return None
@staticmethod
def _new1704(_arg1 : 'Token', _arg2 : 'Token', _arg3 : str) -> 'OrgItemNumberToken':
res = OrgItemNumberToken(_arg1, _arg2)
res.number = _arg3
return res | [
"danila.puchkin@mail.ru"
] | danila.puchkin@mail.ru |
9fe80f0e87dfc1126fed1e23de9636b732dc37f6 | 2090b6b92d5cada89504de548b14f9c729856606 | /visualize/gmt/helpers/generate_gmt_station_list.py | a08d5762939b547fbd2afd76dd6ba239210ef41b | [] | no_license | ziyixiArchive/Japan_Slab_code | 4f6a366889278ad499971cf1132591b9029c0f8c | 4cb19939e45739faee7a8b6ec3d3a5da4549a108 | refs/heads/master | 2022-03-14T18:11:47.768695 | 2019-12-17T21:48:32 | 2019-12-17T21:48:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | import numpy as np
import click
CEA_NETWORKS = ["AH", "BJ", "BU", "CQ", "FJ", "GD", "GS", "GX", "GZ", "HA", "HB", "HE", "HI", "HL", "HN",
"JL", "JS", "JX", "LN", "NM", "NX", "QH", "SC", "SD", "SH", "SN", "SX", "TJ", "XJ", "XZ", "YN", "ZJ"]
@click.command()
@click.option('--stations_file', required=True, type=str)
@click.option('--output_file', required=True, type=str)
def main(stations_file, output_file):
stations = np.loadtxt(stations_file, dtype=np.str)
with open(output_file, "w") as f:
for row in stations:
net = row[1]
if(net in CEA_NETWORKS):
net = 0
elif(net == "BO"):
net = 1
elif(net == "KG"):
net = 2
elif(net == "XL"):
net = 3
elif(net == "8B"):
net = 4
elif(net == "YP"):
net = 5
elif(net == "X4"):
net = 6
else:
net = 7
f.write(f"{row[3]} {row[2]} {net}\n")
if __name__ == "__main__":
main()
| [
"xiziyi@msu.edu"
] | xiziyi@msu.edu |
a212d11a29b6161c29d2539135a62e3803d7c7ca | 3f09e77f169780968eb4bd5dc24b6927ed87dfa2 | /src/Problems/Binary_Tree_Maximum_Path_Sum.py | cf6ee11ab89c3fc0ae2de014dbe7b1d837f3788f | [] | no_license | zouyuanrenren/Leetcode | ad921836256c31e31cf079cf8e671a8f865c0660 | 188b104b81e6c73792f7c803c0fa025f9413a484 | refs/heads/master | 2020-12-24T16:59:12.464615 | 2015-01-19T21:59:15 | 2015-01-19T21:59:15 | 26,719,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,726 | py | '''
Created on 21 Nov 2014
@author: zouyuanrenren
'''
'''
Given a binary tree, find the maximum path sum.
The path may start and end at any node in the tree.
For example:
Given the below binary tree,
1
/ \
2 3
Return 6.
'''
'''
The idea is simple:
1. for each node, there are 4 paths that include the node:
a. node itself
b. node + left sub-path with max sum
c. node + right sub-path with max sum
d. node + left sub-path with max sum + right sub-path with max sum
we only need to compute the largest out of the above 4 for each node
2. for each node, the sub-path with max sum that ends with the node can be:
a. node itself
b. node + left sub-path with max sum
c. node + right sub-path with max sum
we only need to compute the largest out of the above 3 for each node, so that it can be used by its parent node
3. hence we do with depth-first search and recursion
'''
# Definition for a binary tree node
# Definition for a binary tree node
class TreeNode:
    """A single binary-tree node with a value and two child links."""

    def __init__(self, x):
        # value stored at this node; children start out absent
        self.val, self.left, self.right = x, None, None
class Solution:
    """Maximum path sum of a binary tree; the path may start and end anywhere."""

    # @param root, a tree node
    # @return an integer
    def maxPathSum(self, root):
        """Return the largest sum over any downward-connected path in the tree."""
        if root is None:
            return 0
        best = [None]  # one-element list so the recursion can update it in place
        self.maxsum(root, best)
        return best[0]

    def maxsum(self, root, maxlist):
        """Return the best single-branch sum ending at `root`; record the
        best path seen anywhere (possibly bending through a node) in maxlist[0]."""
        if root is None:
            return 0
        left = self.maxsum(root.left, maxlist)
        right = self.maxsum(root.right, maxlist)
        # best path that ends at root and could extend upward to its parent:
        # root alone, or root plus the better branch (if it helps)
        branch = root.val + max(0, left, right)
        # best path constrained to pass through root: may use both branches
        through = max(branch, root.val + left + right)
        maxlist[0] = through if maxlist[0] is None else max(maxlist[0], through)
        return branch
| [
"y.ren@abdn.ac.uk"
] | y.ren@abdn.ac.uk |
55865bd610510d6adfb96a2195797860ebd21aa2 | a46fc5187245f7ac79758ae475d4d865e24f482b | /211_add_and_search_word/add_word.py | 10752d7d4f827bc56950070c27b991981c6095a3 | [] | no_license | narnat/leetcode | ae31f9321ac9a087244dddd64706780ea57ded91 | 20a48021be5e5348d681e910c843e734df98b596 | refs/heads/master | 2022-12-08T00:58:12.547227 | 2020-08-26T21:04:53 | 2020-08-26T21:04:53 | 257,167,879 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,417 | py | #!/usr/bin/env python
class Node:
    """
    Prefix-tree node.
    children[i] holds the subtree for letter chr(ord('a') + i), or None.
    is_end is True when a stored word terminates exactly at this node.
    """
    def __init__(self):
        self.children = [None] * 26
        self.is_end = False
class WordDictionary:
    """Stores lowercase words and answers lookups in which '.' matches any letter."""

    def __init__(self):
        """Initialize your data structure here."""
        # root of a 26-ary prefix tree over 'a'..'z'
        self.root = Node()

    def addWord(self, word: str) -> None:
        """Adds a word into the data structure."""
        node = self.root
        for ch in word:
            slot = ord(ch) - ord('a')
            if node.children[slot] is None:
                node.children[slot] = Node()
            node = node.children[slot]
        node.is_end = True

    def search(self, word: str) -> bool:
        """Returns if the word is in the data structure. A word could contain
        the dot character '.' to represent any one letter."""
        return self.search_rec(word, 0, self.root)

    def search_rec(self, word, n, node):
        """Recursively match word[n:] starting at `node`; '.' tries every child."""
        if node is None:
            return False
        if n == len(word):
            return node.is_end
        ch = word[n]
        if ch == '.':
            # wildcard: succeed if any child subtree matches the remainder
            return any(self.search_rec(word, n + 1, child) for child in node.children)
        return self.search_rec(word, n + 1, node.children[ord(ch) - ord('a')])
| [
"farruh1996@gmail.com"
] | farruh1996@gmail.com |
87d22d165b6db77ed6dce9c200bbaaa6eb4f752f | 6b85910d57ad533b887a462082084dcef8e42bd8 | /cifar10_brn_mode_2.py | 004cf3feedfaebbd57caf7ffea8a3b1a5f0d4db3 | [] | no_license | ml-lab/BatchRenormalization | 49137cb7457f27807524500bee422c085a2fb4e8 | fdd1cd2c0da0f6105ad29852969630abeb4890c7 | refs/heads/master | 2020-05-29T21:03:29.698663 | 2017-02-20T22:31:21 | 2017-02-20T22:31:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,437 | py | import numpy as np
import json
import keras.callbacks as callbacks
from keras.datasets import cifar10
import keras.utils.np_utils as kutils
from keras import backend as K
from wrn_renorm import WideResidualNetwork
# Training/evaluation hyperparameters.
batch_size = 128
nb_epoch = 100

img_rows, img_cols = 32, 32

# Load CIFAR-10 and scale pixel intensities into [0, 1].
(trainX, trainY), (testX, testY) = cifar10.load_data()

trainX = trainX.astype('float32')
trainX /= 255.0
testX = testX.astype('float32')
testX /= 255.0

# One-hot encode the 10 class labels.
trainY = kutils.to_categorical(trainY)
testY = kutils.to_categorical(testY)

# Channels-first vs channels-last input shape, depending on the Keras backend.
init_shape = (3, 32, 32) if K.image_dim_ordering() == 'th' else (32, 32, 3)

# Wide ResNet 16-4 with batch renormalization (mode 2); weights are loaded
# from a previous training run rather than retrained here.
model = WideResidualNetwork(depth=16, width=4, weights=None, classes=10, mode=2) # mode 2

model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])

model.load_weights('weights/Batch renorm Weights Mode 2.h5')

# Training code kept for reference; this script only evaluates.
# history = model.fit(trainX, trainY, batch_size, nb_epoch=nb_epoch,
#                     callbacks=[
#                         callbacks.ModelCheckpoint("weights/Batch renorm Weights Mode 2.h5", monitor="val_acc", save_best_only=True,
#                                                   save_weights_only=True)],
#                     validation_data=(testX, testY))
#
# with open('history/batch_renorm_mode_2_history.txt', 'w') as f:
#     json.dump(history.history, f)

scores = model.evaluate(testX, testY, batch_size)
print("Test loss : %0.5f" % (scores[0]))
print("Test accuracy = %0.5f" % (scores[1]))
| [
"titu1994@gmail.com"
] | titu1994@gmail.com |
ba8acff9e53924815b665296b189e9c5a48a1694 | cb99ba5b850e5667166c9a7b318ab09f28a50da3 | /wxchat/decorators.py | f43b23f05028c480d7b5ff78d40110cb97151d10 | [] | no_license | malx927/kele | 3831714eb6335e6fb2b05d463e4c7875aa87de2b | 542b412e9e9859b03d47d289a9069b9262289897 | refs/heads/master | 2022-12-02T13:29:57.174259 | 2021-07-11T13:26:00 | 2021-07-11T13:26:00 | 130,623,335 | 0 | 1 | null | 2022-11-22T02:28:55 | 2018-04-23T01:21:14 | HTML | UTF-8 | Python | false | false | 2,631 | py | #-*-coding:utf-8-*-
import json
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404
from wxchat.models import WxUserinfo
__author__ = 'malxin'
from django.conf import settings
from wechatpy.oauth import WeChatOAuth
def weixin_decorator(func):
    """View decorator implementing the WeChat OAuth2 web-authorization flow.

    If the session already holds an openid, the cached WxUserinfo record is
    loaded into the session and the wrapped view runs.  Otherwise the user
    is redirected to WeChat for authorization; on return (with ?code=...)
    the access token and user info are fetched, persisted, and cached in
    the session before the view runs.
    """
    def wrapper(request, *args, **kwargs):
        code = request.GET.get('code', None)
        openid = request.session.get('openid', None)
        print('weixin_decorator', code, openid)
        if openid is None:
            if code is None: # no authorization code yet: redirect to WeChat to obtain one
                redirect_url = '%s://%s%s' % (request.scheme, request.get_host(), request.get_full_path())
                print('redirect_url=', redirect_url)
                webchatOAuth = WeChatOAuth(settings.WECHAT_APPID, settings.WECHAT_SECRET, redirect_url, 'snsapi_userinfo')
                authorize_url = webchatOAuth.authorize_url
                return HttpResponseRedirect(authorize_url)
            else: # user authorized: exchange the code for a token and pull the profile
                webchatOAuth = WeChatOAuth(settings.WECHAT_APPID, settings.WECHAT_SECRET, '', 'snsapi_userinfo')
                res = webchatOAuth.fetch_access_token(code)
                if 'errcode' in res:
                    # token exchange failed -- surface the raw WeChat error payload
                    return HttpResponse(json.dumps(res))
                else:
                    open_id = webchatOAuth.open_id
                    userinfo = webchatOAuth.get_user_info()
                    # 'privilege' is a list field we don't store on the model
                    userinfo.pop('privilege')
                    obj, created = WxUserinfo.objects.update_or_create(openid=open_id, defaults=userinfo)
                    request.session['openid'] = open_id
                    userinf = get_object_or_404(WxUserinfo, openid=open_id)
                    request.session['nickname'] = userinf.nickname
                    request.session['is_member'] = userinf.is_member
                    request.session['headimgurl'] = userinf.headimgurl
                    request.session['role'] = userinf.member_role.id if userinf.member_role else 0
                    return func(request, *args, **kwargs)
        else:
            # openid already in session: refresh cached profile fields and proceed
            request.session['openid'] = openid
            userinf = get_object_or_404(WxUserinfo, openid=openid)
            request.session['nickname'] = userinf.nickname
            # request.session['is_member'] = userinf.is_member
            # NOTE(review): is_member is hard-coded to 1 here (the DB value is
            # commented out above) -- presumably a debugging override; confirm.
            request.session['is_member'] = 1
            request.session['headimgurl'] = userinf.headimgurl
            request.session['role'] = userinf.member_role.id if userinf.member_role else 0
            return func(request, *args, **kwargs)
    return wrapper
| [
"5971158@qq.com"
] | 5971158@qq.com |
4d0139920a8802766a558ba8a6027cb12a1d4cda | 589fa0b489269a87b577874423dc1bc1a7662e47 | /examples/processing_flipped/plot_general_org.py | 58e97dd954c36474f719e8106e9207acc681c41c | [] | no_license | rjonnal/octoblob | fa15ad5fc0384b9a73175d5b064f10bca8b60766 | 680cd0cb3d8da47726d46c1285ff0ebd215cec6f | refs/heads/main | 2023-07-21T03:21:16.333278 | 2023-07-12T14:29:33 | 2023-07-12T14:29:33 | 327,063,294 | 4 | 1 | null | 2021-09-25T16:02:35 | 2021-01-05T16:58:10 | Jupyter Notebook | UTF-8 | Python | false | false | 16,081 | py | from matplotlib import pyplot as plt
import numpy as np
import sys,os,glob,shutil
import logging
import octoblob.functions as blobf
import octoblob.org_tools as blobo
import pathlib
# ---- figure defaults ----
plt.rcParams["font.family"] = "serif"
plt.rcParams["font.size"] = 9

# The index of the processed ORG blocks at which the stimulus was delivered.
# A few cases:
# 1. Typical cone ORG applications. We process blocks B-scans 80 through 140.
#    The stimulus flash is given at B-scan 100, which is the 20th processed
#    B-scan. Thus, stimulus_index=20
# 2. Noise/filtering project. We want to see all the pre-stimulus blocks, thus
#    we process B-scans 0 through 140. The stimulus flash is given at 0.25 s
#    (with a B-scan rate of 400 Hz and period of 2.5 ms), thus the stimulus
#    flash is given at the 100th B-scan, and stimulus_index = 100

stimulus_index = 20

# ---- ROI box / line drawing style ----
box_alpha = 0.75
box_linewidth = 2.0
box_padding = 3.0

line_alpha = 1.0
line_linewidth = 1.0

# ---- ORG velocity trace style ----
org_plot_linewidth = 0.75
org_plot_alpha = 0.5
mean_org_plot_alpha = 1.0
mean_org_plot_linewidth = 1

tlim = (-0.04,0.04) # time limits for plotting ORG in s
zlim = (400,600) # depth limits for profile plot in um
vlim = (-5,5) # velocity limits for plotting in um/s

# axial scale used to convert pixels to microns in the profile plot
z_um_per_pixel = 3.0

# refine_z specifies the number of pixels (+/-) over which the
# program may search to identify a local peak. The program begins by asking
# the user to trace line segments through two layers of interest. These layers
# may not be smooth. From one A-scan to the next, the brightest pixel or "peak"
# corresponding to the layer may be displaced axially from the intersection
# of the line segment with the A-scan. refine_z specifies the distance (in either
# direction, above or below that intersection) where the program may search for a
# brighter pixel with which to compute the phase. The optimal setting here will
# largely be determined by how isolated the layer of interest is. For a relatively
# isolated layer, such as IS/OS near the fovea, a large value may be best. For
# closely packed layers such as COST and RPE, smaller values may be useful. The
# user receives immediate feedback from the program's selection of bright pixels
# and can observe whether refine_z is too high (i.e., causing the wrong layer
# to be segmented) or too low (i.e., missing the brightest pixels.
refine_z = 1
def level(im):
    """Flatten (de-tilt) a B-scan by applying its best leveling shear."""
    return shear(im, get_level_roll_vec(im))
def shear(im, roll_vec):
    """Return a copy of `im` whose column `c` is circularly shifted
    (rolled) vertically by roll_vec[c] pixels."""
    sheared = np.zeros(im.shape)
    for col, shift in enumerate(roll_vec):
        sheared[:, col] = np.roll(im[:, col], shift)
    return sheared
def get_roll_vec(im, row_per_col):
    """Integer per-column shift vector implementing a linear shear of
    `row_per_col` rows per column, centered on the middle column."""
    n_cols = im.shape[1]
    shifts = (np.arange(n_cols) - n_cols / 2.0) * row_per_col
    return np.round(shifts).astype(int)
def get_level_roll_vec(im, limit=0.1, N=16):
    """Search N candidate shear slopes in [-limit, limit] and return the
    per-column roll vector whose sheared mean profile has the highest
    peak -- i.e. the shear that best levels the image's bright layer."""
    best_rv = None
    best_peak = -np.inf
    for slope in np.linspace(-limit, limit, N):
        rv = get_roll_vec(im, slope)
        profile = np.mean(shear(im, rv), axis=1)
        peak = np.max(profile)
        # strict > keeps the first-best slope, matching argmax tie-breaking
        if peak > best_peak:
            best_peak = peak
            best_rv = rv
    return best_rv
def path2str(f):
    """Flatten a path into an underscore-joined tag, e.g. 'a/b/c' -> 'b_c'.

    Matches the original behavior for relative paths (the top-most
    component left in `head` when the loop ends is deliberately dropped,
    which conveniently discards a leading '.').  The original looped
    forever on absolute paths because os.path.split('/') returns
    ('/', ''); we now stop as soon as split() makes no progress.
    """
    head, tail = os.path.split(f)
    tails = []
    while len(head) > 0:
        tails.append(tail)
        new_head, tail = os.path.split(head)
        if new_head == head:
            # reached the filesystem root (or a drive); splitting further
            # would loop forever
            break
        head = new_head
    tails = tails[::-1]
    return '_'.join(tails)
def collect_files(src, dst):
    """Copy every entry of `src` into `dst`, renaming each file to its
    flattened path string (see path2str) so names stay unique."""
    os.makedirs(dst, exist_ok=True)
    for src_file in glob.glob(os.path.join(src, '*')):
        shutil.copyfile(src_file, os.path.join(dst, path2str(src_file)))
def phase_to_nm(phase):
    """Convert interferometric phase (rad) to displacement (nm),
    assuming a 1050 nm source and tissue refractive index 1.38."""
    nm_per_radian = 1050.0 / (4 * np.pi * 1.38)
    return phase * nm_per_radian
def nm_to_phase(nm):
    """Convert displacement (nm) to interferometric phase (rad);
    inverse of phase_to_nm."""
    radians_per_nm = (4 * np.pi * 1.38) / 1050.0
    return nm * radians_per_nm
# pay attention to the default value of stim_index, since the b-scans right after stimulus
# determine how the data are displayed to the user; until late 2022, we've been collecting 400
# @ 400 Hz, and the stimulus is delivered 0.25 seconds into the series, i.e. at frame 100; however
# we only process B-scans 80-140, i.e. 50 ms before stimulus through 100 ms after stimulus, and
# thus the stim_index is 20
def plot(folder,stim_index=stimulus_index):
    """Interactive ORG viewer for one processed 'org' folder.

    Loads per-B-scan amplitude and phase-slope .npy files (plus auxiliary
    correlation/variance arrays), levels them with a common shear, and
    opens a matplotlib UI: the user traces two layers with 4 clicks per
    ROI, velocity and profile panels update live, Enter saves results,
    Backspace removes the last ROI, right-click deletes the ROI under the
    cursor.  Shares state with the nested handlers via module globals.
    Returns the list of ROI tuples.
    """
    colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    # per-block output files written by the ORG processing step
    phase_slope_flist = glob.glob(os.path.join(folder,'*phase_slope.npy'))
    phase_slope_flist.sort()
    amplitude_flist = glob.glob(os.path.join(folder,'*amplitude.npy'))
    amplitude_flist.sort()
    # now we load the other data that may be useful for filtering:
    correlations_flist = glob.glob(os.path.join(folder,'*correlations.npy'))
    correlations_flist.sort()
    masked_temporal_variance_flist = glob.glob(os.path.join(folder,'*masked_temporal_variance.npy'))
    masked_temporal_variance_flist.sort()
    phase_slope_fitting_error_flist = glob.glob(os.path.join(folder,'*phase_slope_fitting_error.npy'))
    phase_slope_fitting_error_flist.sort()
    temporal_variance_flist = glob.glob(os.path.join(folder,'*temporal_variance.npy'))
    # exclude the masked variants already collected above
    temporal_variance_flist = [f for f in temporal_variance_flist if f.find('masked')==-1]
    temporal_variance_flist.sort()
    #t = np.arange(len(amplitude_flist))*0.0025-0.24
    # time axis: 2.5 ms per block, zeroed near the stimulus (10 ms offset)
    t = (-stim_index+np.arange(len(amplitude_flist)))*0.0025+10e-3
    # the B-scan shown for ROI selection is the one at the stimulus
    display_bscan = np.load(amplitude_flist[stim_index])
    dB = 20*np.log10(display_bscan)
    dbclim = np.percentile(dB,(30,99.99))
    markersize = 8.0
    # shared with the nested event handlers below
    global rois,click_points,index,abscans,pbscans,tag,correlations,masked_temporal_variance,phase_slope_fitting_error_bscans,temporal_variance
    tag = folder.replace('/','_').replace('\\','_')
    # level the display B-scan and apply the same shear to every block
    roll_vec = get_level_roll_vec(display_bscan)
    display_bscan = shear(display_bscan,roll_vec)
    abscans = []
    pbscans = []
    correlations = []
    masked_temporal_variance = []
    phase_slope_fitting_error_bscans = []
    temporal_variance = []
    for pf,af,cf,mtvf,psfef,tvf in zip(phase_slope_flist,amplitude_flist,correlations_flist,masked_temporal_variance_flist,phase_slope_fitting_error_flist,temporal_variance_flist):
        abscans.append(shear(np.load(af),roll_vec))
        pbscans.append(shear(np.load(pf),roll_vec))
        correlations.append(np.load(cf))
        masked_temporal_variance.append(np.load(mtvf))
        phase_slope_fitting_error_bscans.append(shear(np.load(psfef),roll_vec))
        temporal_variance.append(np.load(tvf))
    abscans = np.array(abscans)
    pbscans = np.array(pbscans)
    correlations = np.array(correlations)
    masked_temporal_variance = np.array(masked_temporal_variance)
    phase_slope_fitting_error_bscans = np.array(phase_slope_fitting_error_bscans)
    temporal_variance = np.array(temporal_variance)
    rois = []
    click_points = []
    index = 0
    # layout: ax1 = B-scan, ax2 = velocity vs time, ax3 = axial profile
    fig = plt.figure()
    fig.set_size_inches((6,3))
    fig.set_dpi(300)
    ax1 = fig.add_axes([0.03,0.03,.38,0.94])
    ax2 = fig.add_axes([0.51,0.6,0.38,0.37])
    ax3 = fig.add_axes([0.51,0.1,0.38,0.37])
    ax1.set_xlim((10,235))
    ax1.set_xticks([])
    ax1.set_yticks([])
    ax1.set_aspect('auto')
    ax1.imshow(20*np.log10(display_bscan),clim=dbclim,cmap='gray',aspect='auto')
    ax2.set_ylim(vlim)
    ax2.set_xlim(tlim)
    ax2.set_xlabel('time (s)')
    ax2.set_ylabel('$v$ ($\mu m$/s)')
    ax3.set_xlabel('depth ($\mu m$)')
    ax3.set_xlim(zlim)
    ax3.set_yticks([])
    ax3.set_ylabel('amplitude (ADU)')
    # NOTE(review): the ax1 setup below repeats the block above -- appears
    # redundant but is kept as-is.
    ax1.set_xlim((10,235))
    ax1.set_xticks([])
    ax1.set_yticks([])
    ax1.set_aspect('auto')
    ax1.imshow(20*np.log10(display_bscan),clim=dbclim,cmap='gray',aspect='auto')
    ax2.axvline(0.0,color='g',linestyle='--')
    plt.pause(.0001)

    # Redraw all three panels from the current contents of `rois`.
    def draw_rois():
        ax1.clear()
        ax1.set_xlim((10,235))
        ax1.set_xticks([])
        ax1.set_yticks([])
        ax1.set_aspect('auto')
        ax1.imshow(20*np.log10(display_bscan),clim=dbclim,cmap='gray',aspect='auto')
        ax3.clear()
        ax3.set_xlim(zlim)
        for k,roi in enumerate(rois):
            full_profile = roi[7]
            full_profile = full_profile-np.min(full_profile)
            full_profile_pv = np.max(full_profile)
            if k==0:
                # vertical stagger between successive ROI profiles
                offset0 = full_profile_pv*0.2
            offset = offset0*k
            z_um = np.arange(len(full_profile))*z_um_per_pixel
            x1 = roi[5]
            x2 = roi[6]
            bx1 = x1-box_padding
            bx2 = x2+box_padding
            x = np.arange(x1,x2)
            # segmented layer depths at the stimulus frame
            layer_1_z = roi[3][stim_index,:]
            layer_2_z = roi[4][stim_index,:]
            bz1 = np.min(layer_1_z)-box_padding
            bz2 = np.max(layer_2_z)+box_padding
            ax1.plot(x,layer_1_z,color=colors[k%len(colors)],alpha=line_alpha,linewidth=line_linewidth)
            ax1.plot(x,layer_2_z,color=colors[k%len(colors)],alpha=line_alpha,linewidth=line_linewidth)
            ax1.plot([bx1,bx2,bx2,bx1,bx1],[bz1,bz1,bz2,bz2,bz1],alpha=box_alpha,linewidth=box_linewidth)
            ax3.plot(z_um,full_profile-offset,color=colors[k%len(colors)],alpha=line_alpha,linewidth=line_linewidth)
            l1zmean = np.mean(layer_1_z)*z_um_per_pixel
            l2zmean = np.mean(layer_2_z)*z_um_per_pixel
            ax3.axvline(l1zmean,color=colors[k%len(colors)],alpha=line_alpha,linewidth=line_linewidth,linestyle=':')
            ax3.axvline(l2zmean,color=colors[k%len(colors)],alpha=line_alpha,linewidth=line_linewidth,linestyle=':')
        ax2.clear()
        ax2.set_ylim(vlim)
        ax2.set_xlim(tlim)
        ax3.set_xlabel('depth ($\mu m$)')
        ax3.set_xlim(zlim)
        ax3.set_yticks([])
        osv_mat = []
        layer_amplitude_mean_mat = []
        for k,roi in enumerate(rois):
            layer_amplitude_mean = roi[1]
            osv = roi[2]
            osv_mat.append(osv)
            layer_amplitude_mean_mat.append(layer_amplitude_mean)
            ax2.plot(t,osv,linewidth=org_plot_linewidth,alpha=org_plot_alpha,color=colors[k%len(colors)])
        if len(rois)>1:
            # overlay the across-ROI mean velocity in black
            osv_mat = np.array(osv_mat)
            layer_amplitude_mean_mat = np.array(layer_amplitude_mean_mat)
            mosv = np.nanmean(osv_mat,axis=0)
            mlayer_amplitude_mean = np.nanmean(layer_amplitude_mean_mat,axis=0)
            ax2.plot(t,mosv,color='k',alpha=mean_org_plot_alpha,linewidth=mean_org_plot_linewidth)
        ax2.set_xlabel('time (s)')
        ax2.set_ylabel('$v$ ($\mu m$/s)')
        ax2.axvline(0.0,color='g',linestyle='--')
        ax3.set_ylabel('amplitude (ADU)')
        plt.pause(.1)

    # Mouse handler: left-clicks accumulate 4 points defining two layer
    # segments (a new ROI); right-click deletes the ROI under the cursor.
    def onclick(event):
        global rois,click_points,index,abscans,pbscans,tag,correlations,masked_temporal_variance,phase_slope_fitting_error_bscans,temporal_variance
        if event.button==1:
            if event.xdata is None and event.ydata is None:
                # clicked outside plot--clear everything
                print('Clearing.')
                click_points = []
                rois = []
                draw_rois()
            if event.inaxes==ax1:
                if event.button==1:
                    xnewclick = event.xdata
                    ynewclick = event.ydata
                    click_points.append((int(round(xnewclick)),int(round(ynewclick))))
                    if len(click_points)==1:
                        #ax1.clear()
                        #ax1.imshow(20*np.log10(display_bscan),clim=(45,85),cmap='gray')
                        #ax1.plot(click_points[0][0],click_points[0][1],'bo')
                        plt.pause(.1)
                    if len(click_points)==2:
                        # first segment complete -- draw it for feedback
                        x1,x2 = [a[0] for a in click_points]
                        z1,z2 = [a[1] for a in click_points]
                        ax1.plot([x1,x2],[z1,z2],'w-')
                        plt.pause(.1)
                    if len(click_points)==4:
                        # both segments traced: extract layer velocities
                        x1,x2,x3,x4 = [a[0] for a in click_points]
                        z1,z2,z3,z4 = [a[1] for a in click_points]
                        valid = True
                        print('x1=%0.1f,x2=%0.1f,z1=%0.1f,z2=%0.1f'%(x1,x2,z1,z2))
                        print('x3=%0.1f,x4=%0.1f,z3=%0.1f,z4=%0.1f'%(x3,x4,z3,z4))
                        try:
                            if True:
                                layer_amplitude_mean,osv,layer_1_z,layer_2_z,x1,x2,full_profile = blobo.extract_layer_velocities_lines(abscans,pbscans,x1,x2,z1,z2,x3,x4,z3,z4,stim_index=stim_index,refine_z=refine_z)
                            else:
                                layer_amplitude_mean,osv,layer_1_z,layer_2_z,x1,x2,full_profile = blobo.extract_layer_velocities_region(abscans,pbscans,x1,x2,z1,z2,stim_index=stim_index,refine_z=refine_z)
                        except Exception as e:
                            print('ROI could not be processed:',e)
                            valid = False
                            click_points = []

                        if valid:
                            # osv is now in radians/block
                            # we want it in nm/s
                            # osv * blocks/sec * nm/radian
                            # nm/radian = 1060.0/(2*np.pi)
                            osv = 1e-3*phase_to_nm(osv)/2.5e-3
                            rois.append((click_points,layer_amplitude_mean,osv,layer_1_z,layer_2_z,x1,x2,full_profile))
                            click_points = []
                            draw_rois()
                            index+=1
        elif event.button==3:
            # right-click: drop any ROI whose first two points bracket the cursor
            x = event.xdata
            y = event.ydata
            new_rois = []
            for idx,roi in enumerate(rois):
                x1,y1 = roi[0][0]
                x2,y2 = roi[0][1]
                if x1<x<x2 and y1<y<y2:
                    pass
                else:
                    new_rois.append(roi)
            rois = new_rois
            draw_rois()

    # Keyboard handler: Enter saves figures and per-ROI arrays,
    # Backspace removes the most recent ROI.
    def onpress(event):
        global rois,click_points,index,tag
        if event.key=='enter':
            outfolder = os.path.join(folder,'layer_velocities_results')
            print('Saving results to %s.'%outfolder)
            os.makedirs(outfolder,exist_ok=True)
            np.save(os.path.join(outfolder,'display_bscan.npy'),display_bscan)
            nrois = len(rois)
            # the first ROI's corner coordinates name the figure files
            fx1,fx2,fx3,fx4 = [a[0] for a in rois[0][0]]
            fz1,fz2,fz3,fz4 = [a[1] for a in rois[0][0]]
            froi_tag = '%s_%d_%d_%d_%d_'%(tag,fx1,fx2,fz1,fz3)
            fig.savefig(os.path.join(outfolder,'figure_%d_rois %s.png'%(nrois,froi_tag)),dpi=300)
            fig.savefig(os.path.join(outfolder,'figure_%d_rois_%s.pdf'%(nrois,froi_tag)))
            fig.savefig(os.path.join(outfolder,'figure_%d_rois_%s.svg'%(nrois,froi_tag)))
            for roi in rois:
                x1,x2,x3,x4 = [a[0] for a in roi[0]]
                z1,z2,z3,z4 = [a[1] for a in roi[0]]
                roi_tag = '%s_%d_%d_%d_%d_'%(tag,x1,x2,z1,z3)
                fnroot = os.path.join(outfolder,roi_tag)
                np.save(fnroot+'rect_points.npy',roi[0])
                np.save(fnroot+'amplitude.npy',roi[1])
                np.save(fnroot+'velocity.npy',roi[2])
                np.save(fnroot+'layer_1_z.npy',roi[3])
                np.save(fnroot+'layer_2_z.npy',roi[4])
            # mirror the results into a flat, shared output folder
            collect_files(outfolder,'./layer_velocities_results')
        elif event.key=='backspace':
            rois = rois[:-1]
            click_points = []
            draw_rois()

    cid = fig.canvas.mpl_connect('button_press_event',onclick)
    pid = fig.canvas.mpl_connect('key_press_event',onpress)
    #plt.subplot(1,2,2,label='foo')
    plt.show()
    return rois
if __name__=='__main__':
    # optional CLI argument: a folder (default '.')
    if len(sys.argv)<2:
        folder = '.'
    else:
        folder = sys.argv[1]

    if os.path.split(folder)[1]=='org':
        # the folder itself is an ORG results folder -- view it directly
        plot(folder)
    else:
        # otherwise, find and view every 'org' folder beneath it
        org_folders = pathlib.Path(folder).rglob('org')
        org_folders = [str(f) for f in org_folders]
        org_folders.sort()
        for of in org_folders:
            print('Working on %s.'%of)
            plot(of)
| [
"rjonnal@gmail.com"
] | rjonnal@gmail.com |
0b18106e68e7b8b158f4ee65cfb171cec8fa86ad | 886a374cc162a64f8a1f68548e7229b0354d232a | /pandajedi/jedisetup/GenTaskSetupper.py | e84219873e9314f9bf8257e191b09ff834f8b641 | [
"Apache-2.0"
] | permissive | pavlo-svirin/panda-jedi | f6cf9a4ddbb4d1525ad08de5167cf97a5f82f6a5 | 635dfbd38d85ebc8f837b06cbea1203daf291a71 | refs/heads/master | 2020-03-23T10:54:34.911666 | 2017-02-15T22:52:42 | 2017-02-15T22:52:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | from pandajedi.jedicore.MsgWrapper import MsgWrapper
from pandajedi.jedicore import Interaction
from TaskSetupperBase import TaskSetupperBase
# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger
logger = PandaLogger().getLogger(__name__.split('.')[-1])
# task setup for general purpose
class GenTaskSetupper (TaskSetupperBase):
    """General-purpose task setupper that performs no DDM-side setup.

    doSetup does nothing with the task or its datasets and simply reports
    success, letting such tasks proceed through the JEDI pipeline.
    """
    # constructor
    def __init__(self,taskBufferIF,ddmIF):
        TaskSetupperBase.__init__(self,taskBufferIF,ddmIF)

    # main to setup task
    def doSetup(self,taskSpec,datasetToRegister,pandaJobs):
        # nothing to set up; SC_SUCCEEDED is inherited from the base/Interaction layer
        return self.SC_SUCCEEDED
| [
"tmaeno@bnl.gov"
] | tmaeno@bnl.gov |
0ee9c877642b14ad79d684f02024646632c5e64e | 62edb9b550ef41899e8d80edbd72fc66898c37b8 | /swagger_client/models/linked_artifact.py | 17552248e7be92499bab954997a82fed56eb415f | [
"Apache-2.0"
] | permissive | isabella232/qtest-swagger-client | 6a5575655b8af16f25fdde1eef056fec1c128081 | 28220aa95d878922ca4b35c325706932adabea4e | refs/heads/master | 2023-07-11T00:50:27.980979 | 2018-06-20T15:48:02 | 2018-06-20T15:48:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,941 | py | # coding: utf-8
"""
qTest Manager API Version 8.6 - 9.1
qTest Manager API Version 8.6 - 9.1
OpenAPI spec version: 8.6 - 9.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class LinkedArtifact(object):
    """Swagger model for an artifact linked to another qTest object.

    Hand-tightened re-implementation of the generated model; behaviour
    (attributes, properties, dict/str conversion, equality) is unchanged.
    """

    def __init__(self, id=None, pid=None, link_type=None, _self=None):
        """Create a LinkedArtifact.

        swagger_types maps attribute names to their declared types;
        attribute_map maps attribute names to their JSON keys.
        """
        self.swagger_types = {
            'id': 'int',
            'pid': 'str',
            'link_type': 'str',
            '_self': 'str'
        }

        self.attribute_map = {
            'id': 'id',
            'pid': 'pid',
            'link_type': 'link_type',
            '_self': 'self'
        }

        self._id = id
        self._pid = pid
        self._link_type = link_type
        self.__self = _self

    @property
    def id(self):
        """ID of the linked artifact."""
        return self._id

    @id.setter
    def id(self, id):
        """Set the ID of the linked artifact."""
        self._id = id

    @property
    def pid(self):
        """PID of the linked artifact."""
        return self._pid

    @pid.setter
    def pid(self, pid):
        """Set the PID of the linked artifact."""
        self._pid = pid

    @property
    def link_type(self):
        """Type of relationship between the source and linked artifact."""
        return self._link_type

    @link_type.setter
    def link_type(self, link_type):
        """Set the relationship type between the source and linked artifact."""
        self._link_type = link_type

    @property
    def _self(self):
        """URL of the linked artifact."""
        return self.__self

    @_self.setter
    def _self(self, _self):
        """Set the URL of the linked artifact."""
        self.__self = _self

    def to_dict(self):
        """Return the model's properties as a plain dict keyed by attribute name."""
        def convert(value):
            # recursively serialize nested models, lists and dicts
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: convert(getattr(self, attr)) for attr in self.swagger_types}

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when `other` is a LinkedArtifact with identical state."""
        if not isinstance(other, LinkedArtifact):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """True when the two objects differ."""
        return not self == other
| [
"ryan.gard@rackspace.com"
] | ryan.gard@rackspace.com |
481ad6fb62ef15a1ee98f3b5f4350de4a9dcbd52 | 978c9a1dd27a30b32eceed7f1518a26292695891 | /python/2021/other/weather_api.py | f0b71270fdf2f0517f792a8c2216904cb24f3455 | [] | no_license | detcitty/100DaysOfCode | 4da3407bdc4170f9d042f49e6c94a8469f8808f5 | a3d989ea56491f89ece5191d5246166ca01d2602 | refs/heads/master | 2023-08-09T04:45:51.842305 | 2023-07-21T17:02:08 | 2023-07-21T17:02:08 | 178,976,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | import os
import requests
import json
KEY = os.getenv('AQS_API_KEY')
EMAIL = os.getenv('MY_EMAIL')
print(KEY)
url = ' https://aqs.epa.gov/data/api/moniters/bySite'
params = {
'email': EMAIL,
'key': KEY,
'param': 'ALL',
'bdate': 20210101,
'edate': 20210214,
'state': 49,
'county': 35,
'site': 13
}
def jprint(obj):
    """Pretty-print `obj` as sorted, indented JSON.

    The original computed `text` but never printed it, and -- due to a
    mis-indentation -- contained the HTTP request and a recursive call to
    itself, which would have recursed forever if invoked.
    """
    text = json.dumps(obj, sort_keys=True, indent=4)
    print(text)


if __name__ == "__main__":
    # Fetch the monitor metadata and pretty-print the JSON response
    # (module-level in the original script's intent; guarded here so the
    # module can be imported without performing network I/O).
    response = requests.get(url, params=params)
    jprint(response.json())
"devin.etcitty@gmail.com"
] | devin.etcitty@gmail.com |
ed90d21b756c2faab22171990cb6be9c38c4d785 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/0e16a5f3ee9b8c7e931b860f7790ea9a6197651b-<install>-bug.py | cf7b852b3186bff51a0088e47a08ccab0dae2941 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,338 | py | def install(self):
if self.scm:
tmp_file = RoleRequirement.scm_archive_role(**self.spec)
elif self.src:
if os.path.isfile(self.src):
tmp_file = self.src
elif ('://' in self.src):
role_data = self.src
tmp_file = self.fetch(role_data)
else:
api = GalaxyAPI(self.galaxy)
role_data = api.lookup_role_by_name(self.src)
if (not role_data):
raise AnsibleError(('- sorry, %s was not found on %s.' % (self.src, api.api_server)))
role_versions = api.fetch_role_related('versions', role_data['id'])
if (not self.version):
if (len(role_versions) > 0):
loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions]
loose_versions.sort()
self.version = str(loose_versions[(- 1)])
elif role_data.get('github_branch', None):
self.version = role_data['github_branch']
else:
self.version = 'master'
elif (self.version != 'master'):
if (role_versions and (self.version not in [a.get('name', None) for a in role_versions])):
raise AnsibleError(('- the specified version (%s) of %s was not found in the list of available versions (%s).' % (self.version, self.name, role_versions)))
tmp_file = self.fetch(role_data)
else:
raise AnsibleError('No valid role data found')
if tmp_file:
display.debug(('installing from %s' % tmp_file))
if (not tarfile.is_tarfile(tmp_file)):
raise AnsibleError('the file downloaded was not a tar.gz')
else:
if tmp_file.endswith('.gz'):
role_tar_file = tarfile.open(tmp_file, 'r:gz')
else:
role_tar_file = tarfile.open(tmp_file, 'r')
meta_file = None
members = role_tar_file.getmembers()
for member in members:
if (self.META_MAIN in member.name):
meta_file = member
break
if (not meta_file):
raise AnsibleError('this role does not appear to have a meta/main.yml file.')
else:
try:
self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
except:
raise AnsibleError('this role does not appear to have a valid meta/main.yml file.')
display.display(('- extracting %s to %s' % (self.name, self.path)))
try:
if os.path.exists(self.path):
if (not os.path.isdir(self.path)):
raise AnsibleError('the specified roles path exists and is not a directory.')
elif (not getattr(self.options, 'force', False)):
raise AnsibleError(('the specified role %s appears to already exist. Use --force to replace it.' % self.name))
elif (not self.remove()):
raise AnsibleError(("%s doesn't appear to contain a role.\n please remove this directory manually if you really want to put the role here." % self.path))
else:
os.makedirs(self.path)
for member in members:
if (member.isreg() or member.issym()):
parts = member.name.split(os.sep)[1:]
final_parts = []
for part in parts:
if ((part != '..') and ('~' not in part) and ('$' not in part)):
final_parts.append(part)
member.name = os.path.join(*final_parts)
role_tar_file.extract(member, self.path)
self._write_galaxy_install_info()
except OSError as e:
raise AnsibleError(('Could not update files in %s: %s' % (self.path, str(e))))
display.display(('- %s was installed successfully' % self.name))
try:
os.unlink(tmp_file)
except (OSError, IOError) as e:
display.warning(('Unable to remove tmp file (%s): %s' % (tmp_file, str(e))))
return True
return False | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
d3878e2d9c6758ee16ae2176a95d594c2e3238eb | 0ee88932af5b6ed088e471abcbd5f40fd9cbd688 | /Course/Book/Programmer_avec_Python3/8-Tkinter/attractionclic.py | 0e6e7adc9081b4ab66e95a67b4f2a1cbe9c66bd3 | [] | no_license | BjaouiAya/Cours-Python | 48c740966f9814e1045035ffb902d14783d36194 | 14b306447e227ddc5cb04b8819f388ca9f91a1d6 | refs/heads/master | 2021-06-10T22:17:38.731030 | 2016-11-11T16:45:05 | 2016-11-11T16:45:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,009 | py | #! /usr/bin/env python
# -*- coding:Utf8 -*-
"PROGRAMME AUTOUR DE L'ATTRACTION TERRESTRE AVEC CLIC"
################################################################
############# Importation fonction et modules : ################
################################################################
from tkinter import *
from math import sqrt
###################################################################################################
############# Gestion d'évènements : définition de différentes fonctions utiliées : ##############
###################################################################################################
def avance(n, xcoord, ycoord):
"Procédure générale"
global x, y
x[n], y[n] = x[n] + xcoord, y[n] + ycoord
can1.coords(astre[n], x[n], y[n], x[n]+xx, y[n]+yy)
"distance entre le 2 astres"
distanceastres = mesuredistance(x[0], x[1], y[0], y[1])
"distance en km entre les 2 astres"
distancereele = distanceastres * 1e9 # assimile 1 pixel à 1 000 000 de km
"force gravittionelle entre les 2 astres"
force = forceG(m1, m2, distancereele)
distance.configure(text = 'Distance de ' + str(distancereele) + ' Km')
forcegrav.configure(text = 'Force de ' + str(force) + ' KN')
decalage = distanceastres / 10
def avanceclic(event):
"Procédure générale"
global x, y
x[masseclic], y[masseclic] = event.x-xx/2, event.y-yy/2
"on décale l'astre afin de le faire apparaître au centre du clic et non en décalage"
can1.coords(astre[masseclic], x[masseclic], y[masseclic], x[masseclic]+xx, y[masseclic]+yy)
"distance entre les 2 astres : on déduit de chaques astres la moitié afin de corriger l'écart dû au clic (clic prend des coordonnées point haut à gauche"
distanceastres = mesuredistance(x[0], x[1], y[0], y[1])
"distance en km entre les 2 astres"
distancereele = distanceastres * 1e9 # assimile 1 pixel à 1 000 000 de km
"force gravittionelle entre les 2 astres"
force = forceG(m1, m2, distancereele)
distance.configure(text = 'Distance de ' + str(distancereele) + ' Km')
forcegrav.configure(text = 'Force de ' + str(force) + ' KN')
decalage = distanceastres / 10
def forceG(m1, m2, distanceastres):
"force de gravitation s'exerçant entre m1 et m2 pour une distance di"
if distanceastres == 0: # evite une division par 0 qui se solde par une erreur
return 'infini'
return int((m1*m2*6.67e-11/distanceastres**2)/1000)
def mesuredistance(x1, x2, y1, y2):
d = int(sqrt((x2 - x1)**2 + (y2 - y1)**2))
return d
def deplacement_gauche1():
avance(0, -decalage, 0)
def deplacement_droite1():
avance(0, decalage, 0)
def deplacement_bas1():
avance(0, 0, decalage)
def deplacement_haut1():
avance(0, 0, -decalage)
def deplacement_gauche2():
avance(1, -decalage, 0)
def deplacement_droite2():
avance(1, decalage, 0)
def deplacement_bas2():
avance(1, 0, decalage)
def deplacement_haut2():
avance(1, 0, -decalage)
def selection1():
global masseclic
masseclic = 0
def selection2():
global masseclic
masseclic = 1
######################################################
############## Programme principal : #################
######################################################
"coordonnées de base"
x = [50, 10] # liste pour les coordonnées en x des astres
y = [100, 50] # liste pour les coordonnées en y des astres
"taille pointeur"
xx, yy = 30, 30
"masse des astres"
m1 = 6e24
m2 = 6e24
"décalage de base"
decalage = 5
masseclic = 0 # permet de sélectionner une ou l'autre des masses
"Liste permettant de mémoriser les indices du dessin"
astre = [0]*2 # liste servant à mémoriser les références des dessins
"widgets"
fen1 = Tk()
fen1.title("Attration atrale")
can1 = Canvas(fen1, width = 400, height = 200, bg = 'grey')
can1.grid(row =2, column =1, columnspan =3, padx = 20, pady = 20)
astre[0] = can1.create_oval(x[0], y[0], x[0]+xx, y[0]+yy, width = 2, fill = 'blue')
astre[1] = can1.create_oval(x[1], y[1], x[1]+xx, y[1]+yy, width = 2, fill = 'green')
"textes des différentes fenêtres"
valmasse1 = Label(fen1, text = 'Astre 1 : '+ str(m1) + ' Kg')
valmasse2 = Label(fen1, text = 'Astre 2 : '+ str(m2) + ' Kg')
distance = Label(fen1)
forcegrav = Label(fen1)
valmasse1.grid(row = 1, column = 1, padx = 5, pady = 5, sticky = W)
valmasse2.grid(row = 1, column = 3, padx = 5, pady = 5, sticky = E)
distance.grid(row = 4, column = 1, padx = 5, pady = 5)
forcegrav.grid(row = 4, column = 3, padx = 5, pady = 5)
############################################
"GROUPE ASTRE 1 AVEC 4 BOUTTONS"
fra1 = Frame(fen1) # association dans un cadre un ensemble de bouttons
fra1.grid(row = 3, column = 1, sticky = W, padx = 10, pady = 10)
Button(fra1, fg = 'blue', command = deplacement_bas1, text = 'v').pack(side = LEFT)
Button(fra1, fg = 'blue', command = deplacement_haut1, text = '^').pack(side = LEFT)
Button(fra1, fg = 'blue', command = deplacement_droite1, text = '->').pack(side = LEFT)
Button(fra1, fg = 'blue', command = deplacement_gauche1, text = '<-').pack(side = LEFT)
"GROUPE ASTRE 2 AVEC 4 BOUTTONS"
fra2 = Frame(fen1)
fra2.grid(row = 3, column = 3, sticky = E, padx = 10, pady = 10)
Button(fra2, fg = 'green', command = deplacement_bas2, text = 'v').pack(side =LEFT)
Button(fra2, fg = 'green', command = deplacement_haut2, text = '^').pack(side =LEFT)
Button(fra2, fg = 'green', command = deplacement_droite2, text = '->').pack(side =LEFT)
Button(fra2, fg = 'green', command = deplacement_gauche2, text = '<-').pack(side =LEFT)
#############################################
"permet de bouger les 2 astres par sélection par un boutton puis nouvelle position par clic"
can1.bind("<Button-1>", avanceclic)
Button(fen1, fg = 'black', command = selection1, text = 'Astre bleu').grid(row = 0, column = 1)
Button(fen1, fg = 'black', command = selection2, text = 'Astre vert').grid(row = 0, column = 3)
#############################################
Button(fen1, command = fen1.quit, text = 'Quitter').grid(row = 5, column = 3)
fen1.mainloop()
| [
"jeremybois@rocketmail.com"
] | jeremybois@rocketmail.com |
e4372895558b92f7d1ddbfcd05f7e499895a365c | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /ZrAnDiPTbmrJMHWHD_2.py | 8cb0011d67307bc9cdcc8ae01162e02aabe29868 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py |
def is_central(txt):
return len(txt)%2 and txt[len(txt)//2] != ' '
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
fbb9cca9d323db892b0cf407f976508f8e25e925 | 7c73ae5308f16030de337e2ad6dc30ac3f4a6d05 | /动态规划/背包问题.py | f1bccd5dbdd11c3581e2c1b56352eae39701c2aa | [] | no_license | pol9111/algorithms | c2508470e4e8c46f4368411a9614adbb210cfa33 | 4bd5d8cb3db9d15c23ebf217181a5f54c00c1687 | refs/heads/master | 2023-06-07T20:47:33.200001 | 2023-05-27T16:50:24 | 2023-05-27T16:50:24 | 150,764,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,799 | py |
# 这里使用了图解中的吉他,音箱,电脑,手机做的测试,数据保持一致
w = [0, 1, 4, 3, 1] #n个物体的重量(w[0]无用)
p = [0, 1500, 3000, 2000, 2000] #n个物体的价值(p[0]无用)
n = len(w) - 1 #计算n的个数
m = 4 #背包的载重量
x = [] #装入背包的物体,元素为True时,对应物体被装入(x[0]无用)
v = 0
#optp[i][j]表示在前i个物体中,能够装入载重量为j的背包中的物体的最大价值
optp = [[0 for col in range(m + 1)] for raw in range(n + 1)]
#optp 相当于做了一个n*m的全零矩阵的赶脚,n行为物件,m列为自背包载重量
print(optp)
def knapsack_dynamic(w, p, n, m, x):
#计算optp[i][j]
for i in range(1, n + 1): # 物品一件件来
for j in range(1, m + 1): # j为子背包的载重量,寻找能够承载物品的子背包
if j >= w[i]: # 当物品的重量小于背包能够承受的载重量的时候,才考虑能不能放进去
# optp[i - 1][j]是上一个单元的值, optp[i - 1][j - w[i]]为剩余空间的价值
optp[i][j] = max(optp[i - 1][j], optp[i - 1][j - w[i]] + p[i])
else: # 能放下, 就减去重量加上价格, 0 + 1500
optp[i][j] = optp[i - 1][j]
print(optp)
#递推装入背包的物体,寻找跳变的地方,从最后结果开始逆推
j = m
for i in range(n, 0, -1):
if optp[i][j] > optp[i - 1][j]:
x.append(i)
j = j - w[i]
#返回最大价值,即表格中最后一行最后一列的值
v = optp[n][m]
return v
print('最大值为:' + str(knapsack_dynamic(w, p, n, m, x)))
print('物品的索引:',x)
print('物品的索引:',optp)
| [
"biscuit36@163.com"
] | biscuit36@163.com |
b71cfdda577fac6fe368c3a6dae442d8a5020cd7 | d77f44f98f695a3bdb00f021ad2e685483b032c5 | /examples/plot_events.py | f47d98975f040e2e7d60c3d51cd76512b703b869 | [
"MIT"
] | permissive | thomasgas/pyeventio | d8d5df32f8f644b7f8877ba370d3c5179bef58a8 | 0edfb05b56ed3a3e8a37c0292cede90598464b8f | refs/heads/master | 2020-04-21T21:59:11.105773 | 2019-04-29T16:04:51 | 2019-04-29T16:04:51 | 169,896,133 | 0 | 0 | MIT | 2019-02-09T18:01:11 | 2019-02-09T18:01:10 | null | UTF-8 | Python | false | false | 2,319 | py | import matplotlib.pyplot as plt
import numpy as np
from argparse import ArgumentParser
from functools import lru_cache
import astropy.units as u
from ctapipe.instrument import CameraGeometry
from ctapipe.visualization import CameraDisplay
from eventio.simtel import SimTelFile
parser = ArgumentParser()
parser.add_argument('inputfile')
args = parser.parse_args()
@lru_cache()
def build_cam_geom(simtel_file, telescope_id):
cam_data = simtel_file.telescope_descriptions[telescope_id]['camera_settings']
if cam_data['pixel_shape'][0] == 2:
pix_type = 'square'
pix_rotation = 0 * u.deg
elif cam_data['pixel_shape'][0] == 1:
pix_type = 'hexagonal'
# LST has 0 deg rotation, MST 30 (flat top vs. pointy top hexagons)
if cam_data['n_pixels'] == 1855:
pix_rotation = 0 * u.deg
else:
pix_rotation = 30 * u.deg
# if pix_type == -1, we have to guess
elif cam_data['pixel_shape'][0] == -1:
if cam_data['n_pixels'] > 2000:
pix_type = 'square'
pix_rotation = 0 * u.deg
else:
pix_type = 'hexagonal'
# LST has 0 deg rotation, MST 30 (flat top vs. pointy top hexagons)
if cam_data['n_pixels'] == 1855:
pix_rotation = 0 * u.deg
else:
pix_rotation = 30 * u.deg
return CameraGeometry(
cam_id='CAM-{}'.format(telescope_id),
pix_id=np.arange(cam_data['n_pixels']),
pix_x=cam_data['pixel_x'] * u.m,
pix_y=cam_data['pixel_y'] * u.m,
pix_area=cam_data['pixel_area'] * u.m**2,
pix_type=pix_type,
cam_rotation=cam_data['cam_rot'] * u.rad,
pix_rotation=pix_rotation,
)
with SimTelFile(args.inputfile) as f:
for array_event in f:
print('Event:', array_event['event_id'])
for telescope_id, event in array_event['telescope_events'].items():
print('Telescope:', telescope_id)
data = event.get('adc_samples')
if data is None:
data = event['adc_sums'][:, :, np.newaxis]
image = data[0].sum(axis=1)
cam = build_cam_geom(f, telescope_id)
plt.figure()
disp = CameraDisplay(cam)
disp.image = image
plt.show()
| [
"maximilian.noethe@tu-dortmund.de"
] | maximilian.noethe@tu-dortmund.de |
6aecda70e197f8b3c3b83e2030bc806ffecc4a41 | 6b96a11195094a0023a059ba7d5df95ce58c56f1 | /1359A.py | 643057f4f600e4de77f8dcee5062457fd853ebad | [] | no_license | ldfdev/CodeForces-Div2-Problems | d932b09ee14a430fd0054d5b295f6016553be2b7 | d18824a4330a4593099d249496ae22f3f69d5f44 | refs/heads/master | 2021-08-11T03:29:18.772870 | 2021-07-25T07:21:09 | 2021-07-29T20:09:43 | 72,371,376 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | def inp():
return list(map(int, input().split()))
def solve():
[cards, jokers, players] = inp()
if jokers == 0:
return 0
if cards == jokers:
return 0
lucky_player = cards // players
if jokers <= lucky_player:
return jokers
jokers -= lucky_player
if jokers % (players - 1) == 0:
return lucky_player - (jokers // (players - 1))
return lucky_player - 1 - (jokers // (players - 1))
if __name__=='__main__':
[tests] = inp()
for _ in range(tests):
print(solve()) | [
"ldf.develop@gmail.com"
] | ldf.develop@gmail.com |
a63c46202902cccf5d5730aa9ac77f7507d7dcc0 | eb8b5cde971573668800146b3632e43ed6e493d2 | /python/oneflow/__init__.py | c1901dd47d9c56cd52c5bf7dbb18b6f8b11b23b7 | [
"Apache-2.0"
] | permissive | big-data-ai/oneflow | 16f167f7fb7fca2ce527d6e3383c577a90829e8a | b1c67df42fb9c5ab1335008441b0273272d7128d | refs/heads/master | 2023-07-08T21:21:41.136387 | 2021-08-21T11:31:14 | 2021-08-21T11:31:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,613 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import oneflow._oneflow_internal
oneflow._oneflow_internal.CheckAndClearRegistryFlag()
Size = oneflow._oneflow_internal.Size
device = oneflow._oneflow_internal.device
placement = oneflow._oneflow_internal.placement
locals()["dtype"] = oneflow._oneflow_internal.dtype
locals()["char"] = oneflow._oneflow_internal.char
locals()["float16"] = oneflow._oneflow_internal.float16
locals()["half"] = oneflow._oneflow_internal.float16
locals()["float32"] = oneflow._oneflow_internal.float32
locals()["float"] = oneflow._oneflow_internal.float
locals()["double"] = oneflow._oneflow_internal.double
locals()["float64"] = oneflow._oneflow_internal.float64
locals()["int8"] = oneflow._oneflow_internal.int8
locals()["int"] = oneflow._oneflow_internal.int32
locals()["int32"] = oneflow._oneflow_internal.int32
locals()["int64"] = oneflow._oneflow_internal.int64
locals()["long"] = oneflow._oneflow_internal.int64
locals()["uint8"] = oneflow._oneflow_internal.uint8
locals()["record"] = oneflow._oneflow_internal.record
locals()["tensor_buffer"] = oneflow._oneflow_internal.tensor_buffer
from oneflow.version import __version__
_DEPRECATED = set()
def oneflow_deprecate(*api_names, **kwargs):
def Decorator(func_or_class):
_DEPRECATED.add(func_or_class)
return func_or_class
return Decorator
def is_deprecated(func_or_class):
return (
isinstance(func_or_class, collections.Hashable) and func_or_class in _DEPRECATED
)
from . import sbp
import atexit
import oneflow.framework.c_api_util
import oneflow.framework.register_class_method_util as register_class_method_util
import oneflow.framework.register_python_callback
INVALID_SPLIT_AXIS = oneflow._oneflow_internal.INVALID_SPLIT_AXIS
register_class_method_util.RegisterMethod4Class()
oneflow._oneflow_internal.RegisterGILForeignLockHelper()
import oneflow.framework.env_util as env_util
import oneflow.framework.scope_util as scope_util
import oneflow.framework.session_context as session_ctx
from oneflow.framework.multi_client_session import MultiClientSession
if not env_util.HasAllMultiClientEnvVars():
env_util.SetDefaultMultiClientEnvVars()
oneflow._oneflow_internal.SetIsMultiClient(True)
env_util.api_env_init()
oneflow._oneflow_internal.InitDefaultConsistentTransportTokenScope()
session_ctx.OpenDefaultSession(
MultiClientSession(oneflow._oneflow_internal.NewSessionId())
)
scope_util.InitScopeStack()
oneflow._oneflow_internal.EnableEagerEnvironment(True)
del env_util
from oneflow.framework import python_callback, register_python_callback
oneflow._oneflow_internal.RegisterGlobalForeignCallback(
python_callback.global_python_callback
)
del python_callback
del register_python_callback
def _SyncOnMasterFn():
if not oneflow._oneflow_internal.IsEnvInited():
return
if oneflow.framework.distribute.is_multi_client():
oneflow._oneflow_internal.eager.multi_client.Sync()
elif oneflow.framework.distribute.get_rank() == 0:
oneflow._oneflow_internal.eager.single_client.Sync()
atexit.register(oneflow._oneflow_internal.SetShuttingDown)
atexit.register(oneflow._oneflow_internal.DestroyEnv)
atexit.register(oneflow.framework.session_context.TryCloseDefaultSession)
atexit.register(_SyncOnMasterFn)
del atexit
del oneflow
import oneflow.framework.docstr as docstr
from oneflow.framework.docstr.utils import register_docstr
register_docstr()
del register_docstr
del docstr
from oneflow.autograd import grad_enable, no_grad, inference_mode, is_grad_enabled
import oneflow.nn.image
import oneflow.nn.modules.acosh
import oneflow.nn.modules.activation
import oneflow.nn.modules.argwhere
import oneflow.nn.modules.atan2
import oneflow.nn.modules.atanh
import oneflow.nn.modules.bmm
import oneflow.nn.modules.constant
import oneflow.nn.modules.diag
import oneflow.nn.modules.flip
import oneflow.nn.modules.floor
import oneflow.nn.modules.greater
import oneflow.nn.modules.greater_equal
import oneflow.nn.modules.in_top_k
import oneflow.nn.modules.masked_select
import oneflow.nn.modules.math_ops
import oneflow.nn.modules.nonzero
import oneflow.nn.modules.norm
import oneflow.nn.modules.permute
import oneflow.nn.modules.round
import oneflow.nn.modules.sign
import oneflow.nn.modules.sinh
import oneflow.nn.modules.tan
import oneflow.nn.modules.tensor_ops
from oneflow.framework.check_point_v2 import Load as load
from oneflow.framework.check_point_v2 import save
from oneflow.framework.dtype import convert_oneflow_dtype_to_numpy_dtype, dtypes
from oneflow.framework.env_util import (
api_enable_eager_execution as enable_eager_execution,
)
from oneflow.framework.function_util import FunctionConfig
from oneflow.framework.function_util import FunctionConfig as function_config
from oneflow.framework.generator import create_generator as Generator
from oneflow.framework.generator import default_generator, manual_seed
# NOTE(chengcheng) oneflow.Model is unavailable now.
# from oneflow.framework.model import Model
from oneflow.framework.scope_util import api_current_scope as current_scope
from oneflow.framework.tensor import Tensor
from oneflow.framework.tensor import tensor as tensor
from oneflow.framework.tensor import is_nonzero
from oneflow.nn.modules.abs import abs_op as abs
from oneflow.nn.modules.acos import acos_op as acos
from oneflow.nn.modules.acosh import acosh_op as acosh
from oneflow.nn.modules.acosh import arccosh_op as arccosh
from oneflow.nn.modules.activation import gelu_op as gelu
from oneflow.nn.modules.activation import mish_op as mish
from oneflow.nn.modules.activation import sigmoid_op as sigmoid
from oneflow.nn.modules.activation import softmax_op as softmax
from oneflow.nn.modules.activation import tanh_op as tanh
from oneflow.nn.modules.activation import silu_op as silu
from oneflow.nn.modules.activation import selu_op as selu
from oneflow.nn.modules.activation import softsign_op as softsign
from oneflow.nn.modules.activation import mish_op as mish
from oneflow.nn.modules.adaptive_pool import (
adaptive_avg_pool1d,
adaptive_avg_pool2d,
adaptive_avg_pool3d,
)
from oneflow.nn.modules.arange import arange_op as arange
from oneflow.nn.modules.argmax import argmax_op as argmax
from oneflow.nn.modules.argsort import argsort_op as argsort
from oneflow.nn.modules.argwhere import argwhere_op as argwhere
from oneflow.nn.modules.atan2 import atan2_op as atan2
from oneflow.nn.modules.atanh import arctanh_op as arctanh
from oneflow.nn.modules.atanh import atanh_op as atanh
from oneflow.nn.modules.bmm import bmm_op as bmm
from oneflow.nn.modules.broadcast_like import broadcast_like_op as broadcast_like
from oneflow.nn.modules.cast import cast_op as cast
from oneflow.nn.modules.chunk import chunk_op as chunk
from oneflow.nn.modules.concat import concat_op as cat
from oneflow.nn.modules.constant import ones_like_op as ones_like
from oneflow.nn.modules.constant import ones_op as ones
from oneflow.nn.modules.constant import zeros_like_op as zeros_like
from oneflow.nn.modules.constant import zeros_op as zeros
from oneflow.nn.modules.empty import empty_op as empty
from oneflow.nn.modules.dataset import tensor_buffer_to_list_of_tensors
from oneflow.nn.modules.diag import diag_op as diag
from oneflow.nn.modules.eq import eq_op as eq
from oneflow.nn.modules.eq import eq_op as equal
from oneflow.nn.modules.exp import exp_op as exp
from oneflow.nn.modules.expand import expand_op as expand
from oneflow.nn.modules.flatten import _flow_flatten as flatten
from oneflow.nn.modules.flip import flip_op as flip
from oneflow.nn.modules.floor import floor_op as floor
from oneflow.nn.modules.gather import gather_op as gather
from oneflow.nn.modules.gather_nd import gather_nd_op as gather_nd
from oneflow.nn.modules.greater import greater_op as gt
from oneflow.nn.modules.greater_equal import greater_equal_op as ge
from oneflow.nn.modules.logical_and import logical_and_op as logical_and
from oneflow.nn.modules.logical_or import logical_or_op as logical_or
from oneflow.nn.modules.logical_xor import logical_xor_op as logical_xor
from oneflow.nn.modules.in_top_k import in_top_k_op as in_top_k
from oneflow.nn.modules.index_select import index_select_op as index_select
from oneflow.nn.modules.less import less_op as lt
from oneflow.nn.modules.less_equal import less_equal_op as le
from oneflow.nn.modules.log1p import log1p_op as log1p
from oneflow.nn.modules.masked_fill import masked_fill_op as masked_fill
from oneflow.nn.modules.masked_select import masked_select_op as masked_select
from oneflow.nn.modules.math_ops import _add as add
from oneflow.nn.modules.math_ops import _div as div
from oneflow.nn.modules.math_ops import _mul as mul
from oneflow.nn.modules.math_ops import _reciprocal as reciprocal
from oneflow.nn.modules.math_ops import _sub as sub
from oneflow.nn.modules.math_ops import addmm_op as addmm
from oneflow.nn.modules.math_ops import arcsin_op as arcsin
from oneflow.nn.modules.math_ops import arcsinh_op as arcsinh
from oneflow.nn.modules.math_ops import arctan_op as arctan
from oneflow.nn.modules.math_ops import asin_op as asin
from oneflow.nn.modules.math_ops import asinh_op as asinh
from oneflow.nn.modules.math_ops import atan_op as atan
from oneflow.nn.modules.math_ops import ceil_op as ceil
from oneflow.nn.modules.math_ops import clamp_op as clamp
from oneflow.nn.modules.math_ops import clip_op as clip
from oneflow.nn.modules.math_ops import cos_op as cos
from oneflow.nn.modules.math_ops import cosh_op as cosh
from oneflow.nn.modules.math_ops import erf_op as erf
from oneflow.nn.modules.math_ops import erfc_op as erfc
from oneflow.nn.modules.math_ops import expm1_op as expm1
from oneflow.nn.modules.math_ops import fmod_op as fmod
from oneflow.nn.modules.math_ops import log_op as log
from oneflow.nn.modules.math_ops import minimum as minimum
from oneflow.nn.modules.math_ops import maximum as maximum
from oneflow.nn.modules.math_ops import pow_op as pow
from oneflow.nn.modules.math_ops import rsqrt_op as rsqrt
from oneflow.nn.modules.math_ops import sin_op as sin
from oneflow.nn.modules.math_ops import sqrt_op as sqrt
from oneflow.nn.modules.math_ops import square_op as square
from oneflow.nn.modules.math_ops import std_op as std
from oneflow.nn.modules.math_ops import topk_op as topk
from oneflow.nn.modules.math_ops import variance_op as var
from oneflow.nn.modules.matmul import matmul_op as matmul
from oneflow.nn.modules.meshgrid import meshgrid_op as meshgrid
from oneflow.nn.modules.narrow import narrow_op as narrow
from oneflow.nn.modules.ne import ne_op as ne
from oneflow.nn.modules.ne import ne_op as not_equal
from oneflow.nn.modules.negative import negative_op as neg
from oneflow.nn.modules.negative import negative_op as negative
from oneflow.nn.modules.nonzero import nonzero_op as nonzero
from oneflow.nn.modules.random_ops import bernoulli
from oneflow.nn.modules.random_ops import rand_op as rand
from oneflow.nn.modules.random_ops import randn_op as randn
from oneflow.nn.modules.random_ops import randperm
from oneflow.nn.modules.reduce_ops import _max as max
from oneflow.nn.modules.reduce_ops import _mean as mean
from oneflow.nn.modules.reduce_ops import _min as min
from oneflow.nn.modules.reduce_ops import _sum as sum
from oneflow.nn.modules.reduce_ops import prod_op as prod
from oneflow.nn.modules.repeat import repeat_op as repeat
from oneflow.nn.modules.reshape import reshape_op as reshape
from oneflow.nn.modules.reshape import view_op as view
from oneflow.nn.modules.round import round_op as round
from oneflow.nn.modules.scatter_nd import _scatter_nd_op as scatter_nd
from oneflow.nn.modules.sign import sign_op as sign
from oneflow.nn.modules.sinh import sinh_op as sinh
from oneflow.nn.modules.slice import slice_op as slice
from oneflow.nn.modules.slice import slice_update_op as slice_update
from oneflow.nn.modules.slice import logical_slice_assign_op as logical_slice_assign
from oneflow.nn.modules.softplus import softplus_op as softplus
from oneflow.nn.modules.sort import sort_op as sort
from oneflow.nn.modules.split import split_op as split
from oneflow.nn.modules.squeeze import squeeze_op as squeeze
from oneflow.nn.modules.stack import stack
from oneflow.nn.modules.tan import tan_op as tan
from oneflow.nn.modules.eye import eye_op as eye
from oneflow.nn.modules.tensor_buffer import gen_tensor_buffer
from oneflow.nn.modules.tensor_buffer import (
tensor_buffer_to_tensor_op as tensor_buffer_to_tensor,
)
from oneflow.nn.modules.tensor_buffer import tensor_to_tensor_buffer
from oneflow.nn.modules.tile import tile_op as tile
from oneflow.nn.modules.to import to_op as to
from oneflow.nn.modules.consistent_cast import to_consistent_op as to_consistent
from oneflow.nn.modules.consistent_cast import to_local_op as to_local
from oneflow.nn.modules.transpose import transpose_op as transpose
from oneflow.nn.modules.triu import triu_op as triu
from oneflow.nn.modules.unsqueeze import unsqueeze_op as unsqueeze
from oneflow.nn.modules.where import where_op as where
from oneflow.nn.modules.scatter import *
from oneflow.ops.builtin_ops import BuiltinOp as builtin_op
from oneflow.ops.initializer_util import constant_initializer
from oneflow.ops.initializer_util import glorot_normal_initializer
from oneflow.ops.initializer_util import (
glorot_normal_initializer as xavier_normal_initializer,
)
from oneflow.ops.initializer_util import glorot_uniform_initializer
from oneflow.ops.initializer_util import (
glorot_uniform_initializer as xavier_uniform_initializer,
)
from oneflow.ops.initializer_util import (
kaiming_initializer,
ones_initializer,
random_normal_initializer,
random_uniform_initializer,
truncated_normal_initializer,
variance_scaling_initializer,
zeros_initializer,
)
from . import (
autograd,
distributed,
linalg,
optim,
boxing,
backends,
amp,
) # , saved_model NOTE(chengcheng): unavailable now
import oneflow.utils.data
import oneflow.utils.vision
from oneflow.nn.modules.relu import relu_op as relu
| [
"noreply@github.com"
] | big-data-ai.noreply@github.com |
20d726181454b0925c0dcdf3fe3d16da11d1069b | 5d5f6ba3bdcb52b4750a5f28afa8a1a1019bfc9e | /django/django_orm/booksAuthorsProject/booksAuthorsProject/wsgi.py | 11e09c9106d25e8c452ba88b489cac236a2e5054 | [] | no_license | eDiazGtz/pythonLearning | 06e96f2f5a6e48ac314cb815cf9fbf65d0b7c2c8 | 57d7b2292cf5d9769cce9adf765962c3c0930d6c | refs/heads/master | 2023-06-18T02:16:09.293375 | 2021-05-03T18:09:52 | 2021-05-03T18:09:52 | 335,090,531 | 0 | 0 | null | 2021-05-03T18:09:53 | 2021-02-01T21:35:24 | Python | UTF-8 | Python | false | false | 415 | py | """
WSGI config for booksAuthorsProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'booksAuthorsProject.settings')
application = get_wsgi_application()
| [
"ediaz-gutierrez@hotmail.com"
] | ediaz-gutierrez@hotmail.com |
40e2766769fe9bdd5d32877358a272fe431cd3a1 | 57ddfddd1e11db649536a8ed6e19bf5312d82d71 | /AtCoder/ABC1/ABC111/ABC111B.py | b38776b4863a4119930f4829db45f2d7a6cebc99 | [] | no_license | pgDora56/ProgrammingContest | f9e7f4bb77714dc5088c2287e641c0aa760d0f04 | fdf1ac5d1ad655c73208d98712110a3896b1683d | refs/heads/master | 2023-08-11T12:10:40.750151 | 2021-09-23T11:13:27 | 2021-09-23T11:13:27 | 139,927,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | N = int(input())
for i in range(N, 1000):
if i % 111 == 0:
print(i)
exit(0) | [
"doradora.prog@gmail.com"
] | doradora.prog@gmail.com |
7ad767b1b94d4c9a1df15c7bfc4abe595a0b2a13 | 325bee18d3a8b5de183118d02c480e562f6acba8 | /taiwan/italy/start.py | a394a4bb37e379ca7be1a371406ca0376d18a494 | [] | no_license | waynecanfly/spiderItem | fc07af6921493fcfc21437c464c6433d247abad3 | 1960efaad0d995e83e8cf85e58e1db029e49fa56 | refs/heads/master | 2022-11-14T16:35:42.855901 | 2019-10-25T03:43:57 | 2019-10-25T03:43:57 | 193,424,274 | 4 | 0 | null | 2022-11-04T19:16:15 | 2019-06-24T03:00:51 | Python | UTF-8 | Python | false | false | 1,038 | py | import os
"""
taiwanlistzh下载台湾中文列表,已做更新功能
taiwanlisten下载台湾英文列表,已做更新功能
info_enAll首次存量下载台湾英文基本信息
info_en为下载台湾增量基本信息而生
以下若要更新需覆盖
taiwanFileAllv3下载英文财报,原网站最新只到2018年3月份
info_zhAll下载中文基本信息:"重要子公司基本資料","重要子公司異動說明", "被投資控股公司基本資料" (文件)
info_zh下载中文基本信息:"公司基本資料" (格式化)
info_zh2下载中文基本信息:"歷年變更登記"(文件) 需要界面化才能获取数据,需要windows系统
"""
os.chdir('/root/spiderItem/taiwan/italy/spiders')
os.system("python3 taiwanlistzh.py")
# os.system('python3 info_zhAll.py')
os.chdir('/root/spiderItem/taiwan/italy/script2')
os.system("python3 taiwanlisten.py")
os.system('python3 info_en.py')
# os.system("python3 taiwanFileAllv3.py")
# os.system('python3 info_zh.py')
# os.system('python3 info_zh2.py')
| [
"1370153124@qq.com"
] | 1370153124@qq.com |
7053c848762cc8136391eac6e9c9e13ff7da84f8 | 5f12ba23f879bc96a20ae46aa882be7fbdcbee1d | /sfftk/core/configs.py | 07ab77eae08f2814c65e4e38532e898fd5094f96 | [
"Apache-2.0"
] | permissive | RosaryYao/sfftk | 41a3e33291394b20b96a1d2a19822be51300f4ee | 46e0890d6773bf3482b8e6b3dfe994417af00649 | refs/heads/master | 2022-08-25T19:39:22.513846 | 2020-05-27T08:53:51 | 2020-05-27T08:53:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,980 | py | # -*- coding: utf-8 -*-
"""
``sfftk.core.configs``
======================
This module defines classes and functions to correctly process persistent
configurations. Please see the :doc:`guide to miscellaneous operations <misc>`
for a complete description of working with configs.
"""
from __future__ import print_function
import os
import shutil
import sys
from sfftkrw.core import _dict, _dict_iter_items
from sfftkrw.core.print_tools import print_date
from .. import BASE_DIR
__author__ = 'Paul K. Korir, PhD'
__email__ = 'pkorir@ebi.ac.uk, paul.korir@gmail.com'
__date__ = '2016-08-23'
__updated__ = '2018-02-27'
class Configs(_dict):
    """Class defining configs

    Configurations are stored in a subclass of :py:class:`OrderedDict` (normal :py:class:`dict` for Python 3.7+) with
    appended methods for reading (:py:meth:`.Configs.read()`), writing (:py:meth:`.Configs.write`) and
    clearing (:py:meth:`.Configs.clear`) configs.

    Printing an object of this class displays all configs.

    This class is used as an argument to :py:func:`.configs.load_configs`.
    """
    # Read-only configs bundled with the package; write() refuses to overwrite this file.
    shipped_configs = os.path.join(BASE_DIR, 'sff.conf')

    def __init__(self, config_fn, *args, **kwargs):
        """Initialise the configs against a backing config file

        :param str config_fn: path to the file configs are read from and written to
        """
        self.config_fn = config_fn
        super(Configs, self).__init__(*args, **kwargs)

    def clear(self):
        """Clear all configs held in this object (the backing file is not modified)"""
        # materialise the keys first: deleting entries while iterating the dict
        # directly would raise a RuntimeError
        items_to_clear = [item for item in self]
        for item in items_to_clear:
            del self[item]

    def read(self):
        """Read configs from file

        Lines starting with ``#`` (comments) and blank lines are ignored; every
        other line is parsed as a ``name=value`` pair.
        """
        with open(self.config_fn, 'r') as f:
            for row in f:
                if row[0] == '#':  # comments
                    continue
                if row.strip() == '':  # blank lines
                    continue
                # split on the FIRST '=' only so that values containing '='
                # (e.g. URLs with query strings, base64 tokens) parse correctly
                name, value = row.strip().split('=', 1)
                self[name.strip()] = value.strip()

    def write(self):
        """Write configs to the backing config file

        :return: ``os.EX_OK`` on success; ``1`` when refusing to overwrite shipped configs
        :rtype: int
        """
        # the shipped configs are read-only; direct the user to user/custom config files
        if self.config_fn == self.shipped_configs:
            print_date("Unable to set configs to shipped configs.")
            print_date("Please do not save configs into shipped configs. Use user or custom config files.")
            return 1
        with open(self.config_fn, 'w') as f:
            for name, value in _dict_iter_items(self):
                f.write('{}={}\n'.format(name, value))
        return os.EX_OK

    def __str__(self):
        string = ""
        for name, value in _dict_iter_items(self):
            string += "{:<20} = {:<20}\n".format(name, value)
        return string[:-1]  # drop the trailing newline
def get_config_file_path(args, user_folder='~/.sfftk', user_conf_fn='sff.conf', config_class=Configs):
    """A function that returns the right config path to use depending on the command specified
    The user may specify
    .. code-block:: bash
        sff <cmd> [<sub_cmd>] [--shipped-configs|--config-path] [args...]`
    and we have to decide which configs to use.
    Example:
    - View the notes in the file. If user configs are available use them otherwise use shipped configs
    .. code-block:: bash
        sff notes list file.json
    - View the notes in the file but ONLY use shipped configs.
    .. code-block:: bash
        sff notes list --shipped-configs file.json
    - View the notes in the file but ONLY use custom configs at path
    .. code-block:: bash
        sff notes list --config-path /path/to/sff.conf file.json
    - Get available configs. First check for user configs and fall back on shipped configs
    .. code-block:: bash
        sff config get --all
    - Get configs from the path
    .. code-block:: bash
        sff config get --config-path /path/to/sff.conf --all
        # ignore shipped still!
        sff config get --config-path /path/to/sff.conf --shipped-configs --all
    - Get shipped configs even if user configs exist
    .. code-block:: bash
        sff config get --shipped-configs --all
    - Set configs to user configs. If user configs don't exist copy shipped and add the new config.
    .. code-block:: bash
        sff config set NAME VALUE
    - Set configs to config path. Ignore user and shipped configs
    .. code-block:: bash
        sff config set --config-path /path/to/sff.conf NAME VALUE
    - Fail! Shipped configs are read-only
    .. code-block:: bash
        sff config set --shipped-configs NAME VALUE
    :param args: parsed command-line arguments (reads ``subcommand``, ``config_subcommand``, ``config_path``, ``shipped_configs``, ``verbose``)
    :param user_folder: folder holding the user's config file; default ``'~/.sfftk'``
    :param user_conf_fn: file name of the user's config file; default ``'sff.conf'``
    :param config_class: class providing the ``shipped_configs`` path; default :py:class:`Configs`
    :return: the config file path to use, or ``None`` when a write to the read-only shipped configs was requested
    """
    shipped_configs = config_class.shipped_configs
    user_configs = os.path.expanduser(os.path.join(user_folder, user_conf_fn))
    config_file_path = None
    if args.subcommand == 'config':
        # read-only: get
        if args.config_subcommand == 'get':
            # precedence: explicit path > shipped flag > existing user configs > shipped fallback
            if args.config_path is not None:
                config_file_path = args.config_path
            elif args.shipped_configs:
                config_file_path = shipped_configs
            elif os.path.exists(user_configs):
                config_file_path = user_configs
            else:
                config_file_path = shipped_configs
        # read-write: set, del
        else:
            if args.config_path is not None:
                config_file_path = args.config_path
            elif args.shipped_configs:
                # shipped configs are read-only: returning None signals the failure
                config_file_path = None
            elif os.path.exists(user_configs):
                config_file_path = user_configs
            elif not os.path.exists(user_configs):
                # first write ever: seed the user configs from the shipped ones
                if args.verbose:
                    print_date("User configs not found")
                try:
                    # make the dir if it doesn't exist
                    os.mkdir(os.path.dirname(user_configs))
                except OSError:
                    pass
                # copy the shipped configs to user configs
                if args.verbose:
                    print_date("Copying shipped configs to user configs...")
                shutil.copy(config_class.shipped_configs, user_configs)
                config_file_path = user_configs
    else:
        # any non-'config' command: same precedence as the read-only branch
        if args.config_path is not None:
            config_file_path = args.config_path
        elif args.shipped_configs:
            config_file_path = config_class.shipped_configs
        elif os.path.exists(user_configs):
            config_file_path = user_configs
        else:
            config_file_path = config_class.shipped_configs
    return config_file_path
def load_configs(config_file_path, config_class=Configs):
    """Read and return the configs stored in the given file

    :param str config_file_path: a path to a file with configs
    :param class config_class: the config class; default: Configs
    :return: the populated config object
    :rtype: Configs
    """
    loaded = config_class(config_file_path)
    loaded.read()
    return loaded
def get_configs(args, configs):
    """Display the value of the named config (or every config with ``--all``)

    :param args: parsed arguments
    :type args: `argparse.Namespace`
    :param dict configs: configuration options
    :return: ``os.EX_OK`` on success, ``1`` when the named config is missing
    :rtype: int
    """
    if args.all:
        print_date("Listing all {} configs...".format(len(configs)))
        # dump the whole config object to stderr (fixme: use print_date)
        print(configs, file=sys.stderr)
        return os.EX_OK
    print_date("Getting config {}...".format(args.name))
    # look up the single named config
    if args.name not in configs:
        print_date("No config with name {}".format(args.name))
        return 1
    # show the value (fixme: use print_date)
    print(configs[args.name])
    return os.EX_OK
def set_configs(args, configs):
    """Set the config of the given name to have the given value and persist it

    :param args: parsed arguments
    :type args: `argparse.Namespace`
    :param dict configs: configuration options
    :return: the status reported by :py:meth:`Configs.write`
    :rtype: int
    """
    name, value = args.name, args.value
    print_date("Setting config {} to value {}...".format(name, value))
    # record the new/updated config on the mapping
    configs[name] = value
    if args.verbose:
        # fixme: use print_date
        print(configs)
    # persist the whole mapping back to disk
    return configs.write()
def del_configs(args, configs):
    """Delete the named config (or every config with ``--all``) and persist

    :param args: parsed arguments
    :type args: :py:class:`argparse.Namespace`
    :param dict configs: configuration options
    :return: ``configs.write()`` status, or ``os.EX_DATAERR`` when the named config is missing
    :rtype: int
    """
    if args.all:
        print_date("Deleting all {} configs...".format(len(configs)))
        # empty all values
        configs.clear()
    else:
        # del the named config; the value lookup must happen INSIDE the
        # try block — previously it ran before it, so a missing name
        # raised an uncaught KeyError instead of returning EX_DATAERR
        try:
            value = configs[args.name]
            print_date("Deleting config {} having value {}...".format(args.name, value))
            del configs[args.name]
        except KeyError:
            print_date("No config with name {}".format(args.name))
            return os.EX_DATAERR
    if args.verbose:
        # fixme: use print_date
        print(configs)
    # save the config
    return configs.write()
| [
"pkorir@ebi.ac.uk"
] | pkorir@ebi.ac.uk |
5b240e6a01eaaca3b6de4c49d75c041e4867cf3e | 6d1df0707865398d15f508390ca595215210b504 | /xmonad/poll_weather.py | 0d9a3c2243aa6bfa26632e7e321d24e34684e44b | [] | no_license | supermiiiiii/scripts | 94a27741432c40781b3d577334e72f73f1efb914 | 524de087175d2e8b7e3adeacdd648fed9e07e204 | refs/heads/master | 2023-02-24T19:15:38.888248 | 2021-01-30T14:49:49 | 2021-01-30T14:49:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,351 | py | """Writes a weather report to some bar using a FIFO."""
import datetime as dt
import re
import subprocess as sp # noqa: F401
import sys
import time
from typing import NamedTuple, Optional, Sequence
import gutils
from gutils.io import eprint
from loguru import logger as log
@gutils.catch
def main(argv: Sequence[str] = None) -> int:
    """Script entry point: parse CLI args, configure logging, run the report.

    :return: process exit code (0 on success)
    """
    if argv is None:
        argv = sys.argv
    args = parse_cli_args(argv)
    gutils.logging.configure(__file__, debug=args.debug, verbose=args.verbose)
    return run(args)
class Arguments(NamedTuple):
    """Validated command-line options (built by :func:`parse_cli_args`)."""
    debug: bool        # logging flag, consumed by gutils.logging.configure
    verbose: bool      # logging flag, consumed by gutils.logging.configure
    zipcode: str       # zip code of the location to report on
    weather_cmd: str   # executable used to fetch the weather report
    attempts: int      # maximum number of attempts on failure/timeout
    timeout: int       # per-attempt timeout, in seconds
    max_delay: int     # cap on the sleep between attempts, in seconds
def parse_cli_args(argv: Sequence[str]) -> Arguments:
parser = gutils.ArgumentParser()
parser.add_argument(
"zipcode", nargs="?", default="08060", help="zip code of location"
)
parser.add_argument(
"--weather-cmd",
default="weather",
help=(
"The command used to retrieve the weather report from the"
" command-line."
),
)
parser.add_argument(
"-n",
"--attempts",
type=int,
default=7,
help=(
"How many times should we attempt to run this command in the event"
" of failure/timeout?"
),
)
parser.add_argument(
"-t",
"--timeout",
type=int,
default=30,
help=(
"How long should we wait (in seconds) for the this command to"
" complete?"
),
)
parser.add_argument(
"--max-delay",
default=300,
type=int,
help="The maximum sleep time between command attempts.",
)
args = parser.parse_args(argv[1:])
kwargs = dict(args._get_kwargs())
return Arguments(**kwargs)
def run(args: Arguments) -> int:
    """Fetch the raw weather report, extract its fields and print one line.

    :return: 0 on success, 1 when the weather command ultimately failed
    """
    raw_output = run_weather_cmd(
        args.weather_cmd,
        args.zipcode,
        attempts=args.attempts,
        timeout=args.timeout,
        max_delay=args.max_delay,
    )
    if raw_output is None:
        eprint(f"[ERROR] The {args.weather_cmd!r} command failed.")
        return 1
    # pull the individual fields out of the raw report text
    loc = get_group("Current conditions at (.*)\n", raw_output)
    temp = get_temp(raw_output)
    humidity = get_humidity(raw_output)
    sky = get_group(r"Sky conditions: ([A-z\s]+)$", raw_output)
    wind = get_wind(raw_output)
    assert loc is not None
    report = format_report(loc, temp, sky, wind, humidity)
    print(report)
    return 0
def run_weather_cmd(
    weather_cmd: str,
    zipcode: str,
    *,
    attempts: int,
    timeout: int,
    max_delay: int,
) -> Optional[str]:
    """Runs the 'weather' command with retries and exponential backoff.

    :param weather_cmd: executable used to fetch the report
    :param zipcode: location to query
    :param attempts: maximum number of attempts before giving up
    :param timeout: per-attempt timeout in seconds
    :param max_delay: cap on the backoff delay between attempts

    Returns:
        Raw output of 'weather' command, or None if every attempt failed.
    """
    cmd_list = [weather_cmd]
    opts = ["--setpath", "/usr/share/weather-util", zipcode, "--no-cache"]
    cmd_list.extend(opts)

    def log_cmd(msg: str) -> None:
        msg = "{!r} command: {}".format(weather_cmd, msg)
        log.debug(msg)

    rc = None
    output = None
    for i in range(attempts):
        if i > 0:
            # delay => 10s, 20s, 40s, 80s, ..., max_delay
            delay = min(max_delay, 2 ** (i - 1) * 10)
            log.debug(f"Waiting {delay}s before trying again.")
            time.sleep(delay)
        log_cmd(f"Attempt #{i + 1}")
        child = sp.Popen(cmd_list, stdout=sp.PIPE, stderr=sp.PIPE)
        try:
            stdout, stderr = child.communicate(timeout=timeout)
        except sp.TimeoutExpired:
            # Kill and reap the still-running child; without this every
            # timed-out attempt leaked a running 'weather' process (the
            # pattern recommended by the subprocess docs).
            child.kill()
            child.communicate()
            log_cmd(f"TIMEOUT (after {timeout}s)")
        else:
            rc = child.returncode
            output = stdout.decode().strip()
            if rc == 0:
                log_cmd("SUCCESS")
                break
            output += stderr.decode().strip()
            log_cmd(f"FAILURE: {output}")
    if rc == 0:
        return output
    else:
        return None
def get_temp(raw_output: str) -> str:
    """Return the temperature as a whole-degree Fahrenheit string, or 'N/A'."""
    raw_temp = get_group(r"Temperature: ([0-9]+\.[0-9]) F", raw_output)
    return "N/A" if raw_temp is None else f"{round(float(raw_temp))} F"
def get_humidity(raw_output: str) -> Optional[str]:
    """Return the humidity percentage string (e.g. '45%'), or None."""
    return get_group("Humidity:[ ]*([1-9][0-9]*%)", raw_output)
def get_wind(raw_output: str) -> Optional[str]:
    """Return the wind description, preferring a MPH figure when present."""
    mph = get_group(r"Wind: .*?([0-9\-]+ MPH)", raw_output)
    if mph is not None:
        return mph
    # fall back to whatever free text follows 'Wind:' (may be None)
    return get_group(r"Wind: (.*)", raw_output)
def get_group(pttrn: str, string: str) -> Optional[str]:
    """Return the first capture group of *pttrn* in *string* (multiline mode), or None."""
    match = re.search(pttrn, string, re.M)
    return match.groups()[0] if match else None
def format_report(
    _loc: str,
    temp: str,
    sky: Optional[str],
    wind: Optional[str],
    humidity: Optional[str],
) -> str:
    """Assemble the one-line weather report shown on the bar.

    Optional fields (humidity, sky, wind) are appended, ' | '-separated,
    only when available; *_loc* is currently unused.
    """
    now = dt.datetime.now()
    parts = ["{} ::: TEMP: {}".format(now.strftime("@%H:%M:%S"), temp)]
    if humidity is not None:
        parts.append(f"HUMIDITY: {humidity}")
    if sky is not None:
        parts.append(f"SKY: {sky}")
    if wind is not None:
        parts.append(f"WIND: {wind}")
    return " | ".join(parts)
if __name__ == "__main__":
sys.exit(main())
| [
"bryanbugyi34@gmail.com"
] | bryanbugyi34@gmail.com |
3e063e740006b9aab8f0c31edc73a70926e13dd6 | 5864e86954a221d52d4fa83a607c71bacf201c5a | /eve/client/script/ui/station/fitting/minihangar.py | 1990c0fe486beccfd0fdcb8e5e6616770fb04410 | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,161 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\station\fitting\minihangar.py
from carbonui.primitives.container import Container
from carbonui.primitives.fill import Fill
from eve.client.script.ui.shared.fitting.fittingStatsChanges import FittingStatsChanges
from inventorycommon.util import IsShipFittingFlag, IsShipFittable
import uicontrols
import uthread
import util
import carbonui.const as uiconst
import localization
import invCtrl
class CargoSlots(Container):
    """Fitting-window row showing a hold's icon plus used/total capacity.

    Base class: subclasses provide the concrete inventory controller via
    ``GetInvController`` and implement ``UpdateCargoSpace``.
    """
    default_state = uiconst.UI_NORMAL
    def ApplyAttributes(self, attributes):
        """Build icon, hilite and status labels, then register for inventory events."""
        Container.ApplyAttributes(self, attributes)
        self.controller = attributes.controller
        # refresh the capacity readout whenever fitting stats change;
        # UpdateCargoSpace itself is defined by the subclasses
        self.controller.on_stats_changed.connect(self.UpdateCargoSpace)
        invController = self.GetInvController()
        self.sr.icon = uicontrols.Icon(parent=self, size=32, state=uiconst.UI_DISABLED, ignoreSize=True, icon=invController.GetIconName())
        self.sr.hint = invController.GetName()
        self.sr.hilite = Fill(parent=self, name='hilite', align=uiconst.RELATIVE, state=uiconst.UI_HIDDEN, idx=-1, width=32, height=self.height)
        self.sr.icon.color.a = 0.8
        Container(name='push', parent=self, align=uiconst.TOLEFT, width=32)
        self.sr.statusCont = Container(name='statusCont', parent=self, align=uiconst.TOLEFT, width=50)
        # two stacked labels: used amount on top, total capacity below
        self.sr.statustext1 = uicontrols.EveLabelMedium(text='status', parent=self.sr.statusCont, name='cargo_statustext', left=0, top=2, idx=0, state=uiconst.UI_DISABLED, align=uiconst.TOPRIGHT)
        self.sr.statustext2 = uicontrols.EveLabelMedium(text='status', parent=self.sr.statusCont, name='cargo_statustext', left=0, top=14, idx=0, state=uiconst.UI_DISABLED, align=uiconst.TOPRIGHT)
        m3TextCont = Container(name='m3Cont', parent=self, align=uiconst.TOLEFT, width=12)
        self.sr.m3Text = uicontrols.EveLabelMedium(text=localization.GetByLabel('UI/Fitting/FittingWindow/CubicMeters'), parent=m3TextCont, name='m3', left=4, top=14, idx=0)
        # receive AddItem/UpdateItem/RemoveItem callbacks from the inv service
        sm.GetService('inv').Register(self)
        self.invReady = 1
        self.UpdateCargoSpace()
    def IsItemHere(self, item):
        """Delegate 'does this item live in this hold?' to the controller."""
        return self.GetInvController().IsItemHere(item)
    def AddItem(self, item):
        # inventory changed -> recompute the capacity readout
        self.Update()
    def UpdateItem(self, item, *etc):
        self.Update()
    def RemoveItem(self, item):
        self.Update()
    def OnMouseEnter(self, *args):
        self.DoMouseEntering()
    def OnMouseEnterDrone(self, *args):
        # only hilite for drone drags while docked in a station
        if eve.session.stationid:
            self.DoMouseEntering()
    def DoMouseEntering(self):
        """Hilite the icon and forward hover state to the labels."""
        self.Hilite(1)
        self.sr.statustext1.OnMouseEnter()
        self.sr.statustext2.OnMouseEnter()
        self.sr.m3Text.OnMouseEnter()
    def OnMouseExit(self, *args):
        self.Hilite(0)
        self.sr.statustext1.OnMouseExit()
        self.sr.statustext2.OnMouseExit()
        self.sr.m3Text.OnMouseExit()
        uthread.new(self.Update)
    def Hilite(self, state):
        # dim (0.8) normally, full alpha while hovered
        self.sr.icon.color.a = [0.8, 1.0][state]
    def SetStatusText(self, text1, text2, color):
        """Set the used/total labels and resize their container to fit."""
        self.sr.statustext1.text = text1
        self.sr.statustext2.text = localization.GetByLabel('UI/Fitting/FittingWindow/CargoUsage', color=color, text=text2)
        self.sr.statusCont.width = max(0, self.sr.statustext1.textwidth, self.sr.statustext2.textwidth)
    def OnDropData(self, dragObj, nodes):
        # subclasses perform the actual move; base just clears the hilite
        self.Hilite(0)
    def Update(self, multiplier = 1.0):
        """Recompute the readout on a worker tasklet (non-blocking)."""
        uthread.new(self._Update, multiplier)
    def _Update(self, multiplier):
        cap = self.GetCapacity()
        if not cap:
            return
        if not self or self.destroyed:
            return
        cap2 = cap.capacity * multiplier
        # yellow when a ghost-fit multiplier changes capacity, white otherwise
        color = '<color=0xc0ffffff>'
        if multiplier != 1.0:
            color = '<color=0xffffff00>'
        used = util.FmtAmt(cap.used, showFraction=1)
        cap2 = util.FmtAmt(cap2, showFraction=1)
        self.SetStatusText(used, cap2, color)
    def GetCapacity(self, flag = None):
        return self.GetInvController().GetCapacity()
class CargoDroneSlots(CargoSlots):
    """Capacity row for the ship's drone bay."""
    def GetInvController(self):
        return invCtrl.ShipDroneBay(self.controller.GetItemID())
    def OnDropData(self, dragObj, nodes):
        # drops always target the currently active ship's drone bay
        invCtrl.ShipDroneBay(util.GetActiveShip()).OnDropData(nodes)
        CargoSlots.OnDropData(self, dragObj, nodes)
    def OnClick(self, *args):
        uicore.cmd.OpenDroneBayOfActiveShip()
    def UpdateCargoSpace(self):
        """Refresh the readout, scaling capacity by ghost-fit drone-space bonuses."""
        typeID = self.controller.GetGhostFittedTypeID()
        fittingChanges = FittingStatsChanges(typeID)
        xtraDroneSpace = fittingChanges.GetExtraDroneSpaceMultiplier()
        self.Update(xtraDroneSpace)
class CargoFighterSlots(CargoSlots):
    """Capacity row for the ship's fighter bay."""
    def GetInvController(self):
        return invCtrl.ShipFighterBay(self.controller.GetItemID())
    def OnDropData(self, dragObj, nodes):
        self.GetInvController().OnDropData(nodes)
        CargoSlots.OnDropData(self, dragObj, nodes)
    def OnClick(self, *args):
        uicore.cmd.OpenFighterBayOfActiveShip()
    def UpdateCargoSpace(self):
        """Refresh the readout, scaling capacity by ghost-fit fighter-space bonuses."""
        typeID = self.controller.GetGhostFittedTypeID()
        fittingChanges = FittingStatsChanges(typeID)
        xtraFighterSpace = fittingChanges.GetExtraFighterSpaceMultiplier()
        self.Update(xtraFighterSpace)
class CargoStructureAmmoBay(CargoSlots):
    """Capacity row for a structure's ammo bay."""
    def GetInvController(self):
        return invCtrl.StructureAmmoBay(self.controller.GetItemID())
    def OnDropData(self, dragObj, nodes):
        self.GetInvController().OnDropData(nodes)
        CargoSlots.OnDropData(self, dragObj, nodes)
    def OnClick(self, *args):
        # open (or toggle) an inventory window scoped to this ammo bay
        invID = ('StructureAmmoBay', self.controller.GetItemID())
        from eve.client.script.ui.shared.inventory.invWindow import Inventory
        Inventory.OpenOrShow(invID, usePrimary=False, toggle=True)
    def UpdateCargoSpace(self):
        # no ghost-fit multiplier applies to the ammo bay
        self.Update()
class CargoCargoSlots(CargoSlots):
    """Capacity row for the ship's cargo hold."""
    def GetInvController(self):
        return invCtrl.ShipCargo(self.controller.GetItemID())
    def OnDropData(self, dragObj, nodes):
        self.Hilite(0)
        if len(nodes) == 1:
            item = nodes[0].item
            # a single item dragged straight off a fitting slot is unloaded
            # into cargo via dogma rather than a plain inventory move
            if IsShipFittingFlag(item.flagID):
                dogmaLocation = sm.GetService('clientDogmaIM').GetDogmaLocation()
                shipID = util.GetActiveShip()
                if IsShipFittable(item.categoryID):
                    # fitted module -> unload to cargo
                    dogmaLocation.UnloadModuleToContainer(shipID, item.itemID, (shipID,), flag=const.flagCargo)
                    return
                if item.categoryID == const.categoryCharge:
                    # loaded charge -> unload to cargo
                    dogmaLocation.UnloadChargeToContainer(shipID, item.itemID, (shipID,), const.flagCargo)
                    return
        # everything else: regular inventory move into the active ship's cargo
        invCtrl.ShipCargo(util.GetActiveShip()).OnDropData(nodes)
        CargoSlots.OnDropData(self, dragObj, nodes)
    def OnClick(self, *args):
        uicore.cmd.OpenCargoHoldOfActiveShip()
    def UpdateCargoSpace(self):
        """Refresh the readout, scaling capacity by ghost-fit cargo-space bonuses."""
        typeID = self.controller.GetGhostFittedTypeID()
        fittingChanges = FittingStatsChanges(typeID)
        xtraCargoSpace = fittingChanges.GetExtraCargoSpaceMultiplier()
        self.Update(xtraCargoSpace)
| [
"le02005@163.com"
] | le02005@163.com |
7f330b9f70088b20251a7e199f7b97aeee3e03db | 81e302a2fe2035d13710d6aa9b13fb763dcf8fa4 | /chapter4/create_table_02.py | b661782e443bcadfeb96ee328a3d727d0a3d1fd2 | [] | no_license | liuyuzhou/databasesourcecode | 8a76099efc86292b1449c3a84b35ba02398bcbe9 | 1f3ad5f27d194c2aa88fa8cb39c6ae92ee3d1f1f | refs/heads/master | 2021-07-18T06:18:28.538719 | 2021-07-03T01:56:58 | 2021-07-03T01:56:58 | 250,950,207 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
# 声明映射
Base = declarative_base()
# 定义Course对象,课程表对象
class Course(Base):
    """Declarative ORM model mapped to the ``course`` table."""
    __tablename__ = 'course'

    id = Column(Integer, primary_key=True)
    course_name = Column(String(20), default=None, nullable=False, comment='课程名称')
    teacher_name = Column(String(20), default=None, nullable=False, comment='任课老师')
    class_times = Column(Integer, default=0, nullable=False, comment='课时')

    def __repr__(self):
        """Printable representation listing the mapped column values."""
        return "Course:(course_name={}, teacher_name={}, class_times={})".format(
            self.course_name, self.teacher_name, self.class_times)
| [
"jxgzyuzhouliu@163.com"
] | jxgzyuzhouliu@163.com |
32522a5eab011726780f8c979c342377bbfb2563 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/delegatednetwork/v20210315/get_orchestrator_instance_service_details.py | c397fabf8aef2763a3d0ef27c19eabb3382a5577 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 9,884 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetOrchestratorInstanceServiceDetailsResult',
'AwaitableGetOrchestratorInstanceServiceDetailsResult',
'get_orchestrator_instance_service_details',
]
@pulumi.output_type
class GetOrchestratorInstanceServiceDetailsResult:
    """
    Represents an instance of a orchestrator.
    """
    def __init__(__self__, api_server_endpoint=None, cluster_root_ca=None, controller_details=None, id=None, identity=None, kind=None, location=None, name=None, orchestrator_app_id=None, orchestrator_tenant_id=None, private_link_resource_id=None, provisioning_state=None, resource_guid=None, tags=None, type=None):
        # Table-driven replacement for the generated per-argument blocks:
        # each argument is validated against its expected runtime type and
        # stored on the instance via pulumi.set, in the same order and with
        # the same TypeError messages as before.
        checked_args = (
            ('api_server_endpoint', api_server_endpoint, str),
            ('cluster_root_ca', cluster_root_ca, str),
            ('controller_details', controller_details, dict),
            ('id', id, str),
            ('identity', identity, dict),
            ('kind', kind, str),
            ('location', location, str),
            ('name', name, str),
            ('orchestrator_app_id', orchestrator_app_id, str),
            ('orchestrator_tenant_id', orchestrator_tenant_id, str),
            ('private_link_resource_id', private_link_resource_id, str),
            ('provisioning_state', provisioning_state, str),
            ('resource_guid', resource_guid, str),
            ('tags', tags, dict),
            ('type', type, str),
        )
        for arg_name, arg_value, expected_type in checked_args:
            if arg_value and not isinstance(arg_value, expected_type):
                raise TypeError("Expected argument '{}' to be a {}".format(arg_name, expected_type.__name__))
            pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="apiServerEndpoint")
    def api_server_endpoint(self) -> Optional[str]:
        """
        K8s APIServer url. Either one of apiServerEndpoint or privateLinkResourceId can be specified
        """
        return pulumi.get(self, "api_server_endpoint")

    @property
    @pulumi.getter(name="clusterRootCA")
    def cluster_root_ca(self) -> Optional[str]:
        """
        RootCA certificate of kubernetes cluster base64 encoded
        """
        return pulumi.get(self, "cluster_root_ca")

    @property
    @pulumi.getter(name="controllerDetails")
    def controller_details(self) -> 'outputs.ControllerDetailsResponse':
        """
        Properties of the controller.
        """
        return pulumi.get(self, "controller_details")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        An identifier that represents the resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def identity(self) -> Optional['outputs.OrchestratorIdentityResponse']:
        """
        The identity of the orchestrator
        """
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter
    def kind(self) -> str:
        """
        The kind of workbook. Choices are user and shared.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Location of the resource.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="orchestratorAppId")
    def orchestrator_app_id(self) -> Optional[str]:
        """
        AAD ID used with apiserver
        """
        return pulumi.get(self, "orchestrator_app_id")

    @property
    @pulumi.getter(name="orchestratorTenantId")
    def orchestrator_tenant_id(self) -> Optional[str]:
        """
        TenantID of server App ID
        """
        return pulumi.get(self, "orchestrator_tenant_id")

    @property
    @pulumi.getter(name="privateLinkResourceId")
    def private_link_resource_id(self) -> Optional[str]:
        """
        private link arm resource id. Either one of apiServerEndpoint or privateLinkResourceId can be specified
        """
        return pulumi.get(self, "private_link_resource_id")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The current state of orchestratorInstance resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> str:
        """
        Resource guid.
        """
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        The resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetOrchestratorInstanceServiceDetailsResult(GetOrchestratorInstanceServiceDetailsResult):
    """Awaitable wrapper: ``await`` completes immediately with a plain result copy."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # the dead `if False: yield` makes this method a generator, which is
        # what allows instances to be awaited; it never actually suspends
        if False:
            yield self
        return GetOrchestratorInstanceServiceDetailsResult(
            api_server_endpoint=self.api_server_endpoint,
            cluster_root_ca=self.cluster_root_ca,
            controller_details=self.controller_details,
            id=self.id,
            identity=self.identity,
            kind=self.kind,
            location=self.location,
            name=self.name,
            orchestrator_app_id=self.orchestrator_app_id,
            orchestrator_tenant_id=self.orchestrator_tenant_id,
            private_link_resource_id=self.private_link_resource_id,
            provisioning_state=self.provisioning_state,
            resource_guid=self.resource_guid,
            tags=self.tags,
            type=self.type)
def get_orchestrator_instance_service_details(resource_group_name: Optional[str] = None,
                                              resource_name: Optional[str] = None,
                                              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOrchestratorInstanceServiceDetailsResult:
    """
    Represents an instance of a orchestrator.


    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str resource_name: The name of the resource. It must be a minimum of 3 characters, and a maximum of 63.
    :param opts: options controlling how the provider invoke is performed
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # default to this SDK package's version for the provider invoke
        opts.version = _utilities.get_version()
    # synchronous provider invoke; typ= tells pulumi how to deserialize the reply
    __ret__ = pulumi.runtime.invoke('azure-native:delegatednetwork/v20210315:getOrchestratorInstanceServiceDetails', __args__, opts=opts, typ=GetOrchestratorInstanceServiceDetailsResult).value
    return AwaitableGetOrchestratorInstanceServiceDetailsResult(
        api_server_endpoint=__ret__.api_server_endpoint,
        cluster_root_ca=__ret__.cluster_root_ca,
        controller_details=__ret__.controller_details,
        id=__ret__.id,
        identity=__ret__.identity,
        kind=__ret__.kind,
        location=__ret__.location,
        name=__ret__.name,
        orchestrator_app_id=__ret__.orchestrator_app_id,
        orchestrator_tenant_id=__ret__.orchestrator_tenant_id,
        private_link_resource_id=__ret__.private_link_resource_id,
        provisioning_state=__ret__.provisioning_state,
        resource_guid=__ret__.resource_guid,
        tags=__ret__.tags,
        type=__ret__.type)
| [
"noreply@github.com"
] | morrell.noreply@github.com |
829ec35d9450bf7dddf39940d17e80553d22d4b8 | fd529ba6ade52cd2a3dab94da01252d7ea90398d | /zerojudge/b130.py | aed6f2b48f8d15ee266fb51fd69118bdc7533a8d | [] | no_license | fjfhfjfjgishbrk/AE401-Python | 4a984deb0281542c205d72695285b35c7413338f | ee80fa4588b127cff2402fd81e732ede28a66411 | refs/heads/master | 2022-06-13T13:49:39.875567 | 2022-05-28T12:40:51 | 2022-05-28T12:40:51 | 251,178,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 27 11:16 2020
@author: fdbfvuie
"""
# For each test case: the first input line (an element count) is ignored;
# the second holds whitespace-separated integers.  Print the number of
# distinct values, then the distinct values in ascending order.
while True:
    try:
        input()  # element-count line — unused, the size is derived below
        distinct = sorted(set(int(token) for token in input().split()))
    except EOFError:  # end of input; was a bare `except:` that also hid bad data
        break
    print(len(distinct))
    print(" ".join(str(value) for value in distinct))
"59891511+fjfhfjfjgishbrk@users.noreply.github.com"
] | 59891511+fjfhfjfjgishbrk@users.noreply.github.com |
b8abb10ab99545daf4c9bf1ff199c941fd73e82e | 6cd3de9d6aa0c52602010aa857966d5dc4d57442 | /_unittests/ut_testing/data/plot_anomaly_comparison.py | 81f4f31d3c39d1702e592d94d0ac07921367ed62 | [
"MIT"
] | permissive | xadupre/mlprodict | 2307ca96eafeeafff08d5322184399bb5dc1c37e | f82c8a26a60104948c67849b1c4af95ca812c153 | refs/heads/master | 2022-12-10T18:50:36.953032 | 2020-09-03T08:53:58 | 2020-09-03T08:53:58 | 292,824,744 | 1 | 0 | NOASSERTION | 2020-09-04T10:56:45 | 2020-09-04T10:56:44 | null | UTF-8 | Python | false | false | 3,372 | py | import time
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.datasets import make_moons, make_blobs
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
# Example settings
n_samples = 300
outliers_fraction = 0.15
n_outliers = int(outliers_fraction * n_samples)
n_inliers = n_samples - n_outliers
# define outlier/anomaly detection methods to be compared
anomaly_algorithms = [
    ("Robust covariance", EllipticEnvelope(contamination=outliers_fraction)),
    ("One-Class SVM", svm.OneClassSVM(nu=outliers_fraction, kernel="rbf",
                                      gamma=0.1)),
    ("Isolation Forest", IsolationForest(contamination=outliers_fraction,
                                         random_state=42)),
    ("Local Outlier Factor", LocalOutlierFactor(
        n_neighbors=35, contamination=outliers_fraction))]
# Define datasets: blobs of varying separation/spread, moons, and uniform noise
blobs_params = dict(random_state=0, n_samples=n_inliers, n_features=2)
datasets = [
    make_blobs(centers=[[0, 0], [0, 0]], cluster_std=0.5,
               **blobs_params)[0],
    make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[0.5, 0.5],
               **blobs_params)[0],
    make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[1.5, .3],
               **blobs_params)[0],
    4. * (make_moons(n_samples=n_samples, noise=.05, random_state=0)[0] -
          np.array([0.5, 0.25])),
    14. * (np.random.RandomState(42).rand(n_samples, 2) - 0.5)]  # pylint: disable=E1101
# Compare given classifiers under given settings
# grid over which the decision boundary is evaluated for the contour plot
xx, yy = np.meshgrid(np.linspace(-7, 7, 150),
                     np.linspace(-7, 7, 150))
plt.figure(figsize=(len(anomaly_algorithms) * 2 + 3, 12.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
                    hspace=.01)
plot_num = 1
rng = np.random.RandomState(42)  # pylint: disable=E1101
for i_dataset, X in enumerate(datasets):
    # Add outliers
    X = np.concatenate([X, rng.uniform(low=-6, high=6,
                                       size=(n_outliers, 2))], axis=0)
    for name, algorithm in anomaly_algorithms:
        # time the fit so each subplot can display it
        t0 = time.time()
        algorithm.fit(X)
        t1 = time.time()
        plt.subplot(len(datasets), len(anomaly_algorithms), plot_num)
        if i_dataset == 0:
            plt.title(name, size=18)
        # fit the data and tag outliers
        if name == "Local Outlier Factor":
            y_pred = algorithm.fit_predict(X)
        else:
            y_pred = algorithm.fit(X).predict(X)
        # plot the levels lines and the points
        if name != "Local Outlier Factor":  # LOF does not implement predict
            Z = algorithm.predict(np.c_[xx.ravel(), yy.ravel()])
            Z = Z.reshape(xx.shape)
            plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='black')
        # color points by predicted label: inliers vs outliers
        colors = np.array(['#377eb8', '#ff7f00'])
        plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[(y_pred + 1) // 2])
        plt.xlim(-7, 7)
        plt.ylim(-7, 7)
        plt.xticks(())
        plt.yticks(())
        # annotate each subplot with its fit time
        plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
                 transform=plt.gca().transAxes, size=15,
                 horizontalalignment='right')
        plot_num += 1
plt.show()
| [
"xavier.dupre@gmail.com"
] | xavier.dupre@gmail.com |
a756e899283617e6565cb60bca961f619d739868 | e95f65f1e320e56306c4442329b756fdd6ed00fa | /docs/conf.py | 895973e0dc0a1a87a40edc0bfeedfb11f8ec6bf3 | [
"Apache-2.0",
"MIT"
] | permissive | 0xflotus/CUP | 3a808248126a9ef8d3436a1aadf2384fcb869acb | 5e4330cb2e4ccdc67ad94f0084e745eed6f96d6b | refs/heads/master | 2020-03-31T06:08:29.823081 | 2018-09-30T13:32:23 | 2018-09-30T13:32:23 | 151,969,632 | 0 | 0 | NOASSERTION | 2018-10-07T17:56:57 | 2018-10-07T17:56:57 | null | UTF-8 | Python | false | false | 5,882 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, u'/Users/baidu/baidu/code/open-source/python/cup_on_github/cup')
# -- Project information -----------------------------------------------------
# Project identity displayed in the generated documentation pages.
project = u'cup'
copyright = u'2018, CUP-DEV'
author = u'CUP-DEV'

# The short X.Y version
version = u'1.7'
# The full version, including alpha/beta/rc tags
release = u'1.7.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
'canonical_url': '',
'analytics_id': '',
'logo_only': False,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': False,
# 'vcs_pageview_mode': '',
# Toc options
'collapse_navigation': True,
'sticky_navigation': True,
'navigation_depth': 4,
'includehidden': True,
'titles_only': False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cupdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cup.tex', u'cup Documentation',
u'Author', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cup', u'cup Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cup', u'cup Documentation',
author, 'cup', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| [
"mythmgn@gmail.com"
] | mythmgn@gmail.com |
afe4f56aa89d5c27d8f6f060faf8558ce119f33e | ac7b9717ee65f1c09ae50aa17463ae508bc92164 | /cristianoronaldoyopmailcom_332/urls.py | ee3a8f7f327dbfd2c0392a5553da9f262e9970ef | [] | no_license | payush/cristianoronaldoyopmailcom-332 | c4fd53c39130d987d49ac0abc499e481580360b0 | f9100561da0dc9163b6377afb630e84e888023ac | refs/heads/master | 2020-03-23T19:18:46.561459 | 2018-07-23T05:58:41 | 2018-07-23T05:58:41 | 141,968,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | """cristianoronaldoyopmailcom_332 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# URL routing table. Django tries patterns in order; the empty prefix ''
# matches every request path, so home.urls is consulted first and the
# later entries are only reached when home.urls yields no match.
urlpatterns = [
    url('', include('home.urls')),  # site pages (catch-all prefix)
    url(r'^accounts/', include('allauth.urls')),  # django-allauth auth flows
    url(r'^api/v1/', include('home.api.v1.urls')),  # versioned REST API
    url(r'^admin/', admin.site.urls),  # Django admin
]
| [
"ayushpuroheet@gmail.com"
] | ayushpuroheet@gmail.com |
a1605ab46f979a8f2dccfce926267377f4662068 | 3670f46666214ef5e1ce6765e47b24758f3614a9 | /oneflow/python/test/ops/test_summary.py | abfbb915308413ee0f84e72659ac02d44f959708 | [
"Apache-2.0"
] | permissive | ashing-zhang/oneflow | 0b8bb478ccd6cabea2dca0864defddab231919bf | 70db228a4d361c916f8f8d85e908795b479e5d20 | refs/heads/master | 2022-12-14T21:13:46.752535 | 2020-09-07T03:08:52 | 2020-09-07T03:08:52 | 293,535,931 | 1 | 0 | Apache-2.0 | 2020-09-07T13:28:25 | 2020-09-07T13:28:24 | null | UTF-8 | Python | false | false | 6,006 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import numpy as np
import tensorflow as tf
import oneflow as flow
from collections import OrderedDict
import cv2
import time
from test_util import GenArgList
def _read_images_by_cv(image_files):
    """Load each file with OpenCV, convert BGR->RGB uint8, resize to 512x512.

    Returns a list of 512x512x3 uint8 arrays, one per input path.
    """
    resized = []
    for path in image_files:
        rgb = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB).astype(np.uint8)
        resized.append(cv2.resize(rgb, (512, 512)))
    return resized
def summary_demo():
    """End-to-end demo of OneFlow's summary API.

    Creates a summary writer, defines one OneFlow global function per
    summary kind (scalar / histogram / protobuf / image), then feeds demo
    data through each of them plus the embedding/exception projectors and
    the structure-graph writer.

    NOTE(review): relies on the hard-coded paths "/oneflow/log",
    "~/oneflow/image1" and "~/oneflow/image2" existing on disk.
    """
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    func_config.default_logical_view(flow.scope.mirrored_view())
    logdir = "/oneflow/log"
    # --- summary jobs: each decorated function becomes a OneFlow job ------
    # Tags are passed as int8 byte arrays (np.fromstring of the tag text).
    @flow.global_function(function_config=func_config)
    def CreateWriter():
        flow.summary.create_summary_writer(logdir)
    @flow.global_function(function_config=func_config)
    def ScalarJob(
        value: flow.typing.ListNumpy.Placeholder((1,), dtype=flow.float),
        step: flow.typing.ListNumpy.Placeholder((1,), dtype=flow.int64),
        tag: flow.typing.ListNumpy.Placeholder((1000,), dtype=flow.int8),
    ):
        flow.summary.scalar(value, step, tag)
    @flow.global_function(function_config=func_config)
    def HistogramJob(
        value: flow.typing.ListNumpy.Placeholder((200, 200, 200), dtype=flow.float),
        step: flow.typing.ListNumpy.Placeholder((1,), dtype=flow.int64),
        tag: flow.typing.ListNumpy.Placeholder((9,), dtype=flow.int8),
    ):
        flow.summary.histogram(value, step, tag)
    @flow.global_function(function_config=func_config)
    def PbJob(
        value: flow.typing.ListNumpy.Placeholder((1500,), dtype=flow.int8),
        step: flow.typing.ListNumpy.Placeholder((1,), dtype=flow.int64),
    ):
        flow.summary.pb(value, step=step)
    @flow.global_function(function_config=func_config)
    def ImageJob(
        value: flow.typing.ListNumpy.Placeholder(
            shape=(100, 2000, 2000, 4), dtype=flow.uint8
        ),
        step: flow.typing.ListNumpy.Placeholder((1,), dtype=flow.int64),
        tag: flow.typing.ListNumpy.Placeholder((10,), dtype=flow.int8),
    ):
        flow.summary.image(value, step=step, tag=tag)
    @flow.global_function(function_config=func_config)
    def FlushJob():
        flow.summary.flush_summary_writer()
    CreateWriter()
    # "projecotr" (sic) — variable name kept as-is.
    projecotr = flow.summary.Projector(logdir)
    projecotr.create_embedding_projector()
    projecotr.create_exception_projector()
    # Hyper-parameter metadata logged through the protobuf job below.
    hparams = {
        flow.summary.HParam("learning_rate", flow.summary.RealRange(1e-2, 1e-1)): 0.02,
        flow.summary.HParam("dense_layers", flow.summary.IntegerRange(2, 7)): 5,
        flow.summary.HParam(
            "optimizer", flow.summary.ValueSet(["adam", "sgd"])
        ): "adam",
        flow.summary.HParam("accuracy", flow.summary.RealRange(1e-2, 1e-1)): 0.001,
        flow.summary.HParam("magic", flow.summary.ValueSet([False, True])): True,
        flow.summary.Metric("loss", float): 0.02,
        "dropout": 0.6,
    }
    # --- text + hparams summaries, serialized to int8 bytes ----------------
    # NOTE(review): np.fromstring is deprecated in favour of np.frombuffer.
    for i in range(200):
        t = ["vgg16", "resnet50", "mask-rcnn", "yolov3"]
        pb = flow.summary.text(t)
        value = np.fromstring(str(pb), dtype=np.int8)
        step = np.array([i], dtype=np.int64)
        PbJob([value], [step])
        pb2 = flow.summary.hparams(hparams)
        value = np.fromstring(str(pb2), dtype=np.int8)
        step = np.array([i], dtype=np.int64)
        PbJob([value], [step])
    # --- scalar summaries --------------------------------------------------
    for idx in range(10):
        value = np.array([idx], dtype=np.float32)
        step = np.array([idx], dtype=np.int64)
        tag = np.fromstring("scalar", dtype=np.int8)
        ScalarJob([value], [step], [tag])
    # This small array is reused below as the projector input.
    value = np.array(
        [
            [[1, 2, 3, 0], [0, 2, 3, 1], [2, 3, 4, 1]],
            [[1, 0, 2, 0], [2, 1, 2, 0], [2, 1, 1, 1]],
        ],
        dtype=np.float64,
    )
    # --- histogram summaries (value above is overwritten here) -------------
    for idx in range(1):
        value = np.random.rand(100, 100, 100).astype(np.float32)
        step = np.array([idx], dtype=np.int64)
        tag = np.fromstring("histogram", dtype=np.int8)
        HistogramJob([value], [step], [tag])
    # --- embedding / exception projector inputs ----------------------------
    value_ = np.random.rand(10, 10, 10).astype(np.float32)
    label = (np.random.rand(10) * 10).astype(np.int64)
    x = (np.random.rand(10, 10, 10) * 255).astype(np.uint8)
    sample_name = "sample"
    sample_type = "image"
    step = 1
    tag_exception = "exception_projector"
    tag_embedding = "embedding_projector"
    projecotr.exception_projector(
        value=value,
        tag=tag_exception,
        step=step,
        sample_name=sample_name,
        sample_type=sample_type,
        x=x,
    )
    projecotr.embedding_projector(
        value=value,
        label=label,
        tag=tag_embedding,
        step=step,
        sample_name=sample_name,
        sample_type=sample_type,
        x=x,
    )
    # --- image summaries: two files from disk plus one synthetic frame -----
    image1_path = "~/oneflow/image1"
    image2_path = "~/oneflow/image2"
    image_files = [
        image1_path,
        image2_path,
    ]
    images = _read_images_by_cv(image_files)
    images = np.array(images, dtype=np.uint8)
    imageRed = np.ones([512, 512, 3]).astype(np.uint8)
    Red = np.array([0, 255, 255], dtype=np.uint8)
    imageNew = np.multiply(imageRed, Red)
    imageNew = np.expand_dims(imageNew, axis=0)
    images = np.concatenate((images, imageNew), axis=0)
    step = np.array([1], dtype=np.int64)
    tag = np.fromstring("image", dtype=np.int8)
    ImageJob([images], [step], [tag])
    # --- dump the computation graph structure ------------------------------
    graph = flow.summary.Graph(logdir)
    graph.write_structure_graph()
| [
"noreply@github.com"
] | ashing-zhang.noreply@github.com |
946f16bbbb68b0e88e2045b3bbb65935e136bcb4 | 169e75df163bb311198562d286d37aad14677101 | /tensorflow/tensorflow/contrib/distributions/python/kernel_tests/bijectors/invert_test.py | 8b14c8327f08902044f50483f9f8dfe67b58cd70 | [
"Apache-2.0"
] | permissive | zylo117/tensorflow-gpu-macosx | e553d17b769c67dfda0440df8ac1314405e4a10a | 181bc2b37aa8a3eeb11a942d8f330b04abc804b3 | refs/heads/master | 2022-10-19T21:35:18.148271 | 2020-10-15T02:33:20 | 2020-10-15T02:33:20 | 134,240,831 | 116 | 26 | Apache-2.0 | 2022-10-04T23:36:22 | 2018-05-21T08:29:12 | C++ | UTF-8 | Python | false | false | 3,534 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import gamma as gamma_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class InvertBijectorTest(test.TestCase):
  """Tests the correctness of the Y = Invert(bij) transformation."""

  def testBijector(self):
    # For several base bijectors, Invert(fwd) must swap forward/inverse and
    # the corresponding log-det-Jacobians, and prefix the name with "invert_".
    with self.test_session():
      for fwd in [
          bijectors.Identity(),
          bijectors.Exp(),
          bijectors.Affine(shift=[0., 1.], scale_diag=[2., 3.]),
          bijectors.Softplus(),
          bijectors.SoftmaxCentered(),
      ]:
        rev = bijectors.Invert(fwd)
        self.assertEqual("_".join(["invert", fwd.name]), rev.name)
        x = [[[1., 2.],
              [2., 3.]]]
        self.assertAllClose(fwd.inverse(x).eval(), rev.forward(x).eval())
        self.assertAllClose(fwd.forward(x).eval(), rev.inverse(x).eval())
        self.assertAllClose(
            fwd.forward_log_det_jacobian(x, event_ndims=1).eval(),
            rev.inverse_log_det_jacobian(x, event_ndims=1).eval())
        self.assertAllClose(
            fwd.inverse_log_det_jacobian(x, event_ndims=1).eval(),
            rev.forward_log_det_jacobian(x, event_ndims=1).eval())

  def testScalarCongruency(self):
    # Invert(Exp) behaves like log; check forward/inverse round-trip and
    # Jacobian consistency on a scalar grid.
    with self.test_session():
      bijector = bijectors.Invert(bijectors.Exp())
      assert_scalar_congruency(
          bijector, lower_x=1e-3, upper_x=1.5, rtol=0.05)

  def testShapeGetters(self):
    # Inverting a bijector must also swap its event-shape getters
    # (SoftmaxCentered maps event shape [1] <-> [2]).
    with self.test_session():
      bijector = bijectors.Invert(bijectors.SoftmaxCentered(validate_args=True))
      x = tensor_shape.TensorShape([2])
      y = tensor_shape.TensorShape([1])
      self.assertAllEqual(y, bijector.forward_event_shape(x))
      self.assertAllEqual(
          y.as_list(),
          bijector.forward_event_shape_tensor(x.as_list()).eval())
      self.assertAllEqual(x, bijector.inverse_event_shape(y))
      self.assertAllEqual(
          x.as_list(),
          bijector.inverse_event_shape_tensor(y.as_list()).eval())

  def testDocstringExample(self):
    # The Invert docstring example: an exp-gamma distribution built by
    # transforming Gamma with Invert(Exp); a single sample is scalar.
    with self.test_session():
      exp_gamma_distribution = (
          transformed_distribution_lib.TransformedDistribution(
              distribution=gamma_lib.Gamma(concentration=1., rate=2.),
              bijector=bijectors.Invert(bijectors.Exp())))
      self.assertAllEqual(
          [], array_ops.shape(exp_gamma_distribution.sample()).eval())
test.main()
| [
"thomas.warfel@pnnl.gov"
] | thomas.warfel@pnnl.gov |
dc95b43f050b1a562cd5b337aa8b009fa051bb29 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_43/223.py | e3f471696da9db5f1e2d23a13dbcff77e5f7668f | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,509 | py | #!/usr/bin/python
import os
import sys
import math
def read_input_NN(fn=""):
    """Read a Code Jam style input file.

    The first line holds the case count; every following line is one test
    case. Returns the list of stripped case lines (the count line is
    dropped).

    Raises:
        ValueError: if the first line does not start with an integer.
    """
    # `with` guarantees the handle is closed even on error (the original
    # leaked it on exception). The original also used `map(...)[0]`, which
    # is Python-2-only; parse the count with plain indexing instead.
    with open(fn, "r") as fh:
        lines = [line.strip() for line in fh]
    # First line is the number of cases; validate it parses, then discard.
    int(lines[0].split()[0])
    return lines[1:]
def sum_square(str1="123"):
    """Return the sum of the squares of the digit characters in *str1*."""
    return sum(int(digit) ** 2 for digit in str1)
def tobase(base, number):
    """Return *number* written in *base*, as a string of decimal digit values.

    Each base-`base` digit is rendered with ``str``, so digits >= 10 occupy
    several characters (e.g. ``tobase(16, 255) == '1515'``), matching the
    original recursive implementation. Negative numbers get a leading '-'.

    Raises:
        TypeError: if *base* or *number* is not an integer.
        ValueError: if *base* < 2 (base 1 previously recursed forever).
    """
    # The original used Python-2-only constructs (`raise T, msg`, a `1L`
    # literal, and a recursive helper stashed in a global); this iterative
    # version behaves identically on the inputs the solver uses and also
    # runs on Python 3.
    if not isinstance(base, int):
        raise TypeError('invalid base for tobase()')
    if base <= 1:
        # base <= 0 was rejected before; base 1 made the old recursion loop
        # forever, so reject it explicitly as well.
        raise ValueError('invalid base for tobase(): %s' % base)
    if not isinstance(number, int):
        raise TypeError('tobase() of non-integer')
    if number == 0:
        return '0'
    sign = '-' if number < 0 else ''
    n = abs(number)
    digits = []
    while n:
        digits.append(str(n % base))
        n //= base
    return sign + ''.join(reversed(digits))
def determine_happy(base1=10, num1="83"):
    """Return 1 if *num1* (a decimal string) is happy in base *base1*, else 0.

    Repeatedly replaces the value with the sum of squares of its base-`base1`
    digits; reaching "1" means happy, while revisiting any value means the
    sequence has entered a cycle and the number is unhappy.
    """
    seen = set()
    current = tobase(base1, int(num1))
    while current != "1":
        current = tobase(base1, sum_square(current))
        if current == "1":
            return 1
        if current in seen:
            return 0
        seen.add(current)
    return 1
def find_smallest(l2=[1, 2, 3]):
    """Return the smallest integer >= 2 that is happy in every base in *l2*.

    Base-2 entries are filtered out first (presumably because they impose no
    constraint); an empty remaining list makes 1 the trivial answer. The
    search is capped at 1,000,000: if nothing matches, the last candidate
    tried (999,999) is returned, as in the original.

    Note: the default list is never mutated.
    """
    bases = [base for base in l2 if base != 2]
    if not bases:
        return 1
    candidate = 1
    for candidate in range(2, 1000000):
        if all(determine_happy(base, str(candidate)) for base in bases):
            break
    return candidate
def small_base(str1="123"):
    """Interpret *str1* in the smallest base that makes it a valid numeral.

    Digit characters are relabelled in order of first appearance with the
    values 1, 0, 2, 3, ... (first digit gets 1, second gets 0, then counting
    up), the base is the number of distinct characters (minimum 2), and the
    numeric value is returned as a float (math.pow keeps the original float
    return type, which the caller truncates with int(x + .001)).
    """
    # BUG FIX: the original built `dec_list = [1, 0] + range(2, 100)`, which
    # raises TypeError on Python 3 (range is not a list there); wrapping it
    # in list() behaves identically on Python 2.
    dec_list = [1, 0] + list(range(2, 100))
    d_map = {}
    next_value = 0
    digits = []
    for ch in str1:
        if ch not in d_map:
            d_map[ch] = dec_list[next_value]
            next_value += 1
        digits.append(d_map[ch])
    base1 = max(2, len(set(str1)))
    num1 = 0
    for ctr, digit in enumerate(reversed(digits)):
        num1 += math.pow(base1, ctr) * digit
    return num1
def qa(fn="sample"):
    """Load and return the case lines from *fn* via read_input_NN."""
    return read_input_NN(fn)
# Solve the chosen input file and write one "Case #i: answer" line per case.
#l1 = qa(fn="A-large.in.txt")
l1 = qa(fn="A-small-attempt0-1.in.txt")
#print l1
fh = open("out.txt","w")
for (ctr,sol) in enumerate(l1):
    # small_base returns a float; the +.001 guards against int() truncating
    # just below the true integer value. (Python 2 `print >> file` syntax.)
    print >> fh, "Case #"+str(ctr+1)+": "+str(int(small_base(sol)+.001))
    #print small_base(sol)
fh.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
aceb95c7fc2ae2e177bc60e8453b07a43eacbd83 | 2871a5c3d1e885ee72332dbd8ff2c015dbcb1200 | /o2despy/demos/demo2/demo2.py | 46c6865ce1c61201973f24e6aaf3a88ccd687044 | [
"MIT"
] | permissive | huawei-noah/noah-research | 297476299ad040552e44656541858145de72d141 | 82c49c36b76987a46dec8479793f7cf0150839c6 | refs/heads/master | 2023-08-16T19:29:25.439701 | 2023-08-14T03:11:49 | 2023-08-14T03:11:49 | 272,853,727 | 816 | 171 | null | 2023-09-12T01:28:36 | 2020-06-17T01:53:20 | Python | UTF-8 | Python | false | false | 2,379 | py | # Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import datetime
import random
from datetime import timedelta
from o2despy.sandbox import Sandbox
class BirthDeath(Sandbox):
    """Birth-death process demo: every birth schedules both the next birth
    and the death of the new individual, with exponential inter-event
    times drawn from the configured hourly rates."""

    def __init__(self, hourly_birth_rate, hourly_death_rate, seed=0):
        super().__init__(seed=seed)
        self.hourly_birth_rate = hourly_birth_rate
        self.hourly_death_rate = hourly_death_rate
        # Hour counter tracking the current population size over time.
        self.population = self.add_hour_counter()
        # Kick off the process with an immediate first birth event.
        self.schedule([self.birth])

    def _next_interval(self, hourly_rate):
        # Exponential inter-event time, rounded to 2 decimals, as a timedelta.
        return timedelta(hours=round(random.expovariate(hourly_rate), 2))

    def birth(self):
        self.population.observe_change(1)
        print("{0}\tBirth (Population: #{1}!)".format(self.clock_time, self.population.last_count))
        self.schedule([self.birth], self._next_interval(self.hourly_birth_rate))
        self.schedule([self.death], self._next_interval(self.hourly_death_rate))

    def death(self):
        self.population.observe_change(-1)
        print("{0}\tDeath (Population: #{1}!)".format(self.clock_time, self.population.last_count))
if __name__ == '__main__':
    # Demo 2
    print("Demo 2 - Birth Death Process")
    # birth rate 20/hour vs death rate 1/hour, fixed seed for reproducibility.
    sim = BirthDeath(20, 1, seed=1)
    # NOTE(review): warmup presumably runs 24 simulated hours before
    # statistics collection — confirm against the Sandbox API.
    sim.warmup(period=datetime.timedelta(hours=24))
    sim.run(duration=datetime.timedelta(hours=30))
| [
"noreply@github.com"
] | huawei-noah.noreply@github.com |
68d4ffaec3b27b725bd05a50989f1f215328c4b8 | 5f69a6549b8d5e417553d910622e6855b2ae679b | /projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/architecture.py | 123004ef5239c4420d42105dbf720eb77c14adc4 | [
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | permissive | opendr-eu/opendr | 822219f709613d77c5eb62c5d02808d344239835 | b3d6ce670cdf63469fc5766630eb295d67b3d788 | refs/heads/master | 2023-08-31T07:02:36.375231 | 2023-08-29T06:39:51 | 2023-08-29T06:39:51 | 293,755,225 | 535 | 82 | Apache-2.0 | 2023-09-13T16:53:34 | 2020-09-08T08:55:04 | Python | UTF-8 | Python | false | false | 7,410 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torch.nn.utils.spectral_norm as spectral_norm
from .normalization import SPADE
from ...util import util
# ResNet block that uses SPADE.
# It differs from the ResNet block of pix2pixHD in that
# it takes in the segmentation map as input, learns the skip connection if necessary,
# and applies normalization first and then convolution.
# This architecture seemed like a standard architecture for unconditional or
# class-conditional GAN architecture using residual block.
# The code was inspired from https://github.com/LMescheder/GAN_stability.
class SPADEResnetBlock(nn.Module):
    """Residual block whose normalization layers are SPADE, conditioned on a
    semantic segmentation map `seg`.

    NOTE(review): `_forward` applies SPADE-then-conv (and uses `shortcut`),
    while the active `forward` applies conv-then-SPADE. In `_forward`'s
    `shortcut`, `norm_s` (built for `fout` channels) is applied to `x`
    (`fin` channels), which mismatches whenever learned_shortcut is True —
    `_forward` appears to be a dead/legacy path; confirm before reuse.
    """
    def __init__(self, fin, fout, opt):
        super().__init__()
        # Attributes
        # A 1x1 shortcut conv is only learned when channel counts differ.
        self.learned_shortcut = (fin != fout)
        fmiddle = min(fin, fout)
        # create conv layers
        self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1)
        self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
        # apply spectral norm if specified
        if 'spectral' in opt.norm_G:
            self.conv_0 = spectral_norm(self.conv_0)
            self.conv_1 = spectral_norm(self.conv_1)
            if self.learned_shortcut:
                self.conv_s = spectral_norm(self.conv_s)
        # define normalization layers
        # Strip the 'spectral' prefix: the remainder configures SPADE itself.
        spade_config_str = opt.norm_G.replace('spectral', '')
        self.norm_0 = SPADE(spade_config_str, fmiddle, opt.semantic_nc)
        self.norm_1 = SPADE(spade_config_str, fout, opt.semantic_nc)
        if self.learned_shortcut:
            self.norm_s = SPADE(spade_config_str, fout, opt.semantic_nc)
    # note the resnet block with SPADE also takes in |seg|,
    # the semantic segmentation map as input
    def _forward(self, x, seg):
        # Legacy path: SPADE -> activation -> conv (see class NOTE above).
        x_s = self.shortcut(x, seg)
        dx = self.conv_0(self.actvn(self.norm_0(x, seg)))
        dx = self.conv_1(self.actvn(self.norm_1(dx, seg)))
        out = x_s + dx
        return out
    def forward(self, x, seg):
        # Active path: conv -> SPADE -> activation, plus (optionally learned)
        # shortcut, summed residually.
        if self.learned_shortcut:
            x_s = self.norm_s(self.conv_s(x), seg)
        else:
            x_s = x
        dx = self.actvn(self.norm_0(self.conv_0(x), seg))
        dx = self.actvn(self.norm_1(self.conv_1(dx), seg))
        out = x_s + dx
        return out
    def shortcut(self, x, seg):
        # Shortcut used only by _forward (see class NOTE on channel mismatch).
        if self.learned_shortcut:
            x_s = self.conv_s(self.norm_s(x, seg))
        else:
            x_s = x
        return x_s
    def actvn(self, x):
        # LeakyReLU with slope 0.2.
        return F.leaky_relu(x, 2e-1)
# try to put SPADE into pix2pixHD middle layers
class ResnetSPADEBlock(nn.Module):
    """pix2pixHD-style residual block with SPADE normalization spliced in.

    Uses a fixed norm config ('spectralspadesyncbatch3x3') rather than
    reading it from options.
    """
    def __init__(self, dim, semantic_nc, kernel_size=3):
        super().__init__()
        norm_G = 'spectralspadesyncbatch3x3'
        pw = (kernel_size - 1) // 2
        self.conv_0 = nn.Conv2d(dim, dim, kernel_size=kernel_size)
        self.conv_1 = nn.Conv2d(dim, dim, kernel_size=kernel_size)
        # Explicit reflection padding; the convs themselves are unpadded.
        self.padding = nn.ReflectionPad2d(pw)
        if 'spectral' in norm_G:
            # NOTE(review): spectral_norm wraps conv_0/conv_1 in place, but the
            # wrapped modules are registered under NEW names ('conv_block1'/
            # 'conv_block4'), so each conv appears twice in the module tree —
            # confirm this is intended for checkpoint compatibility.
            self.add_module('conv_block1', spectral_norm(self.conv_0))
            self.add_module('conv_block4', spectral_norm(self.conv_1))
        # define normalization layers
        spade_config_str = norm_G.replace('spectral', '')
        self.norm_0 = SPADE(spade_config_str, dim, semantic_nc)
        self.norm_1 = SPADE(spade_config_str, dim, semantic_nc)
    def forward(self, x, seg):
        # pad -> conv -> SPADE -> activation, twice, then residual sum.
        dx = self.padding(x)
        dx = self.activation(self.norm_0(self.conv_0(dx), seg))
        dx = self.padding(dx)
        dx = self.activation(self.norm_1(self.conv_1(dx), seg))
        out = x + dx
        return out
    def activation(self, x):
        # LeakyReLU with slope 0.2.
        return F.leaky_relu(x, 2e-1)
# ResNet block used in pix2pixHD
# We keep the same architecture as pix2pixHD.
class ResnetBlock(nn.Module):
def __init__(self, dim, norm_layer, activation=nn.ReLU(False), kernel_size=3):
super().__init__()
pw = (kernel_size - 1) // 2
self.conv_block = nn.Sequential(
nn.ReflectionPad2d(pw),
norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size)),
activation,
nn.ReflectionPad2d(pw),
norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size)),
# add an activation
activation,
)
def forward(self, x):
y = self.conv_block(x)
out = x + y
return out
# VGG architecter, used for the perceptual loss using a pretrained VGG network
class VGG19(torch.nn.Module):
    """Pretrained VGG19 feature extractor split into 5 slices.

    Used for the perceptual loss: forward() returns the activations after
    each slice. Downloads ImageNet-pretrained weights via torchvision.
    With requires_grad=False (the default) all weights are frozen.
    """
    def __init__(self, requires_grad=False):
        super(VGG19, self).__init__()
        vgg_pretrained_features = torchvision.models.vgg19(pretrained=True).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        # Partition the feature stack at indices 2/7/12/21/30; child module
        # names are kept as the original indices so pretrained weights map 1:1.
        for x in range(2):
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(2, 7):
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(7, 12):
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(12, 21):
            self.slice4.add_module(str(x), vgg_pretrained_features[x])
        for x in range(21, 30):
            self.slice5.add_module(str(x), vgg_pretrained_features[x])
        if not requires_grad:
            # Freeze all weights; the extractor is evaluation-only.
            for param in self.parameters():
                param.requires_grad = False
    def forward(self, X):
        # Returns the intermediate activations of all five slices, in order.
        h_relu1 = self.slice1(X)
        h_relu2 = self.slice2(h_relu1)
        h_relu3 = self.slice3(h_relu2)
        h_relu4 = self.slice4(h_relu3)
        h_relu5 = self.slice5(h_relu4)
        out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
        return out
class VGGFace19(torch.nn.Module):
    """VGG19-BN feature extractor initialized from a VGGFace checkpoint.

    Loads weights from opt.vggface_checkpoint (stripping the
    'module.base.' prefix) instead of torchvision's pretrained weights.

    NOTE(review): the slice boundaries (2/7/12/21/30) are the same indices
    used for plain VGG19 above, but vgg19_bn interleaves BatchNorm layers,
    which shifts where conv/ReLU layers sit in `features` — confirm these
    cut points are intentional.
    """
    def __init__(self, opt, requires_grad=False):
        super(VGGFace19, self).__init__()
        model = torchvision.models.vgg19_bn(pretrained=False)
        ckpt = torch.load(opt.vggface_checkpoint)['state_dict']
        # Copy checkpoint weights into the torchvision model, dropping the
        # DataParallel/base prefix used when the checkpoint was saved.
        util.copy_state_dict(ckpt, model, 'module.base.')
        vgg_pretrained_features = model.features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        # Child modules keep their original indices as names (see VGG19).
        for x in range(2):
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(2, 7):
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(7, 12):
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(12, 21):
            self.slice4.add_module(str(x), vgg_pretrained_features[x])
        for x in range(21, 30):
            self.slice5.add_module(str(x), vgg_pretrained_features[x])
        if not requires_grad:
            # Freeze all weights; the extractor is evaluation-only.
            for param in self.parameters():
                param.requires_grad = False
    def forward(self, X):
        # Returns the intermediate activations of all five slices, in order.
        h_relu1 = self.slice1(X)
        h_relu2 = self.slice2(h_relu1)
        h_relu3 = self.slice3(h_relu2)
        h_relu4 = self.slice4(h_relu3)
        h_relu5 = self.slice5(h_relu4)
        out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
        return out
| [
"noreply@github.com"
] | opendr-eu.noreply@github.com |
726f81a2bbf5d4b11fa85cab9560e8742992fb7b | a0e895ec31d4b376c50e203b7a9c018c288d3287 | /hw2/random_sample_predict.py | ec22bad40ae53aa43ff9832041f1914c7f47e3ff | [] | no_license | eiahb3838ya/2018_ntu_machine_learning | 43c3885c6175c0f753d9597732b59986234c8360 | 256cfa806dc403dcc1b5eb51317cf4972de28787 | refs/heads/master | 2020-04-17T16:32:17.417827 | 2019-01-21T03:54:43 | 2019-01-21T03:54:43 | 166,744,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,117 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 25 15:50:15 2018
@author: eiahb
"""
#import scipy,pprint
#from pprint import pprint
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
#from sklearn.metrics import log_loss
#import datetime
from my_class.common_function import *
from imblearn.over_sampling import SMOTE, ADASYN,RandomOverSampler
# Training run counter/tag used to namespace this run's artifacts.
TRAIN_NUM=11
mylog=init_logging()
# Warm-start weights from the previous run's saved parameters.
# NOTE(review): "chanel" spelling is part of the on-disk filenames — keep it.
w_init=np.load("temp_W_b/lr_W_chanel2.npy")
b_init=np.load("temp_W_b/lr_b_chanel2.npy")
#load prework of train_x
raw_train_x=pd.read_csv("train_x.csv",encoding="big5")
train_x=prework_x(raw_train_x)
#load prework of train_y
raw_train_y=pd.read_csv("train_y.csv",encoding="big5")
train_y=raw_train_y
#load prework of test_x
raw_test_x=pd.read_csv("test_x.csv",encoding="big5")
test_x=prework_x(raw_test_x)
#reshape to fit model
train_x_np=np.array(train_x)
train_y_np=np.array(train_y)#.reshape(-1,)
test_x_np=np.array(test_x)
print("shape of train_x,test_x,train_y_np:",train_x_np.shape,test_x_np.shape,train_y_np.shape)
#resampling
#x_resampled, y_resampled = SMOTE().fit_resample(train_x_np, train_y_np)
#print("shape of X_resampled,y_resampled:",x_resampled.shape,y_resampled.shape)
#train_x=x_resampled.reshape(-1,train_x_np.shape[1])
#train_y=y_resampled.reshape(-1,1)
#print("shape of train_x,train_y:",train_x.shape,train_y.shape)
# Train logistic regression by gradient descent, continuing from the loaded
# weights, then predict on the (feature-scaled) test set and save weights
# for the next run.
lr=Logistic_Regression_gradient()
lr.train(train_x_np,train_y_np,train_num=TRAIN_NUM,w_init=w_init,b_init=b_init,epochs=5000000,batch_size=120)
mylog.info("training done")
test_x_scaled=lr.feature_scaling(test_x_np)
lr.predict(test_x_scaled,train_num=TRAIN_NUM,result=True)
np.save("temp_W_b/lr_W_chanel2.npy",lr.W,)
np.save("temp_W_b/lr_b_chanel2.npy",lr.b,)
#last_W=lr.W
#last_b=lr.b
#mylog.debug("start train #"+str(TRAIN_NUM))
#lr.train(train_x_np,train_y_np,w_init=last_W,b_init=last_b,train_num=TRAIN_NUM,epochs=500000)
#test_x=lr.feature_scaling(test_x)
#last_W=lr.W
#last_b=lr.b
#lr.predict(test_x,result=True,train_num=TRAIN_NUM)
#W = np.zeros((train_x.shape[1], 1))
#np.dot(train_x,W)
#sigmoid_v = np.vectorize(sigmoid)
#sigmoid_v(np.dot(train_x,W))
| [
"eiahb3838ya@gmail.com"
] | eiahb3838ya@gmail.com |
6616f04e1e77a286e4ed8db783e1a8baec073a2c | c008898bf1adbba7110e0747343adbb3b01fc6c1 | /schema.py | e9c598f65df574c60c7bc23db3b509a9ea51aa43 | [] | no_license | gloompi/python-graphql | 45368fb3130dfc7d5e38614c59e638bb713706ee | 29ca5859027029f625196d13e232d3337f64ca36 | refs/heads/master | 2022-12-15T19:22:51.001813 | 2019-06-28T07:53:25 | 2019-06-28T07:53:25 | 194,230,911 | 0 | 0 | null | 2022-12-10T01:36:39 | 2019-06-28T07:46:37 | Python | UTF-8 | Python | false | false | 2,127 | py | import graphene
import json
import uuid
from datetime import datetime
class Post(graphene.ObjectType):
    """GraphQL object type for a post: a title plus its text content."""
    title = graphene.String()
    content = graphene.String()
class User(graphene.ObjectType):
    """GraphQL object type for a user, with a derived avatar_url field.

    NOTE(review): both the `id` and `created_at` default_value expressions
    are evaluated once, at class-definition time, so every User relying on
    the defaults shares the same UUID and timestamp — confirm intended.
    """
    id = graphene.ID(default_value=str(uuid.uuid4()))
    username = graphene.String()
    created_at = graphene.DateTime(default_value=datetime.now())
    avatar_url = graphene.String()
    def resolve_avatar_url(self, info):
        # Derived field built from the user's name and id.
        return f'https://cloudinary.com/{self.username}/{self.id}'
class Query(graphene.ObjectType):
    """Root query type: a greeting, an admin flag, and a demo user list."""
    users = graphene.List(User, limit=graphene.Int())
    hello = graphene.String()
    is_admin = graphene.Boolean()

    def resolve_hello(self, info):
        # Static greeting used to smoke-test the schema.
        return 'world'

    def resolve_is_admin(self, info):
        return True

    def resolve_users(self, info, limit=None):
        # Hard-coded demo users; slicing with limit=None returns them all.
        seed_data = [("1", "Kuba"), ("2", "Tina"), ("3", "Tiger")]
        all_users = [
            User(id=uid, username=name, created_at=datetime.now())
            for uid, name in seed_data
        ]
        return all_users[:limit]
class CreateUser(graphene.Mutation):
    """Mutation that creates a User from a username and returns it."""
    user = graphene.Field(User)

    class Arguments:
        username = graphene.String()

    def mutate(self, info, username):
        # Build the user and hand it back on the mutation payload.
        return CreateUser(user=User(username=username))
class CreatePost(graphene.Mutation):
    """Mutation that creates a Post; rejects unauthenticated callers."""

    post = graphene.Field(Post)

    class Arguments:
        title = graphene.String()
        content = graphene.String()

    def mutate(self, info, title, content):
        # Guard clause: the execution context flags anonymous callers.
        # (The key spelling 'is_anonymus' matches the commented-out caller
        # below in this file — do not "fix" it here alone.)
        if info.context.get('is_anonymus'):
            raise Exception('Not authenticated')
        return CreatePost(post=Post(title=title, content=content))
class Mutation(graphene.ObjectType):
    """Root mutation type wiring the mutation classes into the schema."""
    create_user = CreateUser.Field()
    create_post = CreatePost.Field()
# Build the executable schema and run a sample query against it.
schema = graphene.Schema(query=Query, mutation=Mutation)
result = schema.execute(
    '''
    {
        users {
            id
            createdAt
            username
            avatarUrl
        }
    }
    ''',
    # context={ 'is_anonymus': True }
    # variable_values={'limit': 2}
)
print('ERROR', result.errors)
# Bug fix: result.data is None when execution failed, so calling .items()
# unconditionally (as the original did) raised AttributeError. Only dump
# the payload when there is one.
if result.data is not None:
    dict_result = dict(result.data.items())
    print(json.dumps(dict_result, indent=2))
| [
"gloompi@gmail.com"
] | gloompi@gmail.com |
d869097b1775e96d604c69bdde7348b1eb27b9c8 | ec35df4cc4543f20bd6a1d30f244f67873ecd261 | /045.py | ef73f81e6668900eb23328f490879352303165db | [] | no_license | timsergor/StillPython | 02b6ddc3226cf8d27d8575ca36e75a19cfe6ac9d | 84b3b37bc165b9daf83cca78d577b5a927e483ec | refs/heads/master | 2020-05-24T09:34:14.819413 | 2020-04-19T13:35:06 | 2020-04-19T13:35:06 | 187,209,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | #202. Happy Number. Easy. 46%.
#Write an algorithm to determine if a number is "happy".
#A happy number is a number defined by the following process: Starting with any positive integer, replace the number by the sum of the squares of its digits, and repeat the process until the number equals 1 (where it will stay), or it loops endlessly in a cycle which does not include 1. Those numbers for which this process ends in 1 are happy numbers.
class Solution:
    def isHappy(self, n: int) -> bool:
        """Return True if `n` is a happy number (LeetCode 202).

        Repeatedly replace `n` with the sum of the squares of its decimal
        digits; `n` is happy when this process reaches 1. Any unhappy
        number eventually enters a cycle, detected here with a seen-set.

        Fixes vs. the original: the inner helper no longer shadows the
        built-in `next`, the dict-used-as-set is a real set, and the
        duplicated `n == 1` checks collapse into the loop condition.
        """
        def digit_square_sum(m: int) -> int:
            # Sum of the squares of the decimal digits of m.
            total = 0
            while m > 0:
                m, digit = divmod(m, 10)
                total += digit * digit
            return total

        seen = set()  # values already visited; revisiting one means a cycle
        while n != 1 and n not in seen:
            seen.add(n)
            n = digit_square_sum(n)
        return n == 1
# 15min
| [
"noreply@github.com"
] | timsergor.noreply@github.com |
5d53f962fa73efe64ff7649b6cb54f2d621d15f5 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_7862.py | ece14e6c8a8431fc93ce412ed9641490c5d9c4fa | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35 | py | # python26.dll was not found
Py2.6
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
a78d1316c681b645f01cafd9966a4bb424491802 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03660/s310524772.py | 3b5afbbf4eb0c248015cdabad9bc34a06be65a3e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | N=int(input())
E=[[] for i in range(N)]
for i in range(N-1):
x,y=map(int,input().split())
x-=1
y-=1
E[x].append(y)
E[y].append(x)
from collections import deque
BACK=[-1]*N
Q=deque([0])
while Q:
x=Q.pop()
for to in E[x]:
if BACK[to]==-1:
BACK[to]=x
Q.append(to)
ROAD=[N-1]
while ROAD[-1]!=0:
ROAD.append(BACK[ROAD[-1]])
LEN=len(ROAD)
COLOR=[-1]*N
QW=deque()
QB=deque()
for i in range(LEN//2):
COLOR[ROAD[i]]=1
QB.append(ROAD[i])
for i in range(LEN//2,LEN):
COLOR[ROAD[i]]=0
QW.append(ROAD[i])
SW=0
if LEN%2==1:
SW+=1
SB=0
while QW:
x=QW.pop()
for to in E[x]:
if COLOR[to]==-1:
COLOR[to]=0
SW+=1
QW.append(to)
while QB:
x=QB.pop()
for to in E[x]:
if COLOR[to]==-1:
COLOR[to]=1
SB+=1
QB.append(to)
if SW>SB:
print("Fennec")
else:
print("Snuke")
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
81b8771788890aa2f6794dde1f7552b4c07a80cd | b9b06d86d43e738b62ab9289fc13aae4c2b2670b | /weekend1/py0102/for1.py | 1f39b536774af0ab4e720cc81ddfe41afd787886 | [] | no_license | MrZhangzhg/nsd_2018 | 31a7a8d54e2cb3ff4f4eb5c736fbd76601718356 | 458a1fef40c5e15ba7689fcb3a00baf893ac0218 | refs/heads/master | 2020-04-08T19:08:48.237646 | 2019-09-08T04:31:07 | 2019-09-08T04:31:07 | 159,642,127 | 5 | 7 | null | 2019-01-04T05:33:40 | 2018-11-29T09:37:27 | Python | UTF-8 | Python | false | false | 550 | py | astr = 'tom'
alist = [10, 20]
atuple = ('tom', 'jerry')
adict = {'name': 'tom', 'age': 20}
# Iterating each container type (examples left commented out):
# for ch in astr:
#     print(ch)
#
# for i in alist:
#     print(i)
#
# for name in atuple:
#     print(name)
#
# for key in adict:
#     print(key, adict[key])
# The range() function
print(range(10))
print(list(range(10)))
for i in range(10):
    print(i)
# With a single argument, range() treats it as the stop value: the start
# defaults to 0, and the stop value itself is excluded.
print(list(range(6, 11)))
print(list(range(1, 11, 2)))  # 2 is the step size
print(list(range(10, 0, -1)))
| [
"zhangzg@tedu.cn"
] | zhangzg@tedu.cn |
4499443d25983a3ebaa681cf359a318a7170d7ea | 845d4102771a547dbc447f1d837b89a538f977b7 | /exerciciosComCondicionais/A_CONDICIONAIS/02A_EX12.py | 66f50bc2221fc6508059fec4ac2c14b7d8b00b06 | [] | no_license | TemistoclesZwang/Algoritmo_IFPI_2020 | 16e92d6f3e5e3f15ad573819cbd0171c5a5e3f5d | cc24657864985c3894ab738692807a01eab8d377 | refs/heads/main | 2023-08-23T02:57:58.838585 | 2021-10-05T16:18:14 | 2021-10-05T16:18:14 | 310,669,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | #12. Leia 1 (um) número inteiro e escreva se este número é par ou impar.
def main():
    """Read an integer from the user and report whether it is even or odd."""
    valor = int(input('Insira um número: '))
    verificar(valor)
def verificar(numero):
    """Print 'É par' when `numero` is even, 'É impar' when it is odd.

    `numero` arrives as an int from the caller, so the original's
    redundant int() re-cast has been removed.
    """
    if numero % 2 == 0:
        print('É par')
    else:
        print('É impar')
main()  # script entry point: runs immediately, even on import (no __name__ guard)
| [
"temis2st@gmail.com"
] | temis2st@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.