blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b611313ab562df73dc49ba291b96278a3b16dcc6 | baa57f56d80e444da14a4967497cae8f11d377e1 | /archive/lym_pipeline/training/train_cae.py | fdaa6b110c5d77b861cc54ce8097fec9859f2d6f | [
"BSD-3-Clause"
] | permissive | leoatchina/u24_lymphocyte | ae7b422b08bad6e5c33836662b65c5e91a54e4f4 | 94a72686e41cad874d5e40604cec960939c79a28 | refs/heads/master | 2020-07-16T09:25:37.339784 | 2019-08-09T16:53:43 | 2019-08-09T16:53:43 | 205,763,378 | 0 | 0 | BSD-3-Clause | 2019-09-02T02:36:29 | 2019-09-02T02:36:29 | null | UTF-8 | Python | false | false | 9,561 | py | import pickle
import sys
import os
import urllib
import gzip
import cPickle
import time
import lasagne
import theano
import numpy as np
import theano.tensor as T
from lasagne import layers
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet
from nolearn.lasagne import BatchIterator
from theano.sandbox.neighbours import neibs2images
from lasagne.nonlinearities import sigmoid, rectify, leaky_rectify, identity
from lasagne.nonlinearities import softmax
from lasagne import regularization
from scipy import misc
from PIL import Image
from lasagne import init
from math import floor
from shape import ReshapeLayer
from batch_norms import batch_norm, SoftThresPerc
from data_aug_100x100 import data_aug
from ch_inner_prod import ChInnerProd, ChInnerProdMerge
# --- Training configuration -------------------------------------------------
PS = 100;  # patch size in pixels: network input is PS x PS RGB tiles
# Learning rate lives in a theano shared variable so exc_train() can decay it
# in place without recompiling the training function.
LearningRate = theano.shared(np.array(3e-2, dtype=np.float32));
NumEpochs = 10;   # full passes over the training set
BatchSize = 32;   # mini-batch size fed to the training function
filename_model_ae = 'models/cae_model.pkl'
filename_mu = 'models/mu.pkl';      # per-channel mean, precomputed offline
filename_sigma = 'models/sigma.pkl';  # per-channel std, precomputed offline
# NOTE(review): these pickle files are loaded at import time and the file
# handles are never closed; mu/sigma are assumed to broadcast against
# (N, 3, 100, 100) arrays -- confirm their shapes against the training script.
mu = pickle.load(open(filename_mu, 'rb'));
sigma = pickle.load(open(filename_sigma, 'rb'));
def load_data():
    """Load every patch listed in label.txt, normalize, and return a float32
    array of shape (n_patches, 3, 100, 100).

    Images are read as RGB, transposed to channel-first layout, scaled to
    [0, 1], then standardized with the module-level ``mu`` and ``sigma``
    (which were precomputed offline -- nothing is computed here despite the
    "Computing mean and std" message).
    """
    all_data_folder = '/data03/shared/lehhou/lym_project/data/vals/random_patches_for_all_svs/';
    nbuf = 0;
    # Preallocated upper bound: assumes label.txt lists at most 500000
    # patches -- TODO confirm; more than that would raise IndexError below.
    X_train = np.zeros(shape=(500000, 3, 100, 100), dtype=np.float32);
    lines = [line.rstrip('\n') for line in open(all_data_folder + '/label.txt')];
    for line in lines:
        # First whitespace-separated token of each line is the image suffix.
        full_path = all_data_folder + '/image_' + line.split()[0];
        # .transpose() turns (H, W, 3) into channel-first (3, W, H).
        png = np.array(Image.open(full_path).convert('RGB')).transpose() / 255.0;
        X_train[nbuf, :, :, :] = png;
        nbuf += 1;
    X_train = X_train[0:nbuf];  # trim the unused tail of the buffer
    print "Computing mean and std";
    X_train = (X_train - mu) / sigma;  # standardize with precomputed stats
    print "Data Loaded", X_train.shape[0];
    return X_train;
def iterate_minibatches_ae(inputs, batchsize, shuffle=False):
    """Yield consecutive full mini-batches from ``inputs``.

    When ``shuffle`` is true, samples are drawn in a random order via a
    shuffled index array; otherwise contiguous slices are yielded.  Any
    trailing samples that do not fill a complete batch are dropped.
    """
    n = len(inputs)
    order = None
    if shuffle:
        order = np.arange(n)
        np.random.shuffle(order)
    start = 0
    last_start = n - batchsize
    while start <= last_start:
        if order is None:
            sel = slice(start, start + batchsize)
        else:
            sel = order[start:start + batchsize]
        yield inputs[sel]
        start += batchsize
def build_autoencoder_network():
    """Build the masked convolutional autoencoder graph.

    The network has an encoder that produces a 100-channel feature map and a
    1-channel soft-thresholded sparsity mask, a local decoder that
    reconstructs the image from the masked features, and a parallel global
    branch decoded from a coarse "global_feature" map.  The two
    reconstructions are summed and flattened.

    Returns:
        (network, input_var, mask_var, output_var) where ``network`` is the
        final flattened reconstruction layer, ``input_var`` is the (N,3,PS,PS)
        input tensor, ``mask_var`` is the symbolic mask output, and
        ``output_var`` is the symbolic flattened reconstruction.
    """
    input_var = T.tensor4('input_var');
    # --- Encoder: two conv+pool stages, then deeper convs ---
    layer = layers.InputLayer(shape=(None, 3, PS, PS), input_var=input_var);
    layer = batch_norm(layers.Conv2DLayer(layer, 100, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 120, filter_size=(5,5), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad');
    layer = batch_norm(layers.Conv2DLayer(layer, 240, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Conv2DLayer(layer, 320, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    layer = layers.Pool2DLayer(layer, pool_size=(2,2), stride=2, mode='average_inc_pad');
    layer = batch_norm(layers.Conv2DLayer(layer, 640, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    prely = batch_norm(layers.Conv2DLayer(layer, 1024, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));

    # --- Feature head: 100-channel non-negative feature map ---
    featm    = batch_norm(layers.Conv2DLayer(prely, 640, filter_size=(1,1), nonlinearity=leaky_rectify));
    feat_map = batch_norm(layers.Conv2DLayer(featm, 100, filter_size=(1,1), nonlinearity=rectify, name="feat_map"));
    # --- Mask head: single-channel soft-thresholded sparsity mask ---
    # batch_norm with beta=None, gamma=None leaves the normalized response
    # unscaled so SoftThresPerc controls the mask statistics.
    maskm    = batch_norm(layers.Conv2DLayer(prely, 100, filter_size=(1,1), nonlinearity=leaky_rectify));
    mask_rep = batch_norm(layers.Conv2DLayer(maskm, 1, filter_size=(1,1), nonlinearity=None), beta=None, gamma=None);
    mask_map = SoftThresPerc(mask_rep, perc=98.4, alpha=0.1, beta=init.Constant(0.5), tight=100.0, name="mask_map");
    # Element-wise product of features and mask is the "encoder" output.
    layer = ChInnerProdMerge(feat_map, mask_map, name="encoder");

    # --- Local decoder: mirrors the encoder with deconvolutions ---
    layer = batch_norm(layers.Deconv2DLayer(layer, 1024, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 640, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 320, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 240, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 120, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = batch_norm(layers.Deconv2DLayer(layer, 100, filter_size=(5,5), stride=1, crop='same', nonlinearity=leaky_rectify));
    layer = layers.Deconv2DLayer(layer, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);

    # --- Global branch: compress to a coarse 5-channel "global_feature",
    # then upsample back to the full image resolution ---
    glblf = batch_norm(layers.Conv2DLayer(prely, 128, filter_size=(1,1), nonlinearity=leaky_rectify));
    glblf = layers.Pool2DLayer(glblf, pool_size=(5,5), stride=5, mode='average_inc_pad');
    glblf = batch_norm(layers.Conv2DLayer(glblf, 64, filter_size=(3,3), stride=1, pad='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Conv2DLayer(glblf, 5, filter_size=(1,1), nonlinearity=rectify), name="global_feature");
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 256, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(9,9), stride=5, crop=(2,2), nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 128, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 64, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(4,4), stride=2, crop=(1,1), nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = batch_norm(layers.Deconv2DLayer(glblf, 32, filter_size=(3,3), stride=1, crop='same', nonlinearity=leaky_rectify));
    glblf = layers.Deconv2DLayer(glblf, 3, filter_size=(1,1), stride=1, crop='same', nonlinearity=identity);

    # Sum local and global reconstructions, then flatten per sample so the
    # loss can compare against a flattened target matrix.
    layer = layers.ElemwiseSumLayer([layer, glblf]);
    network = ReshapeLayer(layer, ([0], -1));
    mask_var = lasagne.layers.get_output(mask_map);
    output_var = lasagne.layers.get_output(network);

    return network, input_var, mask_var, output_var;
def build_training_function(network, input_var, mask_var, output_var):
    """Compile the Theano training function.

    Minimizes mean squared reconstruction error with Nesterov momentum,
    using the module-level shared ``LearningRate``.  The compiled function
    maps (input batch, flattened target batch) -> (loss, mask activations).
    """
    print("building training function")
    targets = T.matrix('target_var')
    mse = lasagne.objectives.squared_error(output_var, targets).mean()
    trainable = lasagne.layers.get_all_params(network, trainable=True)
    grad_updates = lasagne.updates.nesterov_momentum(
        mse, trainable, learning_rate=LearningRate, momentum=0.9)
    fn = theano.function([input_var, targets], [mse, mask_var], updates=grad_updates)
    print("finish building training function")
    return fn
def exc_train(train_func, X_train, network):
    """Run the mini-batch training loop and checkpoint the model each epoch.

    Each epoch is split into ``it_div`` interleaved slices of ``X_train``
    (``X_train[it::it_div]``), so every reported iteration still sees the
    whole data distribution.  After every slice the shared learning rate is
    decayed by 1%%, and a progress row (epoch, iter, loss, mask sparsity,
    elapsed seconds) is printed.

    Parameters:
        train_func: compiled function from build_training_function().
        X_train:    normalized training array, shape (N, 3, 100, 100).
        network:    final lasagne layer, used to snapshot all parameters.
    """
    print("Starting training...")
    print("Epoch\t\tIter\t\tLoss\t\tSpar\t\tTime")
    it_div = 100  # number of interleaved reporting slices per epoch
    for epoch in range(NumEpochs):
        start_time = time.time()
        for it in range(it_div):
            total_loss = 0
            total_sparsity = 0
            n_batch = 0
            for batch in iterate_minibatches_ae(X_train[it::it_div], BatchSize, shuffle=True):
                batch = data_aug(batch)
                # Target is the (augmented) input itself, flattened per sample
                # to match the network's flattened reconstruction output.
                batch_target = np.reshape(batch, (batch.shape[0], -1))
                loss, mask = train_func(batch, batch_target)
                total_loss += loss
                total_sparsity += 100.0 * float(np.count_nonzero(mask > 1e-6)) / mask.size
                n_batch += 1
            # Guard against a slice too small to yield a single full batch,
            # which previously raised ZeroDivisionError.
            if n_batch > 0:
                total_loss /= n_batch
                total_sparsity /= n_batch
            # Decay the shared learning rate by 1% per slice (~0.366x/epoch).
            LearningRate.set_value(np.float32(0.99 * LearningRate.get_value()))
            print("{:d}\t\t{:d}\t\t{:.4f}\t\t{:.3f}\t\t{:.3f}".format(
                epoch, it, total_loss, total_sparsity, time.time() - start_time))
            start_time = time.time()
        if epoch % 1 == 0:
            # BUG FIX: open the checkpoint with a context manager in binary
            # mode; the old code leaked the file handle and wrote pickles in
            # text mode ('w'), which corrupts non-ASCII pickle protocols.
            # NOTE(review): filename_model_ae contains no '{}' placeholder,
            # so .format(epoch) is a no-op and every epoch overwrites the
            # same file -- confirm whether per-epoch files were intended.
            param_values = layers.get_all_param_values(network)
            with open(filename_model_ae.format(epoch), 'wb') as f:
                pickle.dump(param_values, f)
def main():
    """Entry point: load patches, build and compile the CAE, then train."""
    training_set = load_data()
    net, in_var, m_var, out_var = build_autoencoder_network()
    trainer = build_training_function(net, in_var, m_var, out_var)
    exc_train(trainer, training_set, net)
    print("DONE !")
if __name__ == "__main__":
main();
| [
"lehhou@bright70.cm.cluster"
] | lehhou@bright70.cm.cluster |
e5981c44bf76ad309ffd21b778e24f0e1ed246a9 | ec00584ab288267a7cf46c5cd4f76bbec1c70a6b | /interview-preparation/main_.py | 278fa19f1768df79f357dc82cd5c91b575a8ce5a | [] | no_license | rahuldbhadange/Python | b4cc806ff23953389c9507f43d817b3815260e19 | 7e162117f1acc12537c7eeb36d6983d804122ff3 | refs/heads/master | 2021-06-23T05:04:20.053777 | 2020-01-28T10:34:28 | 2020-01-28T10:34:28 | 217,307,612 | 0 | 0 | null | 2021-06-10T22:44:11 | 2019-10-24T13:35:42 | Python | UTF-8 | Python | false | false | 7,787 | py | #####################################
# Breadth First Search / Flood fill
# Davis MT
# 28.01.2018
#####################################
import turtle # import turtle library
import time
import sys
from collections import deque
# --- Turtle window setup ---
wn = turtle.Screen() # the shared drawing window used by all sprites below
wn.bgcolor("black") # set the background colour
wn.title("A BFS Maze Solving Program")
wn.setup(1300,700) # window dimensions in pixels (fits the 51x25 cell maze)
# this is the class for the Maze
class Maze(turtle.Turtle): # define a Maze class
    """White square sprite stamped once per wall cell of the maze."""
    def __init__(self):
        turtle.Turtle.__init__(self)
        self.shape("square") # the turtle shape
        self.color("white") # colour of the turtle
        self.penup() # lift up the pen so it does not leave a trail
        self.speed(0) # fastest animation setting
# this is the class for the finish line - green square in the maze
class Green(turtle.Turtle):
    """Green square sprite: marks the exit cell and BFS-visited cells."""
    def __init__(self):
        turtle.Turtle.__init__(self)
        self.shape("square")
        self.color("green")
        self.penup() # no trail while moving between stamps
        self.speed(0) # fastest animation setting
class Blue(turtle.Turtle):
    """Blue square sprite; used (when uncommented) to show frontier cells."""
    def __init__(self):
        turtle.Turtle.__init__(self)
        self.shape("square")
        self.color("blue")
        self.penup() # no trail while moving between stamps
        self.speed(0) # fastest animation setting
# this is the class for the yellow or turtle
class Red(turtle.Turtle):
    """Red square sprite: marks the start cell of the maze."""
    def __init__(self):
        turtle.Turtle.__init__(self)
        self.shape("square")
        self.color("red")
        self.penup() # no trail while moving between stamps
        self.speed(0) # fastest animation setting
class Yellow(turtle.Turtle):
    """Yellow square sprite: traces the solved route during backRoute()."""
    def __init__(self):
        turtle.Turtle.__init__(self)
        self.shape("square")
        self.color("yellow")
        self.penup() # no trail while moving between stamps
        self.speed(0) # fastest animation setting
# grid = [
# "+++++++++++++++",
# "+s+ + +e+",
# "+ +++++ +++ + +",
# "+ + + + +",
# "+ + +++ + + +",
# "+ + + + + + +",
# "+ + + + + +",
# "+++++ + + + +",
# "+ + + +",
# "+++++++++++++++",
# ]
# grid = [
# "+++++++++",
# "+ ++s++++",
# "+ ++ ++++",
# "+ ++ ++++",
# "+ ++++",
# "++++ ++++",
# "++++ ++++",
# "+ e+",
# "+++++++++",
# ]
# grid = [
# "+++++++++++++++",
# "+ +",
# "+ +",
# "+ +",
# "+ e +",
# "+ +",
# "+ +",
# "+ +",
# "+ s +",
# "+++++++++++++++",
# ]
# Maze layout: '+' = wall, ' ' = walkable path, 's' = start, 'e' = exit.
# Each character becomes one 24-pixel screen cell in setup_maze().
grid = [
"+++++++++++++++++++++++++++++++++++++++++++++++++++",
"+ + +",
"+ ++++++++++ +++++++++++++ +++++++ ++++++++++++",
"+s + + ++ +",
"+ +++++++ +++++++++++++ +++++++++++++++++++++ +",
"+ + + + + + +++ +",
"+ + + + + + ++++ + + +++++++++++++ +++ +",
"+ + + + + + + + + + + +",
"+ + ++++ + ++++++++++ + + ++++ + + ++ +",
"+ + + + + + + + ++ ++",
"+ ++++ + +++++++ ++++++++ +++++++++++++ ++ ++",
"+ + + + + ++ +",
"++++ + ++++++++++ +++++++++++ ++++++++++ +++ +",
"+ + + + + + + +++ +",
"+ + ++++ +++++++++++++ + ++++ + + + ++ +",
"+ + + + + + + + + + ++ ++",
"+ + + +++++++ ++++ + + + ++++++++++ ++ ++",
"+ + + + ++ ++",
"+ ++++++ + + + + +++ +++ ++",
"+ ++++++ ++++++ +++++++++ ++ ++ ++++++++++ ++",
"+ + + +++ + +++++++++ ++ +++++++ + ++",
"+ ++++ ++++ +++ + +++ +++ ++ ++ ++ ++ + ++",
"+ ++++ + + +++ +++ ++ ++++++++ ++ ++ ++ ++",
"+ ++ +++++++e+++ ++ ++ +++++++",
"+++++++++++++++++++++++++++++++++++++++++++++++++++",
]
def setup_maze(grid): # draw the maze and record wall/path coordinates
    """Render ``grid`` on screen and populate the global search structures.

    Walls are stamped and recorded in ``walls``; walkable cells ('' ''/'e')
    are recorded in ``path``; 's' and 'e' set the global start/end
    coordinates.  Each grid character maps to a 24-pixel screen cell.
    """
    global start_x, start_y, end_x, end_y # set up global variables for start and end locations
    for y in range(len(grid)): # read in the grid line by line
        for x in range(len(grid[y])): # read each cell in the line
            character = grid[y][x] # the maze symbol at column x, row y
            screen_x = -588 + (x * 24) # screen x coordinate, starting at -588
            screen_y = 288 - (y * 24) # screen y coordinate, starting at 288 (rows grow downward)
            if character == "+":
                maze.goto(screen_x, screen_y) # move pen to the wall cell and
                maze.stamp() # stamp a copy of the turtle on the screen
                walls.append((screen_x, screen_y)) # add coordinate to walls list
            if character == " " or character == "e":
                path.append((screen_x, screen_y)) # add " " and e to path list
            if character == "e":
                green.color("purple")
                green.goto(screen_x, screen_y) # send green sprite to screen location
                end_x, end_y = screen_x,screen_y # remember the exit coordinates
                green.stamp()
                green.color("green")
            if character == "s":
                start_x, start_y = screen_x, screen_y # remember the start coordinates
                red.goto(screen_x, screen_y)
def endProgram():
    """Wait for a click on the turtle window, then exit the interpreter."""
    wn.exitonclick()
    sys.exit()
def search(x,y):
    """Breadth-first flood fill over the global ``path`` cells from (x, y).

    Fills the global ``solution`` dict, mapping each reached cell to the
    cell it was discovered from, so backRoute() can walk the route back
    from the exit to the start.  Stamps the green sprite on every expanded
    cell to animate the search.

    Fixes over the original: the four copy-pasted neighbour checks are
    collapsed into one loop, the leftover debug ``print(solution)`` inside
    the loop is removed, and the no-op ``time.sleep(0)`` and dead
    commented-out frontier-drawing code are dropped.
    """
    frontier.append((x, y))
    solution[x, y] = x, y  # the start cell is its own predecessor
    while len(frontier) > 0:  # expand until no reachable cell remains
        x, y = frontier.popleft()  # next cell in FIFO (breadth-first) order
        # Visit the four orthogonal neighbours -- cells are 24 px apart --
        # in the original order: left, down, right, up.
        for dx, dy in ((-24, 0), (0, -24), (24, 0), (0, 24)):
            cell = (x + dx, y + dy)
            if cell in path and cell not in visited:
                solution[cell] = x, y  # remember which cell discovered it
                frontier.append(cell)  # schedule for later expansion
                visited.add(cell)
        green.goto(x, y)
        green.stamp()
def backRoute(x, y):
    """Trace the solved route backwards from (x, y) to the start cell,
    stamping the yellow sprite on every step via the global ``solution``
    predecessor map."""
    cur = (x, y)
    yellow.goto(cur)
    yellow.stamp()
    while cur != (start_x, start_y):  # stop once the start cell is reached
        cur = solution[cur]  # step to the cell we were discovered from
        yellow.goto(cur)
        yellow.stamp()
# --- Sprite instances used by the drawing and search routines ---
maze = Maze()
red = Red()
blue = Blue()
green = Green()
yellow = Yellow()
# --- Global search state shared by setup_maze / search / backRoute ---
walls = []          # wall cell coordinates (drawn, not searched)
path = []           # walkable cell coordinates
visited = set()     # cells already added to the frontier
frontier = deque()  # FIFO queue of cells awaiting expansion (BFS)
solution = {} # predecessor map: cell -> cell it was discovered from
# --- Main program: draw, search, trace the route, wait for a click ---
setup_maze(grid)
search(start_x,start_y)
backRoute(end_x, end_y)
wn.exitonclick()
| [
"46024570+rahuldbhadange@users.noreply.github.com"
] | 46024570+rahuldbhadange@users.noreply.github.com |
4827d7f523d7fa49654a251a821cafa30ee7bb77 | 820acfd783cc9752c51e2899b87994bb3044a895 | /tests/commands/source/test_xml.py | 930330c424cae5be04b493044dbc4ede77ddc33e | [] | no_license | trimailov/spinta | ea495a9d4ebfbd8c13c644691824edf967c90091 | c242ea8013a048a5cfd2bfc4dd8687d37bfc3ef7 | refs/heads/master | 2020-05-03T21:56:25.040555 | 2019-04-09T11:19:43 | 2019-04-09T11:19:43 | 178,834,328 | 0 | 0 | null | 2019-04-01T09:54:27 | 2019-04-01T09:54:27 | null | UTF-8 | Python | false | false | 1,009 | py | import operator
import pathlib
from responses import GET
from spinta.utils.itertools import consume
def test_xml(store, responses):
    """Pull the XML fixture through the store and check the first two
    tenure records (sorted by id)."""
    fixture = pathlib.Path(__file__).parents[2] / 'data/data.xml'
    responses.add(
        GET, 'http://example.com/data.xml',
        status=200, content_type='application/xml; charset=utf-8',
        body=fixture.read_bytes(),
        stream=True,
    )

    # Pulling the 'xml' source should yield exactly 8 records.
    assert consume(store.pull('xml')) == 8

    expected = [
        {
            'type': 'tenure/:source/xml',
            'id': '11a0764da48b674ce0c09982e7c43002b510d5b5',
            'title': '1996–2000 metų kadencija',
            'since': '1996-11-25',
            'until': '2000-10-18',
        },
        {
            'type': 'tenure/:source/xml',
            'id': '1cc7ac9d26603972f6c471a284ff37b9868854d9',
            'title': '2016–2020 metų kadencija',
            'since': '2016-11-14',
            'until': '',
        },
    ]
    tenures = sorted(store.getall('tenure', {'source': 'xml'}),
                     key=operator.itemgetter('id'))
    assert tenures[:2] == expected
| [
"sirexas@gmail.com"
] | sirexas@gmail.com |
2d964cd6ec312568c0d249da7be40655fb225323 | 5e6d8b9989247801718dd1f10009f0f7f54c1eb4 | /sdk/python/pulumi_azure_native/network/v20201101/public_ip_prefix.py | a192baac10aaeacd68a8cc241def52ba00bf7b86 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | vivimouret29/pulumi-azure-native | d238a8f91688c9bf09d745a7280b9bf2dd6d44e0 | 1cbd988bcb2aa75a83e220cb5abeb805d6484fce | refs/heads/master | 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,658 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PublicIPPrefixArgs', 'PublicIPPrefix']
@pulumi.input_type
class PublicIPPrefixArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
custom_ip_prefix: Optional[pulumi.Input['SubResourceArgs']] = None,
extended_location: Optional[pulumi.Input['ExtendedLocationArgs']] = None,
id: Optional[pulumi.Input[str]] = None,
ip_tags: Optional[pulumi.Input[Sequence[pulumi.Input['IpTagArgs']]]] = None,
location: Optional[pulumi.Input[str]] = None,
nat_gateway: Optional[pulumi.Input['NatGatewayArgs']] = None,
prefix_length: Optional[pulumi.Input[int]] = None,
public_ip_address_version: Optional[pulumi.Input[Union[str, 'IPVersion']]] = None,
public_ip_prefix_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input['PublicIPPrefixSkuArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a PublicIPPrefix resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input['SubResourceArgs'] custom_ip_prefix: The customIpPrefix that this prefix is associated with.
:param pulumi.Input['ExtendedLocationArgs'] extended_location: The extended location of the public ip address.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[Sequence[pulumi.Input['IpTagArgs']]] ip_tags: The list of tags associated with the public IP prefix.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input['NatGatewayArgs'] nat_gateway: NatGateway of Public IP Prefix.
:param pulumi.Input[int] prefix_length: The Length of the Public IP Prefix.
:param pulumi.Input[Union[str, 'IPVersion']] public_ip_address_version: The public IP address version.
:param pulumi.Input[str] public_ip_prefix_name: The name of the public IP prefix.
:param pulumi.Input['PublicIPPrefixSkuArgs'] sku: The public IP prefix SKU.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[Sequence[pulumi.Input[str]]] zones: A list of availability zones denoting the IP allocated for the resource needs to come from.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if custom_ip_prefix is not None:
pulumi.set(__self__, "custom_ip_prefix", custom_ip_prefix)
if extended_location is not None:
pulumi.set(__self__, "extended_location", extended_location)
if id is not None:
pulumi.set(__self__, "id", id)
if ip_tags is not None:
pulumi.set(__self__, "ip_tags", ip_tags)
if location is not None:
pulumi.set(__self__, "location", location)
if nat_gateway is not None:
pulumi.set(__self__, "nat_gateway", nat_gateway)
if prefix_length is not None:
pulumi.set(__self__, "prefix_length", prefix_length)
if public_ip_address_version is not None:
pulumi.set(__self__, "public_ip_address_version", public_ip_address_version)
if public_ip_prefix_name is not None:
pulumi.set(__self__, "public_ip_prefix_name", public_ip_prefix_name)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if zones is not None:
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="customIPPrefix")
def custom_ip_prefix(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
The customIpPrefix that this prefix is associated with.
"""
return pulumi.get(self, "custom_ip_prefix")
@custom_ip_prefix.setter
def custom_ip_prefix(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "custom_ip_prefix", value)
@property
@pulumi.getter(name="extendedLocation")
def extended_location(self) -> Optional[pulumi.Input['ExtendedLocationArgs']]:
"""
The extended location of the public ip address.
"""
return pulumi.get(self, "extended_location")
@extended_location.setter
def extended_location(self, value: Optional[pulumi.Input['ExtendedLocationArgs']]):
pulumi.set(self, "extended_location", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="ipTags")
def ip_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpTagArgs']]]]:
"""
The list of tags associated with the public IP prefix.
"""
return pulumi.get(self, "ip_tags")
@ip_tags.setter
def ip_tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IpTagArgs']]]]):
pulumi.set(self, "ip_tags", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="natGateway")
def nat_gateway(self) -> Optional[pulumi.Input['NatGatewayArgs']]:
"""
NatGateway of Public IP Prefix.
"""
return pulumi.get(self, "nat_gateway")
@nat_gateway.setter
def nat_gateway(self, value: Optional[pulumi.Input['NatGatewayArgs']]):
pulumi.set(self, "nat_gateway", value)
@property
@pulumi.getter(name="prefixLength")
def prefix_length(self) -> Optional[pulumi.Input[int]]:
"""
The Length of the Public IP Prefix.
"""
return pulumi.get(self, "prefix_length")
@prefix_length.setter
def prefix_length(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "prefix_length", value)
@property
@pulumi.getter(name="publicIPAddressVersion")
def public_ip_address_version(self) -> Optional[pulumi.Input[Union[str, 'IPVersion']]]:
"""
The public IP address version.
"""
return pulumi.get(self, "public_ip_address_version")
@public_ip_address_version.setter
def public_ip_address_version(self, value: Optional[pulumi.Input[Union[str, 'IPVersion']]]):
pulumi.set(self, "public_ip_address_version", value)
@property
@pulumi.getter(name="publicIpPrefixName")
def public_ip_prefix_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the public IP prefix.
"""
return pulumi.get(self, "public_ip_prefix_name")
@public_ip_prefix_name.setter
def public_ip_prefix_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "public_ip_prefix_name", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['PublicIPPrefixSkuArgs']]:
"""
The public IP prefix SKU.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['PublicIPPrefixSkuArgs']]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of availability zones denoting the IP allocated for the resource needs to come from.
"""
return pulumi.get(self, "zones")
@zones.setter
def zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "zones", value)
class PublicIPPrefix(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
custom_ip_prefix: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
extended_location: Optional[pulumi.Input[pulumi.InputType['ExtendedLocationArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
ip_tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpTagArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
nat_gateway: Optional[pulumi.Input[pulumi.InputType['NatGatewayArgs']]] = None,
prefix_length: Optional[pulumi.Input[int]] = None,
public_ip_address_version: Optional[pulumi.Input[Union[str, 'IPVersion']]] = None,
public_ip_prefix_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['PublicIPPrefixSkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
Public IP prefix resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] custom_ip_prefix: The customIpPrefix that this prefix is associated with.
:param pulumi.Input[pulumi.InputType['ExtendedLocationArgs']] extended_location: The extended location of the public ip address.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpTagArgs']]]] ip_tags: The list of tags associated with the public IP prefix.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[pulumi.InputType['NatGatewayArgs']] nat_gateway: NatGateway of Public IP Prefix.
:param pulumi.Input[int] prefix_length: The Length of the Public IP Prefix.
:param pulumi.Input[Union[str, 'IPVersion']] public_ip_address_version: The public IP address version.
:param pulumi.Input[str] public_ip_prefix_name: The name of the public IP prefix.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[pulumi.InputType['PublicIPPrefixSkuArgs']] sku: The public IP prefix SKU.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[Sequence[pulumi.Input[str]]] zones: A list of availability zones denoting the IP allocated for the resource needs to come from.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PublicIPPrefixArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Public IP prefix resource.
:param str resource_name: The name of the resource.
:param PublicIPPrefixArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PublicIPPrefixArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
custom_ip_prefix: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
extended_location: Optional[pulumi.Input[pulumi.InputType['ExtendedLocationArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
ip_tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpTagArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
nat_gateway: Optional[pulumi.Input[pulumi.InputType['NatGatewayArgs']]] = None,
prefix_length: Optional[pulumi.Input[int]] = None,
public_ip_address_version: Optional[pulumi.Input[Union[str, 'IPVersion']]] = None,
public_ip_prefix_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['PublicIPPrefixSkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PublicIPPrefixArgs.__new__(PublicIPPrefixArgs)
__props__.__dict__["custom_ip_prefix"] = custom_ip_prefix
__props__.__dict__["extended_location"] = extended_location
__props__.__dict__["id"] = id
__props__.__dict__["ip_tags"] = ip_tags
__props__.__dict__["location"] = location
__props__.__dict__["nat_gateway"] = nat_gateway
__props__.__dict__["prefix_length"] = prefix_length
__props__.__dict__["public_ip_address_version"] = public_ip_address_version
__props__.__dict__["public_ip_prefix_name"] = public_ip_prefix_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["sku"] = sku
__props__.__dict__["tags"] = tags
__props__.__dict__["zones"] = zones
__props__.__dict__["etag"] = None
__props__.__dict__["ip_prefix"] = None
__props__.__dict__["load_balancer_frontend_ip_configuration"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["public_ip_addresses"] = None
__props__.__dict__["resource_guid"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20201101:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20180701:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20180701:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20180801:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20180801:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20181001:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20181001:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20181101:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20181101:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20181201:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20181201:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20190201:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20190201:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20190401:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20190401:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20190601:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20190601:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20190701:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20190701:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20190801:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20190801:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20190901:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20190901:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20191101:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20191101:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20191201:PublicIPPrefix"), 
pulumi.Alias(type_="azure-nextgen:network/v20191201:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20200301:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20200301:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20200401:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20200401:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20200501:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20200501:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20200601:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20200601:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20200701:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20200701:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20200801:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20200801:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20210201:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20210201:PublicIPPrefix"), pulumi.Alias(type_="azure-native:network/v20210301:PublicIPPrefix"), pulumi.Alias(type_="azure-nextgen:network/v20210301:PublicIPPrefix")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PublicIPPrefix, __self__).__init__(
'azure-native:network/v20201101:PublicIPPrefix',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None) -> 'PublicIPPrefix':
    """
    Get an existing PublicIPPrefix resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    # Bind the lookup to the existing resource's provider ID.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

    # Every output starts as None; the provider fills the values in when it
    # reads the existing resource's state.
    __props__ = PublicIPPrefixArgs.__new__(PublicIPPrefixArgs)

    __props__.__dict__["custom_ip_prefix"] = None
    __props__.__dict__["etag"] = None
    __props__.__dict__["extended_location"] = None
    __props__.__dict__["ip_prefix"] = None
    __props__.__dict__["ip_tags"] = None
    __props__.__dict__["load_balancer_frontend_ip_configuration"] = None
    __props__.__dict__["location"] = None
    __props__.__dict__["name"] = None
    __props__.__dict__["nat_gateway"] = None
    __props__.__dict__["prefix_length"] = None
    __props__.__dict__["provisioning_state"] = None
    __props__.__dict__["public_ip_address_version"] = None
    __props__.__dict__["public_ip_addresses"] = None
    __props__.__dict__["resource_guid"] = None
    __props__.__dict__["sku"] = None
    __props__.__dict__["tags"] = None
    __props__.__dict__["type"] = None
    __props__.__dict__["zones"] = None
    return PublicIPPrefix(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="customIPPrefix")
def custom_ip_prefix(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
    """
    The customIpPrefix that this prefix is associated with.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "custom_ip_prefix")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
    """
    A unique read-only string that changes whenever the resource is updated.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "etag")
@property
@pulumi.getter(name="extendedLocation")
def extended_location(self) -> pulumi.Output[Optional['outputs.ExtendedLocationResponse']]:
    """
    The extended location of the public ip address.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "extended_location")
@property
@pulumi.getter(name="ipPrefix")
def ip_prefix(self) -> pulumi.Output[str]:
    """
    The allocated Prefix.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "ip_prefix")
@property
@pulumi.getter(name="ipTags")
def ip_tags(self) -> pulumi.Output[Optional[Sequence['outputs.IpTagResponse']]]:
    """
    The list of tags associated with the public IP prefix.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "ip_tags")
@property
@pulumi.getter(name="loadBalancerFrontendIpConfiguration")
def load_balancer_frontend_ip_configuration(self) -> pulumi.Output['outputs.SubResourceResponse']:
    """
    The reference to load balancer frontend IP configuration associated with the public IP prefix.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "load_balancer_frontend_ip_configuration")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
    """
    Resource location.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    Resource name.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "name")
@property
@pulumi.getter(name="natGateway")
def nat_gateway(self) -> pulumi.Output[Optional['outputs.NatGatewayResponse']]:
    """
    NatGateway of Public IP Prefix.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "nat_gateway")
@property
@pulumi.getter(name="prefixLength")
def prefix_length(self) -> pulumi.Output[Optional[int]]:
    """
    The Length of the Public IP Prefix.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "prefix_length")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
    """
    The provisioning state of the public IP prefix resource.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicIPAddressVersion")
def public_ip_address_version(self) -> pulumi.Output[Optional[str]]:
    """
    The public IP address version.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "public_ip_address_version")
@property
@pulumi.getter(name="publicIPAddresses")
def public_ip_addresses(self) -> pulumi.Output[Sequence['outputs.ReferencedPublicIpAddressResponse']]:
    """
    The list of all referenced PublicIPAddresses.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "public_ip_addresses")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[str]:
    """
    The resource GUID property of the public IP prefix resource.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.PublicIPPrefixSkuResponse']]:
    """
    The public IP prefix SKU.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """
    Resource tags.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
    """
    Resource type.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "type")
@property
@pulumi.getter
def zones(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """
    A list of availability zones denoting the IP allocated for the resource needs to come from.
    """
    # Read-only output; value is resolved by the Azure provider at runtime.
    return pulumi.get(self, "zones")
| [
"noreply@github.com"
] | vivimouret29.noreply@github.com |
b1c38b5c878766c01da5a18679414e6c7da92591 | acf7457d3a799cb9bff12686d2d616688bcd4b5b | /packages/python/plotly/plotly/validators/scatter3d/marker/colorbar/_yref.py | 7e6de12fbde3d181d7496e94eb85d4bc1c52ff16 | [
"MIT"
] | permissive | plotly/plotly.py | f4f61639f08160f16195efc95b5901dc5a937346 | 975a704074f01c078e0fdfa32bdf17130bf89e69 | refs/heads/master | 2023-09-06T06:15:08.340035 | 2023-08-24T12:28:14 | 2023-08-24T12:28:14 | 14,579,099 | 14,751 | 2,989 | MIT | 2023-09-08T19:55:32 | 2013-11-21T05:53:08 | Python | UTF-8 | Python | false | false | 489 | py | import _plotly_utils.basevalidators
class YrefValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    # Auto-generated plotly validator for `scatter3d.marker.colorbar.yref`.
    # Accepts only the enumerated values "container" or "paper"; callers may
    # override `edit_type` / `values` through kwargs.
    def __init__(
        self, plotly_name="yref", parent_name="scatter3d.marker.colorbar", **kwargs
    ):
        super(YrefValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            values=kwargs.pop("values", ["container", "paper"]),
            **kwargs,
        )
| [
"liam@plot.ly"
] | liam@plot.ly |
0b6359ff6e3d4a1aad8a712944c8a1246ff1efa7 | 28ce113cc12aab7f2809c2cb26763055f1722293 | /groceryProject/groceryProject/wsgi.py | 082460953e1d280c971b7a18c90b76e6d43315ab | [] | no_license | sd8917/GroceryWebApp | a899263b5220436d52a0c7a9beb7a379d3f6e112 | 5aa4f3c3e2c9fe0351dafe9ca9471a9fb2fad49e | refs/heads/master | 2022-04-15T15:09:41.375925 | 2020-04-13T10:07:39 | 2020-04-13T10:07:39 | 255,280,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | """
WSGI config for groceryProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at the project settings module before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'groceryProject.settings')

# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"sudhanshuraj8917@gmail.com"
] | sudhanshuraj8917@gmail.com |
8d1f53f681778dcace34d0bad67e4ce83baac7db | 58d832a76c6cf2d80c31310631efca20658f68d6 | /neural_network/word2vec/createDataset.py | 8280f31ba06e3928f5b80bb70f24df59431b160b | [] | no_license | OneClickDeepLearning/classificationOfResidentialRequests | e6fe0071054337af8cf14acaa3b598123575ce6c | 397dfa6ef0e08669b329f17bd5741de0484e3c9c | refs/heads/master | 2020-05-30T18:52:01.085590 | 2019-09-16T02:36:36 | 2019-09-16T02:36:36 | 189,906,790 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | import csv
import re
import jieba
import pandas as pd

# Input: CSV of resident requests (GB18030-encoded); output: one
# whitespace-tokenized request per line for word2vec training.
file1 = '/Users/sunjincheng/Desktop/Prof-project/NLPproject/data/valid_data_all.csv'
# df = pd.read_csv(file1,encoding='gb18030')
file2 = '../data/split_data.txt'
read_file = open(file1, encoding='gb18030')
lines = csv.reader(read_file)
count=0
# Token counts per request (collected for length statistics).
lengths = []
# NOTE(review): output file is opened in append mode, so re-running the
# script adds duplicate lines rather than overwriting.
write_file = open(file2,'a',encoding='utf-8')
for line in lines:
    count +=1
    # Skip the CSV header row.
    if(count == 1):
        continue
    # Column 8 holds the request text.
    sent = line[8]
    # Strip boilerplate openings ("citizen calls to inquire/report").
    sent = re.sub('市民来电咨询', '', sent)
    sent = re.sub('市民来电反映', '', sent)
    # Remove whitespace, ASCII alphanumerics and ASCII/Chinese punctuation.
    sent = re.sub("[\s+\.\!\/_,$%^*(+\"\']+|[a-zA-Z0-9+——!,。?、~@#¥%……&*()《》::]+", "",sent)
    # Segment the Chinese text into words and record the token count.
    splits = jieba.cut(sent)
    length = len(list(splits))
    lengths.append(length)
    # result = ' '.join(splits)
    # print(sent)
    # print(result)
    # print(result.split())
    # write_file.write(result)
    # write_file.write('\n')
    # Progress indicator.
    if (count % 10000 == 0):
        print(count)
write_file.close()
read_file.close()
| [
"sjc951213@gmail.com"
] | sjc951213@gmail.com |
789e558277eb3ba1fd9cfb152bc79cdc9a2e5c2c | 53eb6921a7e6ad41f4cf1636290b7704ed0aa736 | /update_recipe.py | 1a87ab2ec667e87d845bffb44d576e825a547cbf | [] | no_license | jakirkham/conda_recipe_tools | 17e903695c1ab4505283efcc7d6e5e73f340e3a2 | 18545e04f3436ec7c67a38274e21e3fb76083797 | refs/heads/master | 2021-01-19T23:48:56.938068 | 2017-01-31T23:55:19 | 2017-01-31T23:55:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,110 | py | #! /usr/bin/env python
import argparse
import json
import re
from urllib.request import urlopen
try:
from packaging.version import parse as parse_version
except ImportError:
from pip._vendor.packaging.version import parse as parse_version
import jinja2
import yaml
class GitHubSource(object):
    """Release information for a GitHub-hosted project.

    Queries the GitHub releases API for ``user``/``repo`` and exposes the
    latest non-prerelease version, mirroring the ``PyPISource`` interface.
    """

    RELEASE_URL = 'https://api.github.com/repos/{user}/{repo}/releases'

    def __init__(self, user, repo):
        """Fetch the release list for user/repo from the GitHub API.

        (Replaces a leftover debug shortcut that read a local
        ``github_releases.json`` file and ignored both arguments.)
        """
        url = self.RELEASE_URL.format(user=user, repo=repo)
        response = urlopen(url)
        self.releases = json.loads(response.read().decode('utf8'))

    @property
    def latest_version(self):
        """Latest non-prerelease release name as a string, or None."""
        versions = [parse_version(r['name']) for r in self.releases]
        filtered = [v for v in versions if not v.is_prerelease]
        if len(filtered) == 0:
            return None
        return str(max(filtered))

    def get_hash(self, version, filename, hash_type):
        """Artifact digests are not available from the GitHub releases API.

        :raises NotImplementedError: always; failing loudly here is better
            than the previous silent ``None``, which later crashed
            ``CondaRecipe.hash_value`` with an opaque TypeError.
        """
        raise NotImplementedError(
            "hash lookup is not supported for GitHub sources")
class PyPISource(object):
    """Release metadata for a package hosted on PyPI.

    Downloads the package's JSON metadata once at construction time and
    answers version/hash queries from it.
    """

    def __init__(self, name):
        """Fetch the JSON metadata for *name* from pypi.io."""
        self.name = name
        url = 'https://pypi.io/pypi/' + self.name + '/json'
        response = urlopen(url)
        payload = response.read().decode('utf8')
        self._info = json.loads(payload)

    @property
    def latest_version(self):
        """Newest non-prerelease version string, or None if none exist."""
        stable = [
            parsed
            for parsed in map(parse_version, self._info['releases'])
            if not parsed.is_prerelease
        ]
        return str(max(stable)) if stable else None

    def get_hash(self, version, filename, hash_type):
        """Return the *hash_type* digest of *filename* within *version*."""
        candidates = self._info['releases'][version]
        matching = [c for c in candidates if c['filename'] == filename]
        return matching[0]['digests'][hash_type]
class CondaRecipe(object):
    """
    Representation of a conda recipe meta.yaml file.

    The raw lines are kept verbatim so edits preserve formatting; the
    rendered (jinja2 + yaml) view is recomputed on demand via ``_info``.

    Parameters
    ----------
    meta_filename : str
        meta.yaml path.
    """

    def __init__(self, meta_filename):
        """ Read the meta.yaml file for the recipe into memory. """
        # read the meta.yaml file for the recipe
        with open(meta_filename) as f:
            self._lines = f.readlines()

    @property
    def _info(self):
        """ Dictionary of recipe after rendering using jinja2. """
        text = ''.join(self._lines)
        rendered_text = jinja2.Template(text).render()
        return yaml.load(rendered_text)

    @property
    def name(self):
        """ package name. """
        return self._info['package']['name']

    @property
    def version(self):
        # Rendered package version string.
        return self._info['package']['version']

    @version.setter
    def version(self, version):
        # Rewrite the `{% set version = "..." %}` jinja line in place.
        quoted_version = '"' + version + '"'
        pattern = '(?<=set version = ).*(?= %})'
        self._lines = [re.sub(pattern, quoted_version, l) for l in self._lines]
        # Re-render to verify the substitution actually took effect.
        if self._info['package']['version'] != version:
            raise AttributeError("version could not be set")

    @property
    def hash_type(self):
        # Which hash the source section declares: 'md5', 'sha256' or None.
        source_section = self._info['source']
        if 'md5' in source_section:
            hash_type = 'md5'
        elif 'sha256' in source_section:
            hash_type = 'sha256'
        else:
            hash_type = None
        return hash_type

    @property
    def hash_value(self):
        # Current source hash as rendered from the recipe.
        return self._info['source'][self.hash_type]

    @hash_value.setter
    def hash_value(self, hash_value):
        # Update every place the hash may appear: jinja `set` variables of
        # various common names, then plain yaml `sha256:`/`md5:` values.
        hash_type = self.hash_type
        lines = self._lines
        # replace jinja templated hash tempates
        quoted_hash = '"' + hash_value + '"'
        pattern = '(?<=set hash_val = ).*(?= %})'
        lines = [re.sub(pattern, quoted_hash, l) for l in lines]
        pattern = '(?<=set hash = ).*(?= %})'
        lines = [re.sub(pattern, quoted_hash, l) for l in lines]
        if hash_type == 'sha256':
            pattern = '(?<=set sha256 = ).*(?= %})'
            lines = [re.sub(pattern, quoted_hash, l) for l in lines]
        if hash_type == 'md5':
            pattern = '(?<=set md5 = ).*(?= %})'
            lines = [re.sub(pattern, quoted_hash, l) for l in lines]
        # replace yaml hash values
        if hash_type == 'sha256':
            pattern = '(?<=sha256: )[0-9A-Fa-f]+'
            lines = [re.sub(pattern, hash_value, l) for l in lines]
        if hash_type == 'md5':
            pattern = '(?<=md5: )[0-9A-Fa-f]+'
            lines = [re.sub(pattern, hash_value, l) for l in lines]
        self._lines = lines
        # Re-render to verify the new hash was actually written.
        if self._info['source'][self.hash_type] != hash_value:
            raise AttributeError("hash_value could not be set")

    @property
    def url_filename(self):
        # Basename of the source download URL.
        url = self._info['source']['url']
        filename = url.split('/')[-1]
        return filename

    @property
    def source(self):
        # Choose the source helper (PyPI or GitHub) from the download URL;
        # returns None for unrecognized hosts.
        source_url = self._info['source']['url']
        if source_url.startswith('https://pypi.io'):
            return PyPISource(self.name)
        elif source_url.startswith('https://github.com'):
            pattern = '(?<=https://github.com/)(.*?)/(.*?)/'
            user, repo = re.search(pattern, source_url).groups()
            return GitHubSource(user, repo)
        else:
            return None

    def write(self, filename):
        # Write the (possibly modified) recipe lines back to disk as UTF-8.
        with open(filename, 'wb') as f:
            f.write(''.join(self._lines).encode('utf8'))
def parse_arguments():
    """Build and evaluate the command-line parser for the recipe updater."""
    parser = argparse.ArgumentParser(
        description="Update a conda recipe to a given version")
    options = [
        (('--meta', '-m'),
         dict(action='store', default='meta.yaml',
              help="path to the recipe's meta.yaml files.")),
        (('--version', '-v'),
         dict(action='store', default=None,
              help="version to update the recipe to, defaults is to latest.")),
    ]
    for flags, kwargs in options:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
def main():
    """Entry point: bump the recipe's version and refresh its source hash."""
    args = parse_arguments()
    recipe = CondaRecipe(args.meta)
    source = recipe.source

    # Use the requested version, or fall back to the newest upstream release.
    target = source.latest_version if args.version is None else args.version
    recipe.version = target

    # Refresh the hash so it matches the new source artifact.
    recipe.hash_value = source.get_hash(
        recipe.version, recipe.url_filename, recipe.hash_type)

    recipe.write(args.meta)
    print("Updated", args.meta, "to version", recipe.version)


if __name__ == "__main__":
    main()
| [
"jjhelmus@gmail.com"
] | jjhelmus@gmail.com |
3e32bdba53b1986cb51d700d78ad9809ec72fd2a | 2eeeefe48c56d0dfae4fd568dbaee3c8d2cf3463 | /0Demo/networkanalysisDemo/dns.py | e043c685cc2249e44892b70aef683077cc79c071 | [] | no_license | lijianmingCN/pybase | f6377f7944c043f7241452fcffccc3f49ef0cef9 | 7286a022ff7f40a7289cf69d73e8418a1ecf7b88 | refs/heads/master | 2021-01-02T08:10:42.215672 | 2017-08-01T03:16:29 | 2017-08-01T03:16:29 | 98,953,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | # -*- coding: utf-8 -*-
import pcap,dpkt
pc = pcap.pcap()
#We'll start off by setting pypcap's BPF expression to "udp dst port 53"
pc.setfilter('tcp port 80')
for ts, pkt in pc:
eth = dpkt.ethernet.Ethernet(pkt)
ip = eth.data
tcp = ip.data
print tcp.sport
print tcp.dport
if tcp.dport == 80 and len(tcp.data) > 0:
http = dpkt.http.Request(tcp.data)
#Once the HTTP payload has been parsed, we can examine its various attributes
print http.method
print http.uri
print http.version
print http.headers['user-agent']
| [
"lijianming@baidu.com"
] | lijianming@baidu.com |
d3e2f4e03843e37f530804f7007b2e645767f953 | 70b50139dcee04f94de958a9ce13236ef4c3bc34 | /modules/ImageChannels.py | 5276cc81486c8a68314652c3c956420809e48bf7 | [
"MIT"
] | permissive | CGL-Deeplearning/KalleR | 8bf8918b25c2afd5ab4dd6a269c2459413d1a96d | d788020a8e63657ed4f26e64be93b92648ee8d9f | refs/heads/master | 2021-09-07T17:41:27.563516 | 2018-02-27T01:28:03 | 2018-02-27T01:28:03 | 119,736,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,868 | py | DEFAULT_MIN_MAP_QUALITY = 5
IMAGE_HEIGHT = 300
IMAGE_WIDTH = 300
IMAGE_BUFFER = 0

# CIGAR operation codes for match / insertion / deletion.
CIGAR_MATCH = 0
CIGAR_IN = 1
CIGAR_DEL = 2

# Channel intensities are scaled into [0, MAX_COLOR_VALUE]; quality values
# are capped before scaling.
MAX_COLOR_VALUE = 254.0
BASE_QUALITY_CAP = 40.0
MAP_QUALITY_CAP = 60.0
MAP_QUALITY_FILTER = 10.0


class ImageChannels:
    """
    Encodes per-base pileup attributes as per-channel color intensities:
    base identity, base quality, mapping quality, strand, reference match
    and alt-allele support.
    """

    @staticmethod
    def get_base_color(base):
        """
        Get color based on a base.
        - Uses different bands of the same channel.
        :param base: base character ('A', 'C', 'G', 'T', '*', 'N', ...)
        :return: color intensity for the base channel
        """
        if base == 'A':
            return 250.0
        if base == 'C':
            return 100.0
        if base == 'G':
            return 180.0
        if base == 'T':
            return 30.0
        # '*' (gap), 'N' (unknown) and any other symbol share the lowest band.
        # (The original `if base == '*' or 'N':` test was always true, so
        # this catch-all preserves its behavior while stating the intent.)
        return 5.0

    @staticmethod
    def get_base_quality_color(base_quality):
        """
        Scale a base quality (capped at BASE_QUALITY_CAP) to a color value.
        :param base_quality: value of base quality
        :return: color intensity in [0, MAX_COLOR_VALUE]
        """
        capped_quality = min(base_quality, BASE_QUALITY_CAP)
        return MAX_COLOR_VALUE * capped_quality / BASE_QUALITY_CAP

    @staticmethod
    def get_map_quality_color(map_quality):
        """
        Scale a mapping quality (capped at MAP_QUALITY_CAP) to a color value.
        :param map_quality: value of mapping quality
        :return: color intensity in [0, MAX_COLOR_VALUE]
        """
        capped_quality = min(map_quality, MAP_QUALITY_CAP)
        return MAX_COLOR_VALUE * capped_quality / MAP_QUALITY_CAP

    @staticmethod
    def get_strand_color(is_rev):
        """
        Color for read orientation: 240 for reverse reads, 70 for forward.
        :param is_rev: True if the read is reverse-strand
        """
        return 240 if is_rev is True else 70

    @staticmethod
    def get_match_ref_color(is_match):
        """
        Color for reference agreement: dim when the base matches the
        reference, full intensity when it mismatches.
        :param is_match: True if the base matches the reference
        """
        return MAX_COLOR_VALUE * 0.2 if is_match is True else MAX_COLOR_VALUE * 1.0

    @staticmethod
    def get_alt_support_color(is_in_support):
        """
        Color for whether the read supports the alternate allele.
        :param is_in_support: True if the read supports the alt allele
        """
        return MAX_COLOR_VALUE * 1.0 if is_in_support is True else MAX_COLOR_VALUE * 0.6

    @staticmethod
    def get_empty_channels():
        """Channel values for an empty (padding) pixel."""
        return [0, 0, 0, 0, 0, 0]

    @staticmethod
    def get_channels(attribute_tuple):
        """
        Build all channel values for one read base.
        :param attribute_tuple: (base, base_quality, map_quality, is_rev,
                                 is_match, is_supporting)
        :return: list of six channel color values
        """
        base, base_q, map_q, is_rev, is_match, is_supporting = attribute_tuple
        return [ImageChannels.get_base_color(base),
                ImageChannels.get_base_quality_color(base_q),
                ImageChannels.get_map_quality_color(map_q),
                ImageChannels.get_strand_color(is_rev),
                ImageChannels.get_match_ref_color(is_match),
                ImageChannels.get_alt_support_color(is_supporting)]

    @staticmethod
    def get_ref_channels(base):
        """
        Channel values for a reference base, using maximal default qualities,
        forward strand, reference match and alt support.
        :param base: reference base character
        """
        return [ImageChannels.get_base_color(base),
                ImageChannels.get_base_quality_color(60),
                ImageChannels.get_map_quality_color(60),
                ImageChannels.get_strand_color(is_rev=False),
                ImageChannels.get_match_ref_color(is_match=True),
                ImageChannels.get_alt_support_color(is_in_support=True)]

    # RGB image creators below are ONLY USED FOR TESTING / visualization.

    @staticmethod
    def get_empty_rgb_channels():
        """RGBA values for an empty pixel."""
        return [0, 0, 0, 255]

    @staticmethod
    def get_color_for_base_rgb(ref, base):
        """
        RGB triple for a base: white when it matches the reference (and is
        not a gap), otherwise one color per base, magenta for anything else.
        """
        if ref == base and ref != '*':
            return 255, 255, 255
        elif base == 'A':
            return 255, 0, 0
        elif base == 'C':
            return 255, 255, 0
        elif base == 'T':
            return 0, 0, 255
        elif base == 'G':
            return 0, 255, 0
        else:
            return 255, 0, 255

    @staticmethod
    def get_channels_only_rgb(attribute_tuple, ref_base):
        """
        RGBA values for one read base: RGB from base identity vs. reference,
        alpha from alt-allele support.  (Quality/strand/match attributes in
        the tuple are unused in RGB mode; the dead computations that used
        them were removed.)
        """
        base, base_q, map_q, is_rev, is_match, is_supporting = attribute_tuple
        support_color = ImageChannels.get_alt_support_color(is_supporting)
        r, g, b = ImageChannels.get_color_for_base_rgb(ref_base, base)
        return [r, g, b, support_color]

    @staticmethod
    def get_ref_channels_rgb(base):
        """
        RGBA values for a reference base (full-support alpha).
        """
        r, g, b = ImageChannels.get_color_for_base_rgb('', base)
        support_color = ImageChannels.get_alt_support_color(is_in_support=True)
        return [r, g, b, support_color]
"kishwar.shafin@gmail.com"
] | kishwar.shafin@gmail.com |
994c43c41bf04d16ce58d05102eb476e9d4a0d08 | 8f2e6e38bb7ba2205cba57b0beae146d29f0ad3b | /chap7/chap7_1_ngram_2.py | de7b69eb1d767e0de948234dc5e5071f6c3c5927 | [] | no_license | KimDoKy/WebScrapingWithPython | fa08ba83ba560d4f24cddb5e55de938a380dfec2 | bc7dd8a36d3ee0f8e3a13ae9fe0d074733b45938 | refs/heads/master | 2020-12-02T19:20:26.285450 | 2017-08-25T14:27:36 | 2017-08-25T14:27:36 | 96,326,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
def ngrams(input, n):
    """Return the list of n-grams over the whitespace-separated tokens of
    *input*, each n-gram being a list of ``n`` consecutive tokens.

    Newlines and runs of spaces are collapsed to single spaces and
    non-ASCII characters are dropped before tokenizing.  Returns an empty
    list when there are fewer than ``n`` tokens.
    """
    # Collapse newlines and runs of spaces to single spaces.
    text = re.sub('\n+', " ", input)
    text = re.sub(' +', " ", text)
    # Round-trip through bytes to drop any non-ASCII characters.
    # (A stray debug print of the whole text was removed here.)
    text = bytes(text, "UTF-8").decode("ascii", "ignore")
    tokens = text.split(' ')
    # Sliding window of length n over the token list.
    return [tokens[i:i + n] for i in range(len(tokens) - n + 1)]
html = urlopen("http://en.wikipedia.org/wiki/Python_(programming_language)")
bsObj = BeautifulSoup(html, "html.parser")
# Extract the visible article text from Wikipedia's main content container.
content = bsObj.find("div", {"id":"mw-content-text"}).get_text()
# NOTE(review): this rebinds the name `ngrams` from the function to its
# result, so the function cannot be called again afterwards.
ngrams = ngrams(content, 2)
print(ngrams)
print("2-grams count is: "+str(len(ngrams)))
| [
"makingfunk0@gmail.com"
] | makingfunk0@gmail.com |
df057008387983d394659ff6fc680fea42885a44 | 52e83d67c8b76f83278b61a4c0787abebfa2423c | /DeepLense/Shubham Jain/pipelines/beginner/features/sub_gridding.py | 1d1655e7801803a7e6f870c7e9a0151e7059e250 | [] | no_license | mlsft/gsc_tasks- | 3935142c93cebc978ff35e3f37486438c4dceeed | 84b62aa04f2333d26f8f95a7c0b24c3922bac647 | refs/heads/master | 2022-04-13T16:22:18.054908 | 2020-04-14T11:59:45 | 2020-04-14T11:59:45 | 249,394,940 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,955 | py | import autofit as af
import autolens as al
### PIPELINE DESCRIPTION ###
# In this pipeline we use sub-grids with different resoultions, that oversample the calculation of light profile
# intensities and mass profile deflection angles. In general, a higher level of sub-gridding provides numerically
# more precise results, at the expense of longer calculations and higher memory usage.
# The 'sub_size' is an input parameter of the pipeline, meaning we can run the pipeline with different binning up
# factors using different runners.
# Phase names are tagged, ensuring phases using different sub-sizes have a unique output path.
# We'll perform a basic analysis which fits a lensed source galaxy using a parametric light profile where
# the lens's light is omitted. This pipeline uses two phases:
# Phase 1:
# Fit the lens mass model and source light profile using x1 source with a sub grid size of 2.
# Lens Mass: EllipticalIsothermal + ExternalShear
# Source Light: EllipticalSersic
# Prior Passing: None
# Notes: Uses a sub grid size of 2
# Phase 1:
# Refine the lens and source model using a sub grid size of 4.
# Lens Mass: EllipticalIsothermal + ExternalShear
# Source Light: EllipticalSersic
# Prior Passing: Lens mass (model -> phase 1), source light (model -> phase 1)
# Notes: Uses a sub grid size of 4.
def make_pipeline(phase_folders=None, sub_size=2):
    """Build the two-phase lens-modelling pipeline demonstrating sub-gridding.

    :param phase_folders: list of output folder names; the pipeline name and
        tag are appended to it.
        NOTE(review): the default ``None`` would raise on ``append`` below —
        callers are expected to pass a list.
    :param sub_size: sub-grid size used in phase 2 (phase 1 always uses 2).
    :return: the assembled ``al.PipelineDataset``.
    """

    ### SETUP PIPELINE & PHASE NAMES, TAGS AND PATHS ###

    pipeline_name = "pipeline__feature"
    pipeline_tag = "sub_gridding"

    # This function uses the phase folders and pipeline name to set up the output directory structure,
    # e.g. 'autolens_workspace/output/pipeline_name/pipeline_tag/phase_name/phase_tag/'

    phase_folders.append(pipeline_name)
    phase_folders.append(pipeline_tag)

    # When a phase is passed a 'sub_size,' a setup tag is automatically generated and added to the phase path,
    # to make it clear what sub-grid was used. The setup tag, phase name and phase paths are shown for 3
    # example sub_sizes:

    # sub_size=2 -> phase_path=phase_name/setup_sub_2
    # sub_size=3 -> phase_path=phase_name/setup_sub_3

    # If the sub-grid size is 1, the tag is an empty string, thus not changing the setup tag:

    # sub_size=1 -> phase_path=phase_name/setup

    ### PHASE 1 ###

    # In phase 1, we fit the lens galaxy's mass and one source galaxy, where we:

    # 1) Use a sub-grid size of 2x2 in every image pixel.

    # Gaussian priors keep the mass centre near the image centre.
    mass = af.PriorModel(al.mp.EllipticalIsothermal)
    mass.centre_0 = af.GaussianPrior(mean=0.0, sigma=0.1)
    mass.centre_1 = af.GaussianPrior(mean=0.0, sigma=0.1)

    phase1 = al.PhaseImaging(
        phase_name="phase_1__x1_source",
        phase_folders=phase_folders,
        galaxies=dict(
            lens=al.GalaxyModel(redshift=0.5, mass=mass, shear=al.mp.ExternalShear),
            source=al.GalaxyModel(redshift=1.0, sersic=al.lp.EllipticalSersic),
        ),
        sub_size=2,
    )

    # Non-linear search settings for phase 1.
    phase1.optimizer.const_efficiency_mode = True
    phase1.optimizer.n_live_points = 80
    phase1.optimizer.sampling_efficiency = 0.2

    ### PHASE 2 ###

    # In phase 2, we fit the lens galaxy's mass and two source galaxies, where we:

    # 1) Use a sub-grid size of 4x4 in every image pixel.

    # Priors are passed from phase 1's results (model -> phase 1).
    phase2 = al.PhaseImaging(
        phase_name="phase_2__x2_source",
        phase_folders=phase_folders,
        galaxies=dict(
            lens=al.GalaxyModel(
                redshift=0.5,
                mass=phase1.result.model.galaxies.lens.mass,
                shear=phase1.result.model.galaxies.lens.shear,
            ),
            source=al.GalaxyModel(
                redshift=1.0, sersic=phase1.result.model.galaxies.source.sersic
            ),
        ),
        sub_size=sub_size,
    )

    phase2.optimizer.const_efficiency_mode = True
    phase2.optimizer.n_live_points = 50
    phase2.optimizer.sampling_efficiency = 0.3

    return al.PipelineDataset(pipeline_name, phase1, phase2)
| [
"alihariri@MacBook-Air-de-Ali.local"
] | alihariri@MacBook-Air-de-Ali.local |
72d053c6d05b60d62f967c533e27d55e1dabc622 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/era5_scripts/02_preprocessing/combine82/20-tideGauge.py | 998dddd3759e2e9b2093bf2a17792b529133cbb8 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,115 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 16 16:11:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
dir_in = '/lustre/fs0/home/mtadesse/eraFiveConcat'
dir_out = '/lustre/fs0/home/mtadesse/ereaFiveCombine'
def combine(x=20, y=21):
    """Merge the slp / wnd_u / wnd_v ERA5 predictor CSVs of each tide gauge
    into a single CSV per gauge, joined on their shared 'date' column.

    Parameters
    ----------
    x, y : int
        Index range (``x`` inclusive, ``y`` exclusive) into the listing of
        tide-gauge folders in ``dir_in``.  The defaults preserve the
        original hard-coded selection (folder 20 only), so ``combine()``
        behaves exactly as before.
    """
    os.chdir(dir_in)

    # One folder per tide gauge.
    tg_list_name = os.listdir()

    # cd to where the actual file is
    os.chdir(dir_in)

    for t in range(x, y):
        tg_name = tg_list_name[t]
        print(tg_name, '\n')

        # looping through each TG folder
        os.chdir(tg_name)

        # defining the path for each predictor
        where = os.getcwd()
        csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
                    "wnd_u": os.path.join(where, 'wnd_u.csv'),\
                    'wnd_v' : os.path.join(where, 'wnd_v.csv')}

        first = True
        for pr in csv_path.keys():
            print(tg_name, ' ', pr)
            # read predictor
            pred = pd.read_csv(csv_path[pr])

            # remove index columns left over from earlier to_csv round-trips
            pred.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis = 1, inplace=True)

            # prefix predictor columns (except 'date') so they stay unique
            # after merging
            pred_col = list(pred.columns)
            for pp in range(len(pred_col)):
                if pred_col[pp] == 'date':
                    continue
                pred_col[pp] = pr + str(pred_col[pp])
            pred.columns = pred_col

            # merge all predictors on the shared date column
            if first:
                pred_combined = pred
                first = False
            else:
                pred_combined = pd.merge(pred_combined, pred, on = 'date')

        # saving pred_combined, then return to the input root for the next TG
        os.chdir(dir_out)
        pred_combined.to_csv('.'.join([tg_name, 'csv']))
        os.chdir(dir_in)
        print('\n')
# Run the merge for the default tide-gauge range when executed as a script.
combine()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
cbfc17ecc8356e425a4ddb2c99cb8b93386b6a16 | 8e2e28a191fa5ec5a6c070ec7e9ccad98c8b4a0b | /test/45-类的getattr属性的链式调用.py | b3eac894c94b012291f1fb95de6113032241a472 | [
"Apache-2.0"
] | permissive | kellanfan/python | 4cd61cbc062e2eee3a900fa7447ca5f0b8f1a999 | 912dc05a3bd0ded9544166a68da23ca0a97b84da | refs/heads/master | 2023-04-06T03:04:38.851928 | 2023-04-01T02:45:56 | 2023-04-01T02:45:56 | 65,542,280 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | #/usr/bin/env python
# pylint: disable=no-member
# -*- encoding: utf-8 -*-
'''
@File : ex_chain.py
@Time : 2019/06/04 14:54:20
@Author : Kellan Fan
@Version : 1.0
@Contact : kellanfan1989@gmail.com
@Desc : 现在很多网站都搞REST API,像:http://api.server/user/timeline/list
如果要写SDK,给每个URL对应的API都写一个方法,那得累死,而且,API一旦改动,SDK也要改,
利用完全动态的__getattr__,我们可以写出一个链式调用
'''
# here put the import lib
class Chain(object):
    """Builds a URL-style path by chaining attribute accesses.

    Each attribute lookup returns a new ``Chain`` with that attribute name
    appended as a path segment, e.g. ``Chain().a.b`` renders as ``/a/b``.
    """

    def __init__(self, path=''):
        # Accumulated path so far; instances are immutable.
        self._path = path

    def __getattr__(self, segment):
        # Any unknown attribute extends the path by one segment.
        combined = '%s/%s' % (self._path, segment)
        return Chain(combined)

    def __str__(self):
        return self._path
if __name__ == "__main__":
    # Demo: each attribute access appends one URL segment.
    print(Chain().status.user.timeline.list)
"icyfk1989@163.com"
] | icyfk1989@163.com |
26bf48c842613a75440758d9f1938cf84989e3ca | 868a66fc60f314dfdb8f434f88d2e8d7c2a6552c | /src/marketing/migrations/0001_initial.py | d626ccf4668a311c8695e8fee618007be70012f6 | [] | no_license | RommelTJ/ecommerce | cdd7a1323ff505b70363f6abc2ce9dff9f52b0a5 | 694dab843b0ca2d4c7cd3b671c3fdb69f063d14b | refs/heads/master | 2023-08-14T22:54:23.368159 | 2023-05-31T05:07:30 | 2023-05-31T05:07:30 | 118,721,454 | 0 | 0 | null | 2023-07-25T20:47:48 | 2018-01-24T06:17:34 | Python | UTF-8 | Python | false | false | 974 | py | # Generated by Django 2.0.1 on 2018-03-05 00:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the MarketingPreference table
    # with a one-to-one link to the configured user model.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='MarketingPreference',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subscribed', models.BooleanField(default=True)),
                # Raw response/message from the Mailchimp API, if any.
                ('mailchimp_msg', models.TextField(blank=True, null=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                # Refreshed on every save (auto_now=True).
                ('update', models.DateTimeField(auto_now=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"rommeltj@gmail.com"
] | rommeltj@gmail.com |
34c6a52952b931e9219f69d24931833e3c778a63 | d26f0a7571f951c5c17688a4a395d1bf9039c453 | /torchbiggraph/filtered_eval.py | 0c883dfc9fbf735a3d3863b287df42e059237910 | [
"BSD-3-Clause"
] | permissive | 007vasy/PyTorch-BigGraph | c538af56ff03487b3e108e17747d8a7c4e4d0e7b | c649cf3b70d083d74d253cc95f509eb74fd64fce | refs/heads/master | 2021-01-05T13:38:52.949899 | 2020-02-16T11:40:26 | 2020-02-16T11:41:56 | 241,037,539 | 1 | 0 | NOASSERTION | 2020-02-17T06:42:25 | 2020-02-17T06:42:24 | null | UTF-8 | Python | false | false | 3,744 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE.txt file in the root directory of this source tree.
import logging
from collections import defaultdict
from typing import Dict, List, Tuple
from torchbiggraph.config import ConfigSchema
from torchbiggraph.edgelist import EdgeList
from torchbiggraph.eval import RankingEvaluator
from torchbiggraph.graph_storages import EDGE_STORAGES
from torchbiggraph.model import Scores
from torchbiggraph.stats import Stats
from torchbiggraph.types import Partition
logger = logging.getLogger("torchbiggraph")
class FilteredRankingEvaluator(RankingEvaluator):
    """Ranking evaluator that masks known positive triples out of the negatives.

    Intended for knowledge-base-completion benchmarks such as FB15K,
    FB15K-237, WN18 and WN18RR. Only a single non-featurized, unpartitioned
    entity type is supported, and evaluation must use all entities as
    negatives, so results stay comparable to standard published benchmarks.
    """

    def __init__(self, config: ConfigSchema, filter_paths: List[str]):
        super().__init__()
        # Guard clauses: this evaluator only supports the restricted setting
        # described in the class docstring.
        if len(config.relations) != 1 or len(config.entities) != 1:
            raise RuntimeError("Filtered ranking evaluation should only be used "
                               "with dynamic relations and one entity type.")
        if not config.relations[0].all_negs:
            raise RuntimeError("Filtered Eval can only be done with all negatives.")
        entity, = config.entities.values()
        if entity.featurized:
            raise RuntimeError("Entity cannot be featurized for filtered eval.")
        if entity.num_partitions > 1:
            raise RuntimeError("Entity cannot be partitioned for filtered eval.")

        # For each (head, relation) pair record every known tail, and for each
        # (tail, relation) pair every known head, across all filter files.
        self.lhs_map: Dict[Tuple[int, int], List[int]] = defaultdict(list)
        self.rhs_map: Dict[Tuple[int, int], List[int]] = defaultdict(list)
        for path in filter_paths:
            logger.info(f"Building links map from path {path}")
            storage = EDGE_STORAGES.make_instance(path)
            # Assume unpartitioned.
            edge_list = storage.load_edges(Partition(0), Partition(0))
            # Assume non-featurized entities and dynamic relations.
            head_tensor = edge_list.lhs.to_tensor()
            tail_tensor = edge_list.rhs.to_tensor()
            for row in range(len(edge_list)):
                head = int(head_tensor[row])
                rel = int(edge_list.rel[row])
                tail = int(tail_tensor[row])
                self.lhs_map[head, rel].append(tail)
                self.rhs_map[tail, rel].append(head)
            logger.info(f"Done building links map from path {path}")

    def eval(
        self,
        scores: Scores,
        batch_edges: EdgeList,
    ) -> Stats:
        head_tensor = batch_edges.lhs.to_tensor()
        tail_tensor = batch_edges.rhs.to_tensor()
        for row in range(len(batch_edges)):
            # Assume non-featurized entities and dynamic relations.
            head = int(head_tensor[row])
            rel = int(batch_edges.rel[row])
            tail = int(tail_tensor[row])
            known_tails = self.lhs_map[head, rel]
            known_heads = self.rhs_map[tail, rel]
            assert head in known_heads
            assert tail in known_tails
            # The rank is computed as the number of non-negative margins (as
            # that means a negative with at least as good a score as a
            # positive), so to avoid counting known positives we push their
            # scores far down.
            scores.lhs_neg[row][known_heads] = -1e9
            scores.rhs_neg[row][known_tails] = -1e9
        return super().eval(scores, batch_edges)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
f3772896550c91f8ddfe5217abbaf0e8bf640fc2 | 66a2276f011feb5386db5e480bc0f9c2da6f4f3f | /3ProjectEuler/i101_125/i112bouncy_numbers.py | cd28bd1e6ce44784988e2521946541bbf3762d51 | [] | no_license | greatabel/puzzle_I_cracked | 09edee29464974f9b5358910c5097aa1b4d9aec2 | 7dc252923f17fd89de201dc2ac7540d54d69420d | refs/heads/master | 2021-06-02T11:05:19.962065 | 2019-10-19T14:21:56 | 2019-10-19T14:21:56 | 31,331,091 | 13 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,845 | py | '''
Working from left-to-right if no digit is exceeded by the digit to its left it is called
an increasing number; for example, 134468.
Similarly if no digit is exceeded by the digit to its right it is called a decreasing number;
for example, 66420.
We shall call a positive integer that is neither increasing nor decreasing a "bouncy" number;
for example, 155349.
Clearly there cannot be any bouncy numbers below one-hundred, but just over half of the numbers
below one-thousand (525) are bouncy. In fact, the least number for which the proportion of bouncy
numbers first reaches 50% is 538.
Surprisingly, bouncy numbers become more and more common and by the time we reach 21780 the proportion
of bouncy numbers is equal to 90%.
Find the least number for which the proportion of bouncy numbers is exactly 99%.
'''
import time
from termcolor import colored
def is_increase(n):
    """Return True if the digits of `n`, read left to right, never decrease.

    E.g. 134468 is increasing, 66420 is not. Equal adjacent digits are
    allowed, matching Project Euler's definition of an increasing number.
    """
    digits = str(n)
    # Compare each adjacent digit pair; `all` short-circuits on the first
    # decreasing pair.
    return all(int(a) <= int(b) for a, b in zip(digits, digits[1:]))
def is_decrease(n):
    """Return True if the digits of `n`, read left to right, never increase.

    E.g. 66420 is decreasing, 134468 is not. Equal adjacent digits are
    allowed, matching Project Euler's definition of a decreasing number.
    """
    digits = str(n)
    # Compare each adjacent digit pair; `all` short-circuits on the first
    # increasing pair.
    return all(int(a) >= int(b) for a, b in zip(digits, digits[1:]))
def is_bouncy(n):
    """A number is "bouncy" when its digits are neither entirely
    non-decreasing nor entirely non-increasing (e.g. 155349)."""
    return not (is_increase(n) or is_decrease(n))
def main_process():
    """Count upward from 1 until the running proportion of bouncy numbers
    reaches 99%, then print the number at which that first happens
    (Project Euler problem 112)."""
    i = 0                # current candidate number
    t = 0                # running proportion of bouncy numbers among 1..i
    bouncy_count = 0     # how many of 1..i are bouncy
    while t < 0.99:
        i += 1
        if is_bouncy(i):
            bouncy_count += 1
        t = bouncy_count / i
        # Progress indicator: report every 10,000 numbers checked.
        if i % 10 ** 4 == 0:
            print(i)
    # for i in range(190, 220):
    # print(i, 'is_increase=', is_increase(i), 'is_decrease=', is_decrease(i))
    print(colored('mycount=', 'red'), i)
if __name__ == "__main__":
    # time.clock() was deprecated in Python 3.3 and removed in 3.8;
    # time.perf_counter() is the recommended high-resolution timer.
    tic = time.perf_counter()
    main_process()
    toc = time.perf_counter()
    print("time=", toc - tic)
"myreceiver2for2github@gmail.com"
] | myreceiver2for2github@gmail.com |
1e01b6ba10197c7552618bd21d4987fae7536559 | 1a7b21525a2bc95511c2c382f301250dec3a2c39 | /test/test_validation.py | d2a5d61c8202f27b843d8dabd336f8325021aec0 | [
"BSD-2-Clause"
] | permissive | ajbansal/sphinxcontrib-confluencebuilder | 4ad742dc1ca73402076f51ff991e8196f8a62cab | 330e31afc4d80b78595113a538b057ecd3323867 | refs/heads/master | 2020-03-28T02:26:55.020788 | 2018-08-08T08:26:08 | 2018-08-08T08:26:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,850 | py | # -*- coding: utf-8 -*-
"""
:copyright: Copyright 2018 by the contributors (see AUTHORS file).
:license: BSD-2-Clause, see LICENSE for details.
"""
from sphinxcontrib.confluencebuilder.builder import ConfluenceBuilder
from sphinxcontrib_confluencebuilder_util import ConfluenceTestUtil as _
from subprocess import check_output
import io
import os
import sys
import unittest
DEFAULT_TEST_BASE = 'sphinxcontrib-confluencebuilder Home'
DEFAULT_TEST_DESC = 'test state'
DEFAULT_TEST_KEY = 'test-holder'
DEFAULT_TEST_SPACE = 'confluencebuilder'
DEFAULT_TEST_URL = 'https://jdknight.atlassian.net/wiki/'
DEFAULT_TEST_USER = 'sphinxcontrib-confluencebuilder'
class TestConfluenceValidation(unittest.TestCase):
    """End-to-end validation of the Confluence builder.

    Each test builds one of the `validation-sets` documentation trees with
    Sphinx and publishes it to a live Confluence instance, so network access
    and valid credentials are required. Defaults can be overridden via an
    importable `validation_test_overrides` module.
    """

    @classmethod
    def setUpClass(cls):
        _.enableVerbose()

        # build configuration
        # Base publishing configuration shared by every test; individual
        # tests copy and tweak it.
        cls.config = _.prepareConfiguration()
        cls.config['confluence_disable_notifications'] = True
        cls.config['confluence_disable_xmlrpc'] = True
        cls.config['confluence_page_hierarchy'] = True
        cls.config['confluence_parent_page'] = DEFAULT_TEST_BASE
        cls.config['confluence_publish'] = True
        cls.config['confluence_space_name'] = DEFAULT_TEST_SPACE
        cls.config['confluence_server_url'] = DEFAULT_TEST_URL
        cls.config['confluence_server_user'] = DEFAULT_TEST_USER
        cls.config['confluence_timeout'] = 1
        cls.test_desc = DEFAULT_TEST_DESC
        cls.test_key = DEFAULT_TEST_KEY

        # overrides from user
        # Each override is optional; a missing validation_test_overrides
        # module (or attribute) silently keeps the defaults above.
        try:
            from validation_test_overrides import config_overrides
            cls.config.update(config_overrides)
        except ImportError:
            pass
        try:
            from validation_test_overrides import config_test_desc
            cls.test_desc = config_test_desc
        except ImportError:
            pass
        try:
            from validation_test_overrides import config_test_key
            cls.test_key = config_test_key
        except ImportError:
            pass

        # finalize configuration
        cls.config['confluence_publish_prefix'] = ''
        cls.config['confluence_purge'] = False
        # Expose the test key/description to every document via rst_epilog
        # substitutions.
        cls.config['rst_epilog'] = """
.. |test_key| replace:: {}
.. |test_desc| replace:: {}
""".format(cls.test_key, cls.test_desc)

        # find validate-sets base folder
        test_dir = os.path.dirname(os.path.realpath(__file__))
        cls.datasets = os.path.join(test_dir, 'validation-sets')

        # setup base structure
        dataset = os.path.join(cls.datasets, 'base')
        doc_dir, doctree_dir = _.prepareDirectories('validation-set-base')

        # build/publish test base page
        app = _.prepareSphinx(dataset, doc_dir, doctree_dir, cls.config)
        app.build(force_all=True)

        # finalize configuration for tests
        # Subsequent builds publish under the base page created above and
        # purge stale descendants.
        cls.config['confluence_master_homepage'] = False
        cls.config['confluence_purge'] = True
        cls.config['confluence_purge_from_master'] = True
        if cls.test_key != DEFAULT_TEST_KEY:
            cls.config['confluence_publish_prefix'] = '{}-'.format(cls.test_key)
            cls.config['confluence_parent_page'] = cls.test_key

    def test_autodocs(self):
        # Builds the autodoc dataset; its sample sources must be importable.
        config = dict(self.config)
        config['extensions'].append('sphinx.ext.autodoc')
        dataset = os.path.join(self.datasets, 'autodocs')
        doc_dir, doctree_dir = _.prepareDirectories('validation-set-autodocs')
        sys.path.insert(0, os.path.join(dataset, 'src'))
        app = _.prepareSphinx(dataset, doc_dir, doctree_dir, config)
        app.build(force_all=True)
        sys.path.pop(0)

    def test_common(self):
        # Builds the common dataset with the default configuration.
        config = dict(self.config)
        dataset = os.path.join(self.datasets, 'common')
        doc_dir, doctree_dir = _.prepareDirectories('validation-set-common')
        app = _.prepareSphinx(dataset, doc_dir, doctree_dir, config)
        app.build(force_all=True)

    def test_common_macro_restricted(self):
        # Re-builds the common dataset with several Confluence macros
        # disallowed, exercising the fallback rendering paths.
        config = dict(self.config)
        dataset = os.path.join(self.datasets, 'common')
        doc_dir, doctree_dir = _.prepareDirectories('validation-set-common-nm')
        config['confluence_adv_restricted_macros'] = [
            'anchor',
            'children',
            'code',
            'info',
        ]
        config['confluence_header_file'] = os.path.join(dataset, 'no-macro.tpl')
        config['confluence_publish_prefix'] += 'nomacro-'
        app = _.prepareSphinx(dataset, doc_dir, doctree_dir, config)
        app.build(force_all=True)

    def test_header_footer(self):
        # Verifies custom per-page header/footer template injection.
        config = dict(self.config)
        dataset = os.path.join(self.datasets, 'header-footer')
        doc_dir, doctree_dir = _.prepareDirectories('validation-set-hf')
        config['confluence_header_file'] = os.path.join(dataset, 'header.tpl')
        config['confluence_footer_file'] = os.path.join(dataset, 'footer.tpl')
        app = _.prepareSphinx(dataset, doc_dir, doctree_dir, config)
        app.build(force_all=True)

    def test_hierarchy(self):
        # Verifies hierarchical publishing with a capped document depth.
        config = dict(self.config)
        config['confluence_max_doc_depth'] = 2
        config['confluence_page_hierarchy'] = True
        dataset = os.path.join(self.datasets, 'hierarchy')
        doc_dir, doctree_dir = _.prepareDirectories('validation-set-hierarchy')
        app = _.prepareSphinx(dataset, doc_dir, doctree_dir, config)
        app.build(force_all=True)

    def test_xmlrpc(self):
        # Forces publishing over the legacy XML-RPC API instead of REST.
        config = dict(self.config)
        config['confluence_disable_rest'] = True
        config['confluence_disable_xmlrpc'] = False
        dataset = os.path.join(self.datasets, 'xmlrpc')
        doc_dir, doctree_dir = _.prepareDirectories('validation-set-xmlrpc')
        app = _.prepareSphinx(dataset, doc_dir, doctree_dir, config)
        app.build(force_all=True)
if __name__ == '__main__':
    # unittest.main() runs the suite; the process exit status reflects the
    # overall test result.
    sys.exit(unittest.main(verbosity=0))
| [
"james.d.knight@live.com"
] | james.d.knight@live.com |
32c7fb2166b64dd14f3224dddb5854d96e353e2d | d32c039a725c9c1e22c4432d777144584b10c040 | /testcase/Shop_test16_shop_follow.py | 46af76e5e59cb07fd757029bf397274daa6a9296 | [] | no_license | heyu1229/Vaffle_interface | 311a2e6f0be4120aaeaccdeb13203529c6f240ca | 0c78ec8709c6af7d31b28f8e2efc8efe4cc3797c | refs/heads/master | 2022-11-24T04:30:48.698641 | 2020-07-22T05:22:47 | 2020-07-22T05:22:47 | 281,582,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,374 | py | # -*- coding:UTF-8 -*-
import unittest
import requests
import sys,time
import json,xlrd
# sys.path.append("/usr/lib/python3/heaven_interface_vaffle2.0_auto2/public")
import global_list
sys.path.append(global_list.path+"/public_1")
from get_url import Url
from get_version import Version
from get_token import Token
from read_data import Read_ExcelData
from write_data import Write_ExcelData
from func_requests import FuncRequests
#---------------关注、取消关注店铺----------------------
class Shop(unittest.TestCase):
def setUp(self):
self.r=FuncRequests()
#-----------------关注店铺----------------------------------
def testcase_001(self):
sheet_index = 12
row = 19
member_id='744'
print ("testcase_001关注店铺:")
result = self.r.interface_requests(member_id, sheet_index, row)
self.assertEqual(10000, result['code'])
print("code返回值:10000")
#-----------------取消关注店铺----------------------------------
def testcase_002(self):
sheet_index = 12
row = 20
member_id='744'
print ("testcase_002取消关注店铺:")
result = self.r.interface_requests(member_id, sheet_index, row)
self.assertEqual(10000, result['code'])
print("code返回值:10000")
if __name__ == "__main__":
unittest.main() | [
"1004856404@qq.com"
] | 1004856404@qq.com |
e9009e7baa2f49697d7ccc13779a6d676cf5539c | 1f5ecc9bf6dbc82719a94145916b36a76a4e4207 | /mysqlapp/models.py | 79ff5e99b02b29c0eb2c475e84f0bfbe39dfeaf6 | [] | no_license | GanapathiAmbore/Django-with-Multiple-databases | ad32c56af20f3cdaf546f78636aa65257498da33 | f13594e831f136b331554803fa25287642a24c35 | refs/heads/master | 2020-05-30T21:36:25.063995 | 2019-06-03T11:25:49 | 2019-06-03T11:25:49 | 189,973,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | from django.db import models
from django.core.validators import RegexValidator
class Teacher(models.Model):
    """A teacher record with contact details and a profile image."""

    name = models.CharField(max_length=50)
    # Accepts an optional '+', an optional leading '1', then 9-15 digits.
    # The message previously claimed "format '+91XXXXXXX', up to 10 digits",
    # which contradicted the regex; it now matches the actual validation.
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$',
                                 message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.")
    phone_number = models.CharField(validators=[phone_regex], max_length=17, blank=True)  # validators should be a list
    email = models.EmailField()
    image = models.ImageField()
    date = models.DateTimeField(auto_now_add=True)  # set once, at creation
    address = models.TextField()

    def __str__(self):
        return self.name
class Student(models.Model):
    """A basic student record."""

    name=models.CharField(max_length=25)
    age=models.IntegerField()
    # NOTE(review): 'addres' looks like a typo for 'address', but renaming
    # the field would require a schema migration -- left as-is.
    addres=models.TextField()

    def __str__(self):
        return self.name
"ganapathiambore@gmail.com"
] | ganapathiambore@gmail.com |
49fc4bd9ec51d4875264fe2d0a104fd1b0a32a30 | 20a3cc1106fa86fc2d45cd1728cc87d5db97e1f7 | /old/s3c/gen.py | c350d19d23465b2c9eeb5b5c3d8bc2e898069c2a | [] | no_license | sarahboufelja54/galatea | f5664f0b3117629b2c5bbe078a1bd52bb5e359e6 | 002a9f2905868be25b71770190fb2d5eda11c861 | refs/heads/master | 2020-12-04T13:45:07.697189 | 2018-12-12T16:27:09 | 2018-12-12T16:27:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | #script = open('config/6x6/extract/H_interm.sh','w')
for i in xrange(10):
start = i * 5000
end = (i+1) * 5000
char = chr(ord('A')+i)
fname = 'config/6x6/extract/H_interm_exp_h_3_train_cpu_'+char+'.yaml'
#script.write('THEANO_FLAGS="device=gpu0" python extract_features.py '+fname+'\n')
f = open(fname,'w')
f.write("""!obj:galatea.s3c.extract_features.FeatureExtractor {
"batch_size" : 1,
"model_path" : "${GALATEA_PATH}/s3c/config/6x6/H_interm_cpu.pkl", "pooling_region_counts": [3],
"save_paths" : [ "${FEATURE_EXTRACTOR_YAML_PATH}.npy" ],
"feature_type" : "exp_h",
"dataset_name" : "cifar10",
"restrict" : [ %d, %d ],
"which_set" : "train"
}""" % (start, end))
f.close()
#script.close()
| [
"goodfellow.ian@gmail.com"
] | goodfellow.ian@gmail.com |
1c97fdfd8b1a09919f2abec63ef2e4d7406e361d | 80a19d955bf4bc041227636a007bc494b46efa14 | /tensorflow_probability/python/experimental/mcmc/gradient_based_trajectory_length_adaptation.py | b55e506a54f9004017ee708db968b61c6740d118 | [
"Apache-2.0"
] | permissive | yadevi/probability | 5be98644b05a110f111fe1e0bd32737b0771dfa1 | e1aca8e0bc0d95d4ae61edab21171710f3dbe160 | refs/heads/master | 2023-01-12T15:43:07.008492 | 2020-10-31T21:02:41 | 2020-10-31T21:04:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,101 | py | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Gradient-based trajectory length adaptation kernel."""
import collections
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.experimental import unnest
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.math import gradient
from tensorflow_probability.python.mcmc import kernel as kernel_base
from tensorflow_probability.python.mcmc import simple_step_size_adaptation
from tensorflow_probability.python.mcmc import transformed_kernel
from tensorflow_probability.python.mcmc.internal import util as mcmc_util
# Public API of this module.
__all__ = [
    'chees_criterion',
    'GradientBasedTrajectoryLengthAdaptation',
    'GradientBasedTrajectoryLengthAdaptationResults',
]

MAX_HALTON_SEQUENCE_BITS = 10  # Generates up to 1024 unique trajectory lengths.
class GradientBasedTrajectoryLengthAdaptationResults(
    mcmc_util.PrettyNamedTupleMixin,
    collections.namedtuple('GradientBasedTrajectoryLengthAdaptationResults', [
        'inner_results',
        'max_trajectory_length',
        'step',
        'adaptation_rate',
        'jitter_amount',
        'averaged_sq_grad',
        'averaged_max_trajectory_length',
        'criterion',
        'seed',
    ])):
  """Internal state of GradientBasedTrajectoryLengthAdaptation.

  Attributes:
    inner_results: Results of the inner kernel.
    max_trajectory_length: Floating point scalar `Tensor`. Maximum HMC
      trajectory length.
    step: Int32 scalar `Tensor`. The number of steps this kernel has taken.
      Increases by 1 for every call to `one_step`.
    adaptation_rate: Floating point scalar `Tensor`. How rapidly to adapt the
      trajectory length.
    jitter_amount: Floating point scalar `Tensor`. How much to jitter the
      trajectory on the next step. The trajectory length is sampled from `[(1 -
      jitter_amount) * max_trajectory_length, max_trajectory_length]`.
    averaged_sq_grad: Floating point scalar `Tensor`. Moving average of squared
      criterion gradients.
    averaged_max_trajectory_length: Floating point scalar `Tensor`. Moving
      average of the maximum of trajectory length. This is used after the burnin
      period.
    criterion: Floating point `Tensor` with shape `[C0, ..., Cb]` with `b > 0`.
      The value of the criterion returned by the `criterion_fn` corresponding to
      each Markov chain.
    seed: Int32 `Tensor` with shape `[2]`. The random seed used by the kernel in
      the previous step.
  """
  # Prevent creation of per-instance __dict__; namedtuple subclasses
  # otherwise grow one.
  __slots__ = ()
def hmc_like_num_leapfrog_steps_getter_fn(kernel_results):
  """Reads `num_leapfrog_steps` from (possibly nested) `kernel_results`."""
  field = 'num_leapfrog_steps'
  return unnest.get_innermost(kernel_results, field)
def hmc_like_num_leapfrog_steps_setter_fn(kernel_results,
                                          new_num_leapfrog_steps):
  """Returns a copy of `kernel_results` with `num_leapfrog_steps` replaced."""
  updated_results = unnest.replace_innermost(
      kernel_results, num_leapfrog_steps=new_num_leapfrog_steps)
  return updated_results
def hmc_like_proposed_velocity_getter_fn(kernel_results):
  """Reads the proposed velocity from `kernel_results` for inspection."""
  # TODO(b/169898033): This only works with the standard kinetic energy term.
  momentum = unnest.get_innermost(kernel_results, 'final_momentum')
  state = unnest.get_innermost(kernel_results, 'proposed_state')
  # `final_momentum` comes back with the wrong structure when the state is a
  # scalar; repack it to mirror the structure of the proposed state.
  return tf.nest.pack_sequence_as(state, tf.nest.flatten(momentum))
def hmc_like_proposed_state_getter_fn(kernel_results):
  """Reads the proposed state from `kernel_results` for inspection."""
  field = 'proposed_state'
  return unnest.get_innermost(kernel_results, field)
def hmc_like_step_size_getter_fn(kernel_results):
  """Reads the step size from `kernel_results`."""
  # Delegation is done here (rather than a module-level alias) because of
  # circular imports.
  delegate = simple_step_size_adaptation.hmc_like_step_size_getter_fn
  return delegate(kernel_results)
def hmc_like_log_accept_prob_getter_fn(kernel_results):
  """Reads the log acceptance probability from `kernel_results`."""
  # Delegation is done here (rather than a module-level alias) because of
  # circular imports.
  delegate = simple_step_size_adaptation.hmc_like_log_accept_prob_getter_fn
  return delegate(kernel_results)
def chees_criterion(previous_state,
                    proposed_state,
                    accept_prob,
                    validate_args=False):
  """The ChEES criterion from [1].

  ChEES stands for Change in the Estimator of the Expected Square.

  ```None
  ChEES = 1/4 E[(||x' - E[x]||**2 - ||x - E[x]||**2)**2],
  ```

  where `x` is the previous chain state, `x'` is the next chain state, and
  `||.||` is the L2 norm. Both expectations are with respect to the chain's
  stationary distribution. In practice, the inner expectation is replaced by the
  empirical mean across chains, so computing this criterion requires that at
  least 2 chains are present. The outer expectation is computed by the caller
  (e.g. in the `GradientBasedTrajectoryLengthAdaptation` kernel).

  This can be thought of as the standard expected squared jump distance (ESJD)
  criterion, except that the jump distance is computed in the space of centered
  squared L2 norms.

  Unlike ChEES, regular ESJD is maximized by perfectly anticorrelated proposals,
  which can give excellent mean estimates but terrible variance estimates;
  maximizing ChEES should give good estimates across a wider range of types of
  posterior expectations.

  Args:
    previous_state: (Possibly nested) floating point `Tensor`. The previous
      state of the HMC chain.
    proposed_state: (Possibly nested) floating point `Tensor`. The proposed
      state of the HMC chain.
    accept_prob: Floating `Tensor`. Probability of accepting the proposed state.
    validate_args: Whether to perform non-static argument validation.

  Returns:
    chees: The value of the ChEES criterion.

  Raises:
    ValueError: If `accept_prob` indicates that there are fewer than 2 chains.

  #### References

  [1]: Hoffman, M., Radul, A., & Sountsov, P. (2020). An Adaptive MCMC Scheme
       for Setting Trajectory Lengths in Hamiltonian Monte Carlo. In
       preparation.
  """
  # All of `accept_prob`'s dimensions are treated as batch (chain) dimensions.
  batch_ndims = ps.rank(accept_prob)
  batch_axes = ps.range(batch_ndims, dtype=tf.int32)
  num_chains = ps.size(accept_prob)
  num_chains_ = tf.get_static_value(num_chains)
  if num_chains_ is not None:
    # Chain count is known statically: validate eagerly, at trace time.
    if num_chains_ < 2:
      raise ValueError(
          'chees_criterion requires at least 2 chains. Got: {}'.format(
              num_chains_))
  elif validate_args:
    # Chain count only known at runtime: attach the check as a control
    # dependency of the first use of `previous_state`.
    with tf.control_dependencies([
        assert_util.assert_greater_equal(
            num_chains, 2, 'chees_criterion requires at least 2 chains.')
    ]):
      previous_state = tf.nest.map_structure(tf.identity, previous_state)

  def _center_previous_state(x):
    # The empirical mean here is a stand-in for the true mean, so we drop the
    # gradient that flows through this term.
    return x - tf.stop_gradient(tf.reduce_mean(x, axis=batch_axes))

  def _center_proposed_state(x):
    # The empirical mean here is a stand-in for the true mean, so we drop the
    # gradient that flows through this term. The goal here is to get a reliable
    # diagnostic of the underlying dynamics, rather than incorporating the
    # effect of the Metropolis-Hastings correction.
    # TODO(mhoffman): Needs more experimentation.
    expanded_accept_prob = mcmc_util.left_justified_expand_dims_like(
        accept_prob, x)

    # accept_prob is zero when x is NaN, but we still want to sanitize such
    # values.
    x_safe = tf.where(tf.math.is_finite(x), x, tf.zeros_like(x))
    # If all accept_prob's are zero, the x_center will have a nonsense value,
    # but we'll discard the resultant gradients later on, so it's fine.
    x_center = (
        tf.reduce_sum(expanded_accept_prob * x_safe, axis=batch_axes) /
        (tf.reduce_sum(expanded_accept_prob, axis=batch_axes) + 1e-20))
    return x - tf.stop_gradient(x_center)

  def _sum_event_part(x):
    # Sum over all non-batch (event) dimensions of one structure part.
    event_axes = ps.range(batch_ndims, ps.rank(x))
    return tf.reduce_sum(x, axis=event_axes)

  def _sum_event(x):
    # Sum over event dims of every part, then across the nested structure.
    return sum(tf.nest.flatten(tf.nest.map_structure(
        _sum_event_part,
        x,
    )))

  def _square(x):
    return tf.nest.map_structure(tf.square, x)

  def _sub(x, y):
    return tf.nest.map_structure(lambda x, y: x - y, x, y)

  previous_state = tf.nest.map_structure(_center_previous_state, previous_state)
  proposed_state = tf.nest.map_structure(_center_proposed_state, proposed_state)
  chees = 0.25 * tf.square(
      _sum_event(_sub(_square(proposed_state), _square(previous_state))))
  return chees
class GradientBasedTrajectoryLengthAdaptation(kernel_base.TransitionKernel):
"""Use gradient ascent to adapt inner kernel's trajectory length.
This kernel optimizes the continuous trajectory length (aka integration time)
parameter of Hamiltonian Monte Carlo. It does so by following the gradient of
a criterion with respect to the trajectory length. The criterion is computed
via `criterion_fn` with signature `(previous_state, proposed_state,
accept_prob) -> criterion`, where both the returned values retain the batch
dimensions implied by the first three inputs. See `chees_criterion` for an
example.
To avoid resonances, this kernel jitters the integration time between 0 and
the learned trajectory length by default.
The initial trajectory length is extracted from the inner
`HamiltonianMonteCarlo` kernel by multiplying the initial step size and
initial number of leapfrog steps. This (and other algorithmic details) imply
that the step size must be a scalar.
In general, adaptation prevents the chain from reaching a stationary
distribution, so obtaining consistent samples requires `num_adaptation_steps`
be set to a value [somewhat smaller][1] than the number of burnin steps.
However, it may sometimes be helpful to set `num_adaptation_steps` to a larger
value during development in order to inspect the behavior of the chain during
adaptation.
#### Examples
This implements something similar to ChEES HMC from [2].
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
target_log_prob_fn = tfd.JointDistributionSequential([
tfd.Normal(0., 20.),
tfd.HalfNormal(10.),
]).log_prob
num_burnin_steps = 1000
num_adaptation_steps = int(num_burnin_steps * 0.8)
num_results = 500
num_chains = 16
step_size = 0.1
kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
step_size=step_size,
num_leapfrog_steps=1,
)
kernel = tfp.experimental.mcmc.GradientBasedTrajectoryLengthAdaptation(
kernel,
num_adaptation_steps=num_adaptation_steps)
kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(
kernel, num_adaptation_steps=num_adaptation_steps)
kernel = tfp.mcmc.TransformedTransitionKernel(
kernel,
[tfb.Identity(),
tfb.Exp()])
def trace_fn(_, pkr):
return (
pkr.inner_results.inner_results.inner_results.accepted_results
.step_size,
pkr.inner_results.inner_results.max_trajectory_length,
pkr.inner_results.inner_results.inner_results.log_accept_ratio,
)
# The chain will be stepped for num_results + num_burnin_steps, adapting for
# the first num_adaptation_steps.
samples, [step_size, max_trajectory_length, log_accept_ratio] = (
tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=[tf.zeros(num_chains),
tf.zeros(num_chains)],
kernel=kernel,
trace_fn=trace_fn,))
# ~0.75
accept_prob = tf.math.exp(tfp.math.reduce_logmeanexp(
tf.minimum(log_accept_ratio, 0.)))
```
#### References
[1]: <http://andrewgelman.com/2017/12/15/
burn-vs-warm-iterative-simulation-algorithms/#comment-627745>
[2]: Hoffman, M., Radul, A., & Sountsov, P. (2020). An Adaptive MCMC Scheme
for Setting Trajectory Lengths in Hamiltonian Monte Carlo. In
preparation.
"""
def __init__(
self,
inner_kernel,
num_adaptation_steps,
use_halton_sequence_jitter=True,
adaptation_rate=0.025,
jitter_amount=1.,
criterion_fn=chees_criterion,
max_leapfrog_steps=1000,
num_leapfrog_steps_getter_fn=hmc_like_num_leapfrog_steps_getter_fn,
num_leapfrog_steps_setter_fn=hmc_like_num_leapfrog_steps_setter_fn,
step_size_getter_fn=hmc_like_step_size_getter_fn,
proposed_velocity_getter_fn=hmc_like_proposed_velocity_getter_fn,
log_accept_prob_getter_fn=hmc_like_log_accept_prob_getter_fn,
proposed_state_getter_fn=hmc_like_proposed_state_getter_fn,
validate_args=False,
name=None):
"""Creates the trajectory length adaptation kernel.
The default setter_fn and the getter_fn callbacks assume that the inner
kernel produces kernel results structurally the same as the
`HamiltonianMonteCarlo` kernel (possibly wrapped in some step size
adaptation kernel).
Args:
inner_kernel: `TransitionKernel`-like object.
num_adaptation_steps: Scalar `int` `Tensor` number of initial steps to
during which to adjust the step size. This may be greater, less than, or
equal to the number of burnin steps.
use_halton_sequence_jitter: Python bool. Whether to use a Halton sequence
for jittering the trajectory length. This makes the procedure more
stable than sampling trajectory lengths from a uniform distribution.
adaptation_rate: Floating point scalar `Tensor`. How rapidly to adapt the
trajectory length.
jitter_amount: Floating point scalar `Tensor`. How much to jitter the
trajectory on the next step. The trajectory length is sampled from `[(1
- jitter_amount) * max_trajectory_length, max_trajectory_length]`.
criterion_fn: Callable with `(previous_state, proposed_state, accept_prob)
-> criterion`. Computes the criterion value.
max_leapfrog_steps: Int32 scalar `Tensor`. Clips the number of leapfrog
steps to this value.
num_leapfrog_steps_getter_fn: A callable with the signature
`(kernel_results) -> num_leapfrog_steps` where `kernel_results` are the
results of the `inner_kernel`, and `num_leapfrog_steps` is a floating
point `Tensor`.
num_leapfrog_steps_setter_fn: A callable with the signature
`(kernel_results, new_num_leapfrog_steps) -> new_kernel_results` where
`kernel_results` are the results of the `inner_kernel`,
`new_num_leapfrog_steps` is a scalar tensor `Tensor`, and
`new_kernel_results` are a copy of `kernel_results` with the number of
leapfrog steps set.
step_size_getter_fn: A callable with the signature `(kernel_results) ->
step_size` where `kernel_results` are the results of the `inner_kernel`,
and `step_size` is a floating point `Tensor`.
proposed_velocity_getter_fn: A callable with the signature
`(kernel_results) -> proposed_velocity` where `kernel_results` are the
results of the `inner_kernel`, and `proposed_velocity` is a (possibly
nested) floating point `Tensor`. Velocity is derivative of state with
respect to trajectory length.
log_accept_prob_getter_fn: A callable with the signature `(kernel_results)
-> log_accept_prob` where `kernel_results` are the results of the
`inner_kernel`, and `log_accept_prob` is a floating point `Tensor`.
`log_accept_prob` has shape `[C0, ...., Cb]` with `b > 0`.
proposed_state_getter_fn: A callable with the signature `(kernel_results)
-> proposed_state` where `kernel_results` are the results of the
`inner_kernel`, and `proposed_state` is a (possibly nested) floating
point `Tensor`.
validate_args: Python `bool`. When `True` kernel parameters are checked
for validity. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class. Default:
'simple_step_size_adaptation'.
Raises:
ValueError: If `inner_kernel` contains a `TransformedTransitionKernel` in
its hierarchy. If you need to use the `TransformedTransitionKernel`,
place it above this kernel in the hierarchy (see the example in the
class docstring).
"""
inner_kernel = mcmc_util.enable_store_parameters_in_results(inner_kernel)
_forbid_inner_transformed_kernel(inner_kernel)
with tf.name_scope(
mcmc_util.make_name(name, 'gradient_based_trajectory_length_adaptation',
'__init__')) as name:
dtype = dtype_util.common_dtype([adaptation_rate, jitter_amount],
tf.float32)
num_adaptation_steps = tf.convert_to_tensor(
num_adaptation_steps, dtype=tf.int32, name='num_adaptation_steps')
adaptation_rate = tf.convert_to_tensor(
adaptation_rate, dtype=dtype, name='adaptation_rate')
jitter_amount = tf.convert_to_tensor(
jitter_amount, dtype=dtype, name='jitter_amount')
max_leapfrog_steps = tf.convert_to_tensor(
max_leapfrog_steps, dtype=tf.int32, name='max_leapfrog_steps')
self._parameters = dict(
inner_kernel=inner_kernel,
num_adaptation_steps=num_adaptation_steps,
use_halton_sequence_jitter=use_halton_sequence_jitter,
adaptation_rate=adaptation_rate,
jitter_amount=jitter_amount,
criterion_fn=criterion_fn,
max_leapfrog_steps=max_leapfrog_steps,
num_leapfrog_steps_getter_fn=num_leapfrog_steps_getter_fn,
num_leapfrog_steps_setter_fn=num_leapfrog_steps_setter_fn,
step_size_getter_fn=step_size_getter_fn,
proposed_velocity_getter_fn=proposed_velocity_getter_fn,
log_accept_prob_getter_fn=log_accept_prob_getter_fn,
proposed_state_getter_fn=hmc_like_proposed_state_getter_fn,
validate_args=validate_args,
name=name,
)
@property
def inner_kernel(self):
return self._parameters['inner_kernel']
@property
def use_halton_sequence_jitter(self):
return self._parameters['use_halton_sequence_jitter']
@property
def num_adaptation_steps(self):
return self._parameters['num_adaptation_steps']
def criterion_fn(self, previous_state, proposed_state, accept_prob):
return self._parameters['criterion_fn'](previous_state, proposed_state,
accept_prob)
  @property
  def max_leapfrog_steps(self):
    """Upper bound on the number of leapfrog steps per iteration."""
    return self._parameters['max_leapfrog_steps']
def num_leapfrog_steps_getter_fn(self, kernel_results):
return self._parameters['num_leapfrog_steps_getter_fn'](kernel_results)
def num_leapfrog_steps_setter_fn(self, kernel_results,
new_num_leapfrog_steps):
return self._parameters['num_leapfrog_steps_setter_fn'](
kernel_results, new_num_leapfrog_steps)
def step_size_getter_fn(self, kernel_results):
return self._parameters['step_size_getter_fn'](kernel_results)
def proposed_velocity_getter_fn(self, kernel_results):
return self._parameters['proposed_velocity_getter_fn'](kernel_results)
def log_accept_prob_getter_fn(self, kernel_results):
return self._parameters['log_accept_prob_getter_fn'](kernel_results)
def proposed_state_getter_fn(self, kernel_results):
return self._parameters['proposed_state_getter_fn'](kernel_results)
  @property
  def validate_args(self):
    """Whether runtime argument-validation assertions are enabled."""
    return self._parameters['validate_args']
  @property
  def name(self):
    """Name prefixed to ops created by this kernel."""
    return self._parameters['name']
  @property
  def parameters(self):
    """Return `dict` of ``__init__`` arguments and their values."""
    return self._parameters
  def one_step(self, current_state, previous_kernel_results, seed=None):
    """Takes one inner-kernel step and adapts the maximum trajectory length.

    Jitters the current maximum trajectory length, converts it to a number
    of leapfrog steps for the inner kernel, runs the inner kernel, and then
    updates the maximum trajectory length by gradient ascent on the
    criterion (only while still within `num_adaptation_steps`).
    """
    with tf.name_scope(
        mcmc_util.make_name(self.name,
                            'gradient_based_trajectory_length_adaptation',
                            'one_step')):
      jitter_seed, inner_seed = samplers.split_seed(seed)
      dtype = previous_kernel_results.adaptation_rate.dtype
      current_state = tf.nest.map_structure(
          lambda x: tf.convert_to_tensor(x, dtype=dtype), current_state)
      step_f = tf.cast(previous_kernel_results.step, dtype)
      # Draw the per-step jitter either quasi-randomly (Halton sequence,
      # deterministic given the step index) or uniformly at random.
      if self.use_halton_sequence_jitter:
        trajectory_jitter = _halton_sequence(step_f)
      else:
        trajectory_jitter = samplers.uniform((), seed=jitter_seed, dtype=dtype)
      # Blend toward 1 so jitter_amount == 0 disables jitter entirely.
      jitter_amount = previous_kernel_results.jitter_amount
      trajectory_jitter = (
          trajectory_jitter * jitter_amount + (1. - jitter_amount))
      adapting = previous_kernel_results.step < self.num_adaptation_steps
      # After the adaptation window, switch to the iterate-averaged length.
      max_trajectory_length = tf.where(
          adapting, previous_kernel_results.max_trajectory_length,
          previous_kernel_results.averaged_max_trajectory_length)
      jittered_trajectory_length = (max_trajectory_length * trajectory_jitter)
      step_size = _ensure_step_size_is_scalar(
          self.step_size_getter_fn(previous_kernel_results), self.validate_args)
      # Always take at least one leapfrog step.
      num_leapfrog_steps = tf.cast(
          tf.maximum(
              tf.ones([], dtype),
              tf.math.ceil(jittered_trajectory_length / step_size)), tf.int32)
      previous_kernel_results_with_jitter = self.num_leapfrog_steps_setter_fn(
          previous_kernel_results, num_leapfrog_steps)
      new_state, new_inner_results = self.inner_kernel.one_step(
          current_state, previous_kernel_results_with_jitter.inner_results,
          inner_seed)
      proposed_state = self.proposed_state_getter_fn(new_inner_results)
      proposed_velocity = self.proposed_velocity_getter_fn(new_inner_results)
      accept_prob = tf.exp(self.log_accept_prob_getter_fn(new_inner_results))
      # Gradient-based update of the (unjittered) maximum trajectory length.
      new_kernel_results = _update_trajectory_grad(
          previous_kernel_results_with_jitter,
          previous_state=current_state,
          proposed_state=proposed_state,
          proposed_velocity=proposed_velocity,
          trajectory_jitter=trajectory_jitter,
          accept_prob=accept_prob,
          step_size=step_size,
          criterion_fn=self.criterion_fn,
          max_leapfrog_steps=self.max_leapfrog_steps)
      # Undo the effect of adaptation if we're not in the burnin phase. We keep
      # the criterion, however, as that's a diagnostic. We also keep the
      # leapfrog steps setting, as that's an effect of jitter (and also doubles
      # as a diagnostic).
      criterion = new_kernel_results.criterion
      new_kernel_results = mcmc_util.choose(
          adapting, new_kernel_results, previous_kernel_results_with_jitter)
      new_kernel_results = new_kernel_results._replace(
          inner_results=new_inner_results,
          step=previous_kernel_results.step + 1,
          criterion=criterion)
      return new_state, new_kernel_results
  def bootstrap_results(self, init_state):
    """Creates the initial adaptation state wrapping the inner kernel's results.

    The initial maximum trajectory length is seeded from the inner kernel's
    own `step_size * num_leapfrog_steps`.
    """
    with tf.name_scope(
        mcmc_util.make_name(self.name,
                            'gradient_based_trajectory_length_adaptation',
                            'bootstrap_results')):
      inner_results = self.inner_kernel.bootstrap_results(init_state)
      dtype = self.parameters['adaptation_rate'].dtype
      init_state = tf.nest.map_structure(
          lambda x: tf.convert_to_tensor(x, dtype=dtype), init_state)
      step_size = _ensure_step_size_is_scalar(
          self.step_size_getter_fn(inner_results), self.validate_args)
      init_max_trajectory_length = (
          step_size *
          tf.cast(self.num_leapfrog_steps_getter_fn(inner_results), dtype))
      results = GradientBasedTrajectoryLengthAdaptationResults(
          inner_results=inner_results,
          max_trajectory_length=init_max_trajectory_length,
          step=tf.zeros([], tf.int32),
          adaptation_rate=self.parameters['adaptation_rate'],
          jitter_amount=self.parameters['jitter_amount'],
          # Running second-moment and iterate averages start at zero.
          averaged_sq_grad=tf.zeros([], dtype),
          averaged_max_trajectory_length=tf.zeros([], dtype),
          criterion=tf.zeros_like(
              self.log_accept_prob_getter_fn(inner_results)),
          seed=samplers.zeros_seed(),
      )
      return results
  @property
  def is_calibrated(self):
    """Delegates calibration status to the wrapped inner kernel."""
    return self.inner_kernel.is_calibrated
def _forbid_inner_transformed_kernel(inner_kernel):
"""Forbids inner kernel from containing `TransformedTransitionKernel`."""
# TODO(b/169898277): The issue is that the proposed_velocity must be in the
# same space as the chain state, and TransformedTransitionKernel breaks that
# invariant.
while hasattr(inner_kernel,
'parameters') and 'inner_kernel' in inner_kernel.parameters:
if isinstance(inner_kernel, transformed_kernel.TransformedTransitionKernel):
raise ValueError(
'The inner kernel cannot contain a `TransformedTransitionKernel`. '
'Please place the `TransformedTransitionKernel` above this kernel '
'in the hierarchy (see the docstring example of '
'`GradientBasedTrajectoryLengthAdaptation` kernel.)')
inner_kernel = inner_kernel.parameters['inner_kernel']
def _ensure_step_size_is_scalar(step_size, validate_args):
  """Validates that `step_size` is a scalar `Tensor` and returns it."""
  # A nested structure of step sizes is never acceptable here.
  if tf.nest.is_nested(step_size):
    raise ValueError('Step size must be a scalar. Got: {}'.format(step_size))
  rank_static = tf.get_static_value(ps.rank(step_size))
  if rank_static is not None:
    # Rank is known statically: check it eagerly.
    if rank_static != 0:
      raise ValueError('Step size must be a scalar. Got: {}'.format(step_size))
  elif validate_args:
    # Rank only known at runtime: attach a graph-side assertion.
    assertion = assert_util.assert_rank(step_size, 0,
                                        'Step size must be a scalar.')
    with tf.control_dependencies([assertion]):
      return tf.identity(step_size)
  return step_size
def _halton_sequence(i, max_bits=MAX_HALTON_SEQUENCE_BITS):
  """Returns element `i` of the base-2 Halton (van der Corput) sequence."""
  bit_masks = 2**tf.range(max_bits, dtype=i.dtype)
  # Extract the bits of (i + 1) and mirror them about the radix point.
  bits = tf.math.mod((i + 1) // bit_masks, 2)
  return tf.einsum('i,i->', bits, 0.5 / bit_masks)
def _update_trajectory_grad(previous_kernel_results, previous_state,
                            proposed_state, proposed_velocity,
                            trajectory_jitter, accept_prob, step_size,
                            criterion_fn, max_leapfrog_steps):
  """Updates the trajectory length."""
  # Compute criterion grads.
  def leapfrog_action(dt):
    # This represents the effect on the criterion value as the state follows the
    # proposed velocity. This implicitly assumes an identity mass matrix.
    return criterion_fn(
        previous_state,
        tf.nest.map_structure(
            lambda x, v:  # pylint: disable=g-long-lambda
            (x + mcmc_util.left_justified_expand_dims_like(dt, v) * v),
            proposed_state,
            proposed_velocity),
        accept_prob)

  # Differentiate the criterion w.r.t. an infinitesimal extension of the
  # trajectory, evaluated at dt = 0.
  criterion, trajectory_grad = gradient.value_and_gradient(
      leapfrog_action, tf.zeros_like(accept_prob))
  trajectory_grad *= trajectory_jitter
  # Weight by acceptance probability.
  # Zero out gradients from near-rejected or non-finite proposals so they
  # cannot destabilize the running averages.
  trajectory_grad = tf.where(accept_prob > 1e-4, trajectory_grad, 0.)
  trajectory_grad = tf.where(
      tf.math.is_finite(trajectory_grad), trajectory_grad, 0.)
  trajectory_grad = (
      tf.reduce_sum(trajectory_grad * accept_prob) /
      tf.reduce_sum(accept_prob + 1e-20))
  # Compute Adam/RMSProp step size.
  dtype = previous_kernel_results.adaptation_rate.dtype
  iteration_f = tf.cast(previous_kernel_results.step, dtype) + 1.
  msg_adaptation_rate = 0.05
  # Exponential moving average of the squared gradient, with Adam-style
  # bias correction below.
  new_averaged_sq_grad = (
      (1 - msg_adaptation_rate) * previous_kernel_results.averaged_sq_grad +
      msg_adaptation_rate * trajectory_grad**2)
  adjusted_averaged_sq_grad = new_averaged_sq_grad / (
      1. - (1 - msg_adaptation_rate)**iteration_f)
  trajectory_step_size = (
      previous_kernel_results.adaptation_rate /
      tf.sqrt(adjusted_averaged_sq_grad + 1e-20))
  # Apply the gradient. Clip absolute value to ~log(2)/2.
  log_update = tf.clip_by_value(trajectory_step_size * trajectory_grad, -0.35,
                                0.35)
  # Multiplicative update: ascent is performed in log-space.
  new_max_trajectory_length = previous_kernel_results.max_trajectory_length * tf.exp(
      log_update)
  # Iterate averaging.
  # Weight decays as step^(-1/2); the average is taken in log-space.
  average_weight = iteration_f**(-0.5)
  new_averaged_max_trajectory_length = tf.exp(
      average_weight * tf.math.log(new_max_trajectory_length) +
      (1 - average_weight) *
      tf.math.log(1e-10 +
                  previous_kernel_results.averaged_max_trajectory_length))
  # Clip the maximum trajectory length.
  new_max_trajectory_length = _clip_max_trajectory_length(
      new_max_trajectory_length, step_size,
      previous_kernel_results.adaptation_rate, max_leapfrog_steps)
  return previous_kernel_results._replace(
      criterion=criterion,
      max_trajectory_length=new_max_trajectory_length,
      averaged_sq_grad=new_averaged_sq_grad,
      averaged_max_trajectory_length=new_averaged_max_trajectory_length)
def _clip_max_trajectory_length(max_trajectory_length, step_size,
                                trajectory_adaptation_rate, max_leapfrog_steps):
  """Clips the trajectory length to at most `step_size * max_leapfrog_steps`.

  Clipping is applied only while adaptation is active
  (`trajectory_adaptation_rate > 0`).
  """
  ceiling = step_size * tf.cast(max_leapfrog_steps,
                                max_trajectory_length.dtype)
  clipped = tf.clip_by_value(max_trajectory_length, 0., ceiling)
  return tf.where(trajectory_adaptation_rate > 0, clipped,
                  max_trajectory_length)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
f3cba79e478e55147fa74e9fbd0f25f9ede5f85e | 8af71789222675dddd541bafba681143162f4206 | /apps/descuentos/functions.py | 53dd6efeaf6dbe9c64bcf9da39bf5b304bf74a74 | [] | no_license | RubenAlvarenga/nhakanina | b82d23d80e06aaf49693c8fb65a70ee73e130994 | 3e39a522029c9a6cbb455b2e736ce335ebc4bf1d | refs/heads/master | 2021-01-10T15:32:01.550423 | 2016-03-07T17:34:09 | 2016-03-07T17:34:09 | 43,449,047 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from apps.entidades.models import Persona
def es_socio_afemec(request):
    """Return True when the Persona referenced by the request is an AFEMEC member.

    The persona primary key is looked up under the POST key 'id_persona'
    first; if that lookup fails for any reason (missing key, bad value,
    no such Persona) it falls back to the POST key 'persona'.
    """
    # The original used a bare ``except:`` which also swallows SystemExit and
    # KeyboardInterrupt; narrowed to ``Exception`` while keeping the same
    # best-effort fallback behavior.
    try:
        persona = Persona.objects.get(pk=int(request.POST['id_persona']))
    except Exception:
        persona = Persona.objects.get(pk=int(request.POST['persona']))
    return bool(persona.socio_afemec)
def es_docente_ise(request):
    """Return True when the Persona referenced by the request is an ISE teacher.

    The persona primary key is looked up under the POST key 'id_persona'
    first; if that lookup fails for any reason (missing key, bad value,
    no such Persona) it falls back to the POST key 'persona'.
    """
    # The original used a bare ``except:`` which also swallows SystemExit and
    # KeyboardInterrupt; narrowed to ``Exception`` while keeping the same
    # best-effort fallback behavior.
    try:
        persona = Persona.objects.get(pk=int(request.POST['id_persona']))
    except Exception:
        persona = Persona.objects.get(pk=int(request.POST['persona']))
    return bool(persona.docente_ise)
"rubenalvarengan@gmail.com"
] | rubenalvarengan@gmail.com |
28e423a7b1759f7093939087b0f412e5562322e7 | f521c77da715d4a1deba79af95e8f95465a9679f | /plot_ZonalTAS_NCEP.py | 22386514675e786724ad9a4873dd003a4deafb99 | [
"MIT"
] | permissive | hairui-hao/ArcticSeaIce | 37e4891d77ad6070e7aa66bfeaf1530be7794384 | 32f630c931e145e1d492aa32a5f32ca3c998876f | refs/heads/master | 2022-03-28T19:25:12.083837 | 2017-10-24T05:25:36 | 2017-10-24T05:25:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,378 | py | """
Plots Arctic mean surface temperature (1948-2016) for Jan-month
Website : http://www.esrl.noaa.gov/psd/cgi-bin/data/timeseries/timeseries1.pl
Author : Zachary M. Labe
Date : 15 May 2016
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as c
### Directory and time
directoryfigure = '...' # enter directory
directorydata = '...' # enter directory
### Insert month
month = 'June'
### Retrieve Data
# Two columns: year and the Arctic-mean surface temperature averaged over
# January through the chosen month.
year,temp = np.genfromtxt(directorydata + 'Arctic_Tsurf_Jan%s.txt' % month,
                          unpack=True)
currentyear = int(year[-1])
### Define parameters (dark)
# Dark theme: black figure/axes backgrounds with white text and ticks.
plt.rc('savefig', facecolor='black')
plt.rc('axes', edgecolor='white')
plt.rc('xtick', color='white')
plt.rc('ytick', color='white')
plt.rc('axes', labelcolor='white')
plt.rc('axes', facecolor='black')
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
### Plot for zonal mean temperature
fig = plt.figure()
ax = plt.subplot(111)
### Adjust axes in time series plots
def adjust_spines(ax, spines):
    """Show only the named spines (offset 10 pt outward) and hide the rest.

    Tick marks are kept only on the sides listed in *spines*
    ('left' and/or 'bottom'); all other ticks are removed.
    """
    visible = set(spines)
    for side, spine in ax.spines.items():
        if side in visible:
            spine.set_position(('outward', 10))
        else:
            spine.set_color('none')
    if 'left' in visible:
        ax.yaxis.set_ticks_position('left')
    else:
        ax.yaxis.set_ticks([])
    if 'bottom' in visible:
        ax.xaxis.set_ticks_position('bottom')
    else:
        ax.xaxis.set_ticks([])
### Set colormap
# Concatenate three colormaps (blue-green, yellow-green reversed,
# yellow-brown) so line color progresses with year.
cmap = plt.get_cmap('YlGn_r')
cmap2 = plt.get_cmap('PuBuGn')
cmap3 = plt.get_cmap('YlOrBr')
# NOTE(review): ``xrange`` is Python 2 only -- this script targets Python 2.
cmaplist = [cmap(i) for i in xrange(cmap.N)]
cmaplist2 = [cmap2(i) for i in xrange(100,cmap2.N)]
cmaplist3 = [cmap3(i) for i in xrange(cmap3.N)]
cms = c.ListedColormap(cmaplist2 + cmaplist + cmaplist3)
cm = plt.get_cmap(cms)
no_points = len(year)
# NOTE(review): Axes.set_color_cycle was deprecated in matplotlib 1.5 and
# later removed; modern matplotlib uses ax.set_prop_cycle.
ax.set_color_cycle([cm(1.*i/(no_points-1))
                   for i in range(no_points-1)])
# Draw the series one two-point segment at a time so each segment picks the
# next color from the cycle, producing a year-colored line.
for i in range(no_points-1):
    bar = ax.plot(year[i:i+2],temp[i:i+2],linewidth=3.5,zorder=1)
# Highlight the most recent data point in red.
plt.scatter(year[-1],temp[-1],
            s=40,color='r',zorder=2)
ax.tick_params('both',length=7.5,width=2,which='major')
adjust_spines(ax, ['left', 'bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
plt.subplots_adjust(bottom=0.15)
### y-ticks
# Integer-degree ticks spanning the observed temperature range.
plt.yticks(np.arange(int(min(temp))-1,int(max(temp))+1,1),
           map(str,np.arange(int(min(temp))-1,int(max(temp))+1,1)),
           fontsize=11)
plt.ylabel(r'\textbf{Surface Temperature [$^{\circ}$C]}',fontsize=11)
plt.ylim([int(min(temp))-1,int(max(temp))])
### x-ticks
plt.xticks(np.arange(1950,2021,10),map(str,np.arange(1950,2021,10)),
           fontsize=11)
plt.xlabel(r'\textbf{NCEP/NCAR Reanalysis : [Jan-%s] : Arctic, 66N+}' % month,
           fontsize=11)
plt.xlim([1948,2020])
### Insert text
# Annotation next to the latest point, plus an attribution box.
plt.text(currentyear-8,int(max(temp)),r'\textbf{You are here!}',
         fontsize=11,rotation='horizontal',ha='left',color='r')
plt.text(1999.8,int(min(temp))-0.5,r'Zachary Labe (@ZLabe)',
         fontsize=8,rotation='horizontal',ha='left',color='w',
         bbox=dict(boxstyle='square,pad=0.3',fc='k',
                   edgecolor='w',linewidth=0.2))
### Save figure
plt.savefig(directoryfigure + 'ZonalTAS_NCEP_sample.png',dpi=900)
| [
"zml5@cornell.edu"
] | zml5@cornell.edu |
265d659a26b92f71678e40a3fcd28a041cc9a42a | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /eXe/rev1889-1952/base-trunk-1889/exe/engine/attachmentidevice.py | 38aeb4d13f3b951965ace76cba69d9ed7c493573 | [] | no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,517 | py | """
An Attachment Idevice allows a file to be attached to a package.
"""
from exe.engine.idevice import Idevice
from exe.engine.path import Path
from exe.engine.translate import lateTranslate
from exe.engine.resource import Resource
import logging
log = logging.getLogger(__name__)
class AttachmentIdevice(Idevice):
    """
    An Attachment Idevice allows a file to be attached to a package.
    """
    # Bumped whenever the persisted layout changes; see upgradeToVersionN.
    persistenceVersion = 3

    def __init__(self):
        """Initializes the iDevice with its title, author and help texts."""
        Idevice.__init__(self,
                         x_(u"Attachment"),
                         x_(u"University of Auckland"),
                         u"",
                         x_(u"The attachment iDevice is used to attach "
                             "existing files to your .elp content. For example, "
                             "you might have a PDF file or a PPT presentation "
                             "file that you wish the learners to have access "
                             "to, these can be attached and labeled to indicate "
                             "what the attachment is and how large the file is. "
                             "Learners can click on the attachment link and can "
                             "download the attachment."), u"", u"")
        self.emphasis = Idevice.NoEmphasis
        self._filenameInstruc = x_(u'Click <strong>Select a file</strong>, '
                                    'browse to the file you want '
                                    'to attach and select it.')
        # User-visible label shown next to the download link.
        self.label = u""
        self._labelInstruc = x_(u"<p>"
                                "Assign a label for the attachment. It "
                                "is useful to include the type of file. "
                                "Eg. pdf, ppt, etc."
                                "</p>"
                                "<p>"
                                "Including the size is also recommended so "
                                "that after your package is exported "
                                "to a web site, people will have an idea "
                                "how long it would take to download this "
                                "attachment."
                                "</p>"
                                "<p>"
                                "For example: "
                                "<code>Sales Forecast.doc (500kb)</code>"
                                "</p>")
        self.description = u""
        self._descriptionInstruc = x_(u"Provide a brief description of the "
                                      "file")

    # Lazily-translated accessors for the instruction strings above.
    filenameInstruc = lateTranslate('filenameInstruc')
    labelInstruc = lateTranslate('labelInstruc')
    descriptionInstruc = lateTranslate('descriptionInstruc')

    def setAttachment(self, attachmentPath):
        """
        Store the attachment in the package
        Needs to be in a package to work.
        """
        log.debug(u"setAttachment "+unicode(attachmentPath))
        resourceFile = Path(attachmentPath)
        # BUG FIX: the original wrote ``assert(condition, message)``, which
        # asserts a non-empty tuple and therefore can never fail.
        assert self.parentNode, \
               _('Attachment %s has no parentNode') % self.id
        assert self.parentNode.package, \
               _('iDevice %s has no package') % self.parentNode.id
        if resourceFile.isfile():
            # Delete any previously attached resources before storing the
            # new one (only one attachment is kept at a time).
            if self.userResources:
                for resource in self.userResources:
                    resource.delete()
            self.userResources = [ Resource(self.parentNode.package,
                                            resourceFile) ]
        else:
            log.error('File %s is not a file' % resourceFile)

    def upgradeToVersion1(self):
        """
        Upgrades v0.6 to v0.7.
        """
        self.lastIdevice = False

    def upgradeToVersion2(self):
        """
        Upgrades to v0.10
        """
        self._upgradeIdeviceToVersion1()
        self._filenameInstruc = self.__dict__.get('filenameInstruc', '')
        self._labelInstruc = self.__dict__.get('labelInstruc', '')
        self._descriptionInstruc = self.__dict__.get('descriptionInstruc', '')

    def upgradeToVersion3(self):
        """
        Upgrades to v0.12
        """
        self._upgradeIdeviceToVersion2()
        # Migrate the legacy 'filename' attribute to a package Resource.
        if self.filename and self.parentNode:
            self.userResources = [ Resource(self.parentNode.package,
                                            Path(self.filename)) ]
        del self.filename
| [
"joliebig@fim.uni-passau.de"
] | joliebig@fim.uni-passau.de |
57d10a493096e2eeeb88d8446c7a2a26f7492e49 | ec523d48de9c38e0c5d81ac865d43779117650aa | /apps/school/migrations/0007_auto_20210206_1152.py | f836f0186d39b5ae72c999e876c2070cf52dd30a | [] | no_license | drc-ima/moon-school | b4b82e2c71d14a6b57aa75e36100851015f3696a | 94c7675642583ed4b97a4eb716015510fe93ca84 | refs/heads/master | 2023-03-06T10:22:56.699977 | 2021-02-11T15:28:16 | 2021-02-11T15:28:16 | 334,768,970 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py | # Generated by Django 3.1.6 on 2021-02-06 11:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated schema migration: widens the Holiday day/month choice
    # lists and introduces the PassGrade model.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('school', '0006_auto_20210127_0025'),
    ]

    operations = [
        # Re-declare Holiday.day with the full 1st-31st choice set.
        migrations.AlterField(
            model_name='holiday',
            name='day',
            field=models.IntegerField(blank=True, choices=[(11, '11th'), (27, '27th'), (28, '28th'), (12, '12th'), (3, '3rd'), (6, '6th'), (29, '29th'), (5, '5th'), (14, '14th'), (19, '19th'), (25, '25th'), (24, '24th'), (22, '22nd'), (13, '13th'), (10, '10th'), (18, '18th'), (21, '21st'), (9, '9th'), (15, '15th'), (23, '23rd'), (30, '30th'), (2, '2nd'), (20, '20th'), (16, '16th'), (26, '26th'), (31, '31st'), (8, '8th'), (7, '7th'), (17, '17th'), (1, '1st'), (4, '4th')], null=True),
        ),
        # Re-declare Holiday.month with month-name choices keyed by number.
        migrations.AlterField(
            model_name='holiday',
            name='month',
            field=models.CharField(blank=True, choices=[('7', 'July'), ('6', 'June'), ('10', 'October'), ('12', 'December'), ('8', 'August'), ('4', 'April'), ('3', 'March'), ('11', 'November'), ('9', 'September'), ('5', 'May'), ('1', 'January'), ('2', 'February')], max_length=255, null=True),
        ),
        # New PassGrade table: minimum passing score, stamped with creator
        # and school.
        migrations.CreateModel(
            name='PassGrade',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('score', models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True)),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now)),
                ('school_id', models.CharField(blank=True, max_length=200, null=True)),
                ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='pass_grades', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'pass_grade',
            },
        ),
    ]
| [
"emmanuelofosu472@gmail.com"
] | emmanuelofosu472@gmail.com |
e18554ebd5d4e16b5fa0cf27ef48a8651a8f5eca | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03148/s022846837.py | b6ba33db833753e64e34435339d47116fcc3ac8b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | import sys
# Competitive-programming solution (AtCoder p03148, "sushi" style problem):
# choose k of n items, each with a topping type t and deliciousness d;
# score = sum of chosen d + (number of distinct topping types)**2.
# Greedy: take the k tastiest items preferring new topping types, then try
# swapping the least-tasty "distinct-type" pick for tastier duplicates.
# NOTE(review): the accumulator variable ``sum`` shadows the builtin.
n,k=map(int,input().split())
data=[]
for i in range(n):
    t,d=map(int,input().split())
    # Store as [deliciousness, type] so sorting orders by d descending.
    data.append([d,t])
data.sort(reverse=True)
flag=[0]*(n+1)          # flag[t] == 1 once topping type t has been taken
eat=[]                  # chosen items, first occurrence of each type
lst=[]                  # skipped duplicates (same type already taken)
count=0                 # number of distinct types chosen so far
for u in data:
    d,t=u
    if flag[t]==0:
        eat.append(u)
        flag[t]=1
        count+=1
        if count==k:
            break
    else:
        lst.append(u)
else:
    # Loop finished without a break: fewer than k distinct types exist.
    # Fill the remaining slots with the tastiest duplicates, then consider
    # swapping the cheapest distinct pick for an even tastier duplicate
    # (losing a type costs count**2 - (count-1)**2 == 2*count - 1 bonus).
    lst.sort(reverse=True)
    sum=0
    for i in range(n-count):
        if i<k-count:
            sum+=lst[i][0]
        else:
            if lst[i][0]>eat[count-1][0]+2*count-1:
                eat[count-1]=lst[i]
                count-=1
            else:
                break
    for u in eat:
        sum+=u[0]
    sum+=count**2
    print(sum)
    sys.exit()
# k distinct types were taken; try trading distinct picks (smallest d last
# in ``eat``) for tastier duplicates when d gain exceeds the bonus loss.
lst.sort(reverse=True)
for u in lst:
    d=u[0]
    if d>eat[count-1][0]+2*count-1:
        eat[count-1]=u
        count-=1
    else:
        break
sum=0
for u in eat:
    sum+=u[0]
sum+=count**2
print(sum)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
32671589ba59b738fb6c30283f9d8173ee8e4853 | 48894ae68f0234e263d325470178d67ab313c73e | /sa/apps/reportobjectsummary/tests/test.py | ed8238d78acdc906a1aa1807b5128c5cc4843525 | [
"BSD-3-Clause"
] | permissive | DreamerDDL/noc | 7f949f55bb2c02c15ac2cc46bc62d957aee43a86 | 2ab0ab7718bb7116da2c3953efd466757e11d9ce | refs/heads/master | 2021-05-10T18:22:53.678588 | 2015-06-29T12:28:20 | 2015-06-29T12:28:20 | 118,628,133 | 0 | 0 | null | 2018-01-23T15:19:51 | 2018-01-23T15:19:51 | null | UTF-8 | Python | false | false | 615 | py | # -*- coding: utf-8 -*-
##----------------------------------------------------------------------
## reportobjectsummary Test
##----------------------------------------------------------------------
## Copyright (C) 2007-2009 The NOC Project
## See LICENSE for details
##----------------------------------------------------------------------
from noc.lib.test import ReportApplicationTestCase
class reportobjectsummaryTestCase(ReportApplicationTestCase):
    """Smoke-tests the reportobjectsummary application for every report type."""
    # Each dict is POSTed to the report application by the base test case.
    posts=[
        {"report_type":"profile"},
        {"report_type":"domain"},
        {"report_type":"tag"},
        {"report_type":"domain-profile"},
    ]
| [
"dvolodin7@gmail.com"
] | dvolodin7@gmail.com |
66ee7e1859ffbbd0dbfd5e1b12d2148f1caa195d | e57d7785276053332c633b57f6925c90ad660580 | /sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/postgresql_flexibleservers/operations/_servers_operations.py | 9ba2a521dd597f13a8c796589610274307c76960 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | adriananeci/azure-sdk-for-python | 0d560308497616a563b6afecbb494a88535da4c5 | b2bdfe659210998d6d479e73b133b6c51eb2c009 | refs/heads/main | 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 | MIT | 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null | UTF-8 | Python | false | false | 47,197 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServersOperations(object):
"""ServersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.rdbms.postgresql_flexibleservers.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to send HTTP requests.
        self._client = client
        # Model serializer / deserializer for request and response bodies.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (subscription id, polling interval, ...).
        self._config = config
    def _create_initial(
        self,
        resource_group_name,  # type: str
        server_name,  # type: str
        parameters,  # type: "_models.Server"
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.Server"]
        """Issues the initial PUT request of the create long-running operation.

        Returns the deserialized ``Server`` for 200/201 responses; 202 yields
        ``None`` (creation accepted, still in progress).
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.Server"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the Server model as the PUT body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'Server')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Server', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('Server', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}'}  # type: ignore
    def begin_create(
        self,
        resource_group_name,  # type: str
        server_name,  # type: str
        parameters,  # type: "_models.Server"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.Server"]
        """Creates a new server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param parameters: The required parameters for creating or updating a server.
        :type parameters: ~azure.mgmt.rdbms.postgresql_flexibleservers.models.Server
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either Server or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.rdbms.postgresql_flexibleservers.models.Server]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Server"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only send the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = self._create_initial(
                resource_group_name=resource_group_name,
                server_name=server_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserializes the final polling response into a Server model.
            deserialized = self._deserialize('Server', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume an in-flight LRO from its continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}'}  # type: ignore
    def _update_initial(
        self,
        resource_group_name,  # type: str
        server_name,  # type: str
        parameters,  # type: "_models.ServerForUpdate"
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["_models.Server"]
        """Issues the initial PATCH request of the update long-running operation.

        Returns the deserialized ``Server`` for 200 responses; 202 yields
        ``None`` (update accepted, still in progress).
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.Server"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize only the updatable properties as the PATCH body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ServerForUpdate')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Server', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}'}  # type: ignore
def begin_update(
    self,
    resource_group_name,  # type: str
    server_name,  # type: str
    parameters,  # type: "_models.ServerForUpdate"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.Server"]
    """Updates an existing server. The request body can contain one to many of the properties present
    in the normal server definition.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :param parameters: The required parameters for updating a server.
    :type parameters: ~azure.mgmt.rdbms.postgresql_flexibleservers.models.ServerForUpdate
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either Server or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.rdbms.postgresql_flexibleservers.models.Server]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Server"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: issue the initial PATCH. The cls passthrough
        # lambda keeps the raw pipeline response so the poller can drive the LRO.
        raw_result = self._update_initial(
            resource_group_name=resource_group_name,
            server_name=server_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Remove request-only kwargs so they are not forwarded to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response into a Server model.
        deserialized = self._deserialize('Server', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
    }
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller instead of starting a new operation.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}'}  # type: ignore
def _delete_initial(
    self,
    resource_group_name,  # type: str
    server_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Send the initial DELETE request of the long-running server deletion.

    Accepts 200/202/204; polling to completion is handled by :meth:`begin_delete`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-06-01"
    accept = "application/json"
    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}'}  # type: ignore
def begin_delete(
    self,
    resource_group_name,  # type: str
    server_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes a server.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: fire the initial DELETE request.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            server_name=server_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Remove request-only kwargs so they are not forwarded to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deletion has no body to deserialize; only honor a custom cls hook.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
    }
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller instead of starting a new operation.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}'}  # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    server_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.Server"
    """Gets information about a server.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Server, or the result of cls(response)
    :rtype: ~azure.mgmt.rdbms.postgresql_flexibleservers.models.Server
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Server"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-06-01"
    accept = "application/json"
    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = self._deserialize('Server', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.ServerListResult"]
    """List all the servers in a given resource group.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ServerListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.rdbms.postgresql_flexibleservers.models.ServerListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServerListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-06-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build either the first-page request or a follow-up request from the
        # server-provided next_link (which already embeds the query string).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list_by_resource_group.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Split one page into (continuation link, iterator of items).
        deserialized = self._deserialize('ServerListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page, raising a mapped error for non-200 responses.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response
    return ItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers'}  # type: ignore
def list(
    self,
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.ServerListResult"]
    """List all the servers in a given subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ServerListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.rdbms.postgresql_flexibleservers.models.ServerListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServerListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-06-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build either the first-page request or a follow-up request from the
        # server-provided next_link (which already embeds the query string).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Split one page into (continuation link, iterator of items).
        deserialized = self._deserialize('ServerListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page, raising a mapped error for non-200 responses.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response
    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DBforPostgreSQL/flexibleServers'}  # type: ignore
def _restart_initial(
    self,
    resource_group_name,  # type: str
    server_name,  # type: str
    parameters=None,  # type: Optional["_models.RestartParameter"]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Send the initial POST request of the long-running server restart.

    The optional ``parameters`` body controls the restart (e.g. failover);
    polling to completion is handled by :meth:`begin_restart`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-06-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    # Construct URL
    url = self._restart_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    body_content_kwargs = {}  # type: Dict[str, Any]
    # The restart body is optional; send an empty body when no parameters given.
    if parameters is not None:
        body_content = self._serialize.body(parameters, 'RestartParameter')
    else:
        body_content = None
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    if cls:
        return cls(pipeline_response, None, {})
_restart_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/restart'}  # type: ignore
def begin_restart(
    self,
    resource_group_name,  # type: str
    server_name,  # type: str
    parameters=None,  # type: Optional["_models.RestartParameter"]
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Restarts a server.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :param parameters: The parameters for restarting a server.
    :type parameters: ~azure.mgmt.rdbms.postgresql_flexibleservers.models.RestartParameter
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: fire the initial restart POST.
        raw_result = self._restart_initial(
            resource_group_name=resource_group_name,
            server_name=server_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Remove request-only kwargs so they are not forwarded to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Restart has no body to deserialize; only honor a custom cls hook.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
    }
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller instead of starting a new operation.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/restart'}  # type: ignore
def _start_initial(
    self,
    resource_group_name,  # type: str
    server_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Send the initial POST request of the long-running server start.

    Accepts 200/202; polling to completion is handled by :meth:`begin_start`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-06-01"
    accept = "application/json"
    # Construct URL
    url = self._start_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    if cls:
        return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/start'}  # type: ignore
def begin_start(
    self,
    resource_group_name,  # type: str
    server_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Starts a server.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: fire the initial start POST.
        raw_result = self._start_initial(
            resource_group_name=resource_group_name,
            server_name=server_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Remove request-only kwargs so they are not forwarded to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Start has no body to deserialize; only honor a custom cls hook.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
    }
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller instead of starting a new operation.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/start'}  # type: ignore
def _stop_initial(
    self,
    resource_group_name,  # type: str
    server_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Send the initial POST request of the long-running server stop.

    Accepts 200/202; polling to completion is handled by :meth:`begin_stop`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-06-01"
    accept = "application/json"
    # Construct URL
    url = self._stop_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    if cls:
        return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/stop'}  # type: ignore
def begin_stop(
    self,
    resource_group_name,  # type: str
    server_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Stops a server.

    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param server_name: The name of the server.
    :type server_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: fire the initial stop POST.
        raw_result = self._stop_initial(
            resource_group_name=resource_group_name,
            server_name=server_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Remove request-only kwargs so they are not forwarded to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Stop has no body to deserialize; only honor a custom cls hook.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        'serverName': self._serialize.url("server_name", server_name, 'str'),
    }
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously saved poller instead of starting a new operation.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/flexibleServers/{serverName}/stop'}  # type: ignore
| [
"noreply@github.com"
] | adriananeci.noreply@github.com |
b5321f30fb4be916f71c029472ed6c1fd1b65cc0 | 3844f6dce7967f56585474d96e609da2ee251ed4 | /backend/tony_test_0630_dev_22679/settings.py | 63bec43e250ae0fcdeec769bcb1b50874a5fcd6d | [] | no_license | crowdbotics-apps/tony-test-0630-dev-22679 | 66d13e68dcd8cf48e561984b4118e874af6a2824 | 885bd36fc55149688f46130c0d3606411811123e | refs/heads/master | 2023-06-06T18:17:34.140606 | 2021-06-30T19:37:29 | 2021-06-30T19:37:29 | 381,814,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,209 | py | """
Django settings for tony_test_0630_dev_22679 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
# All runtime configuration is read from environment variables via django-environ.
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# No default is given here, so startup fails fast if SECRET_KEY is unset.
SECRET_KEY = env.str("SECRET_KEY")
# Defaults to accepting any host; narrow via the HOST env var in production.
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
# Trust the reverse proxy's X-Forwarded-Proto header for request.is_secure().
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
]
LOCAL_APPS = [
    'home',
    'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'rest_auth',
    'rest_auth.registration',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
    'django_extensions',
    'drf_yasg',
    'storages',
    # start fcm_django push notifications
    'fcm_django',
    # end fcm_django push notifications
]
# Extra apps discovered dynamically from the project's modules manifest.
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tony_test_0630_dev_22679.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # web_build holds the compiled frontend; its index is served as a template.
        'DIRS': [os.path.join(BASE_DIR, 'web_build')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'tony_test_0630_dev_22679.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# SQLite is the fallback; a DATABASE_URL env var (parsed below) overrides it.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
if env.str("DATABASE_URL", default=None):
    DATABASES = {
        'default': env.db()
    }
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# WhiteNoise serves static files directly from the app process.
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
# Email is the login identifier; usernames are not collected.
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
# Outgoing mail via SendGrid SMTP; falls back to console backend below if unset.
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
# S3 media storage is enabled only when all four credentials are provided.
USE_S3 = (
    AWS_ACCESS_KEY_ID and
    AWS_SECRET_ACCESS_KEY and
    AWS_STORAGE_BUCKET_NAME and
    AWS_STORAGE_REGION
)
if USE_S3:
    AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
    AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
    AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
    AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
    AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
    DEFAULT_FILE_STORAGE = env.str(
        "DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
    )
# Local media paths (used when USE_S3 is falsy).
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
    "FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
    # output email to console instead of sending
    if not DEBUG:
        logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
006a06e4872b495391f2cfc6a4209728675663b8 | 9698c064521713bba541dd12963d850990f1e20e | /articles/updatePage.py | c068f85a3646c38954c1d1aab2eae814184302a6 | [] | no_license | mvwicky/mvwicky | b4e1bffa25811d5d3495ee0799b6b475bfd97d28 | 73d40879b126f10bf90049f1b95697c354caea76 | refs/heads/master | 2021-01-02T22:45:51.890908 | 2014-11-15T03:24:33 | 2014-11-15T03:24:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | import sys
def main():
    """Script entry point; intentionally a no-op placeholder."""
    pass
# Run only when executed directly, not when imported as a module.
if __name__ == '__main__':
    main()
"mvanwickle@gmail.com"
] | mvanwickle@gmail.com |
66dd03de2184826c0ac554612f22e8026e2888a4 | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /.history/리스트_20200628151653.py | daf2fc9fc4c9d7445937cfe68f4f1d4f67f6b05a | [] | no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | # 리스트 []
# 지하철 칸별로 10명, 20명, 30명
# subway1 = 10
# subway2 = 20
# subway3 = 30
subway = [10, 20, 30]
print(subway)
subway = ["유재석", "조세호", "박명수"]
print(subway)
# 조세호씨가 몇 번째 칸에 타고 있는가?
print(subway.index("조세호"))
# 다음 정류장에서 하하씨가 탔다.
subway.append("하하")
print(subway)
# 정형돈씨를 유재석 / 조세호 사이에 넣어본다.
subway.insert(1, "정형돈")
print(subway)
# 지하철에 있는 사람을 한 명씩 뒤에서 꺼낸다.
print(subway.pop())
print(subway)
# print(subway.pop())
# print(subway)
# print(subway.pop())
# print(subway)
# 같은 이름의 사람이 몇 명 있는지 확인
subway.append("유재석")
print(subway)
print(subway.count("유재석"))
# 정렬도 가능하다.
num_list = [5, 2, 4, 3, 1]
num_list.sort()
print(num_list)
# 순서 뒤집기 가능
num_list.reverse()
print(num_list)
# 모두 지우기
num_list.clear()
print(num_list)
# 다양한 자료형 함께 사용가능
mix_list = ["조세호", 20, True]
print(mix_list)
| [
"sangha0719@gmail.com"
] | sangha0719@gmail.com |
f95a708b13556e954e95830289cd4ceab5e767c8 | 0db05f7b843e8450bafd5ae23f8f70f9a9a8c151 | /Src/StdLib/Lib/site-packages/win32com/demos/outlookAddin.py | 8e4ced151dd05e5396080c1dc369becfa471b8d6 | [
"BSD-3-Clause",
"Python-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | IronLanguages/ironpython2 | 9c7f85bd8e6bca300e16f8c92f6384cecb979a6a | d00111890ce41b9791cb5bc55aedd071240252c4 | refs/heads/master | 2023-01-21T21:17:59.439654 | 2023-01-13T01:52:15 | 2023-01-13T01:52:15 | 91,620,472 | 1,171 | 288 | Apache-2.0 | 2023-01-13T01:52:16 | 2017-05-17T21:11:51 | Python | UTF-8 | Python | false | false | 4,637 | py | # A demo plugin for Microsoft Outlook (NOT Outlook Express)
#
# This addin simply adds a new button to the main Outlook toolbar,
# and displays a message box when clicked. Thus, it demonstrates
# how to plug in to Outlook itself, and hook outlook events.
#
# Additionally, each time a new message arrives in the Inbox, a message
# is printed with the subject of the message.
#
# To register the addin, simply execute:
# outlookAddin.py
# This will install the COM server, and write the necessary
# AddIn key to Outlook
#
# To unregister completely:
# outlookAddin.py --unregister
#
# To debug, execute:
# outlookAddin.py --debug
#
# Then open Pythonwin, and select "Tools->Trace Collector Debugging Tool"
# Restart Outlook, and you should see some output generated.
#
# NOTE: If the AddIn fails with an error, Outlook will re-register
# the addin to not automatically load next time Outlook starts. To
# correct this, simply re-register the addin (see above)
from win32com import universal
from win32com.server.exception import COMException
from win32com.client import gencache, DispatchWithEvents
import winerror
import pythoncom
from win32com.client import constants
import sys
# Support for COM objects we use.
gencache.EnsureModule('{00062FFF-0000-0000-C000-000000000046}', 0, 9, 0, bForDemand=True) # Outlook 9
gencache.EnsureModule('{2DF8D04C-5BFA-101B-BDE5-00AA0044DE52}', 0, 2, 1, bForDemand=True) # Office 9
# The TLB defiining the interfaces we implement
universal.RegisterInterfaces('{AC0714F2-3D04-11D1-AE7D-00A0C90F26F4}', 0, 1, 0, ["_IDTExtensibility2"])
class ButtonEvent:
    """Event sink for the custom toolbar button added by the addin."""
    def OnClick(self, button, cancel):
        """Show a message box when the button is clicked; returns `cancel` unchanged."""
        import win32ui # Possible, but not necessary, to use a Pythonwin GUI
        win32ui.MessageBox("Hello from Python")
        return cancel
class FolderEvent:
    """Event sink for the Inbox Items collection; reports each new item."""
    def OnItemAdd(self, item):
        """Print the subject of an item added to the folder."""
        try:
            print "An item was added to the inbox with subject:", item.Subject
        except AttributeError:
            # Some item types (e.g. meeting receipts) have no Subject property.
            print "An item was added to the inbox, but it has no subject! - ", repr(item)
class OutlookAddin:
    """COM server implementing _IDTExtensibility2 so Outlook loads it as an addin."""
    _com_interfaces_ = ['_IDTExtensibility2']
    _public_methods_ = []
    # Run in-process inside Outlook.
    _reg_clsctx_ = pythoncom.CLSCTX_INPROC_SERVER
    _reg_clsid_ = "{0F47D9F3-598B-4d24-B7E3-92AC15ED27E2}"
    _reg_progid_ = "Python.Test.OutlookAddin"
    # EventHandlerPolicy maps On* methods to COM events.
    _reg_policy_spec_ = "win32com.server.policy.EventHandlerPolicy"
    def OnConnection(self, application, connectMode, addin, custom):
        """Called by Outlook on load: add a toolbar button and hook Inbox additions."""
        print "OnConnection", application, connectMode, addin, custom
        # ActiveExplorer may be none when started without a UI (eg, WinCE synchronisation)
        activeExplorer = application.ActiveExplorer()
        if activeExplorer is not None:
            bars = activeExplorer.CommandBars
            toolbar = bars.Item("Standard")
            item = toolbar.Controls.Add(Type=constants.msoControlButton, Temporary=True)
            # Hook events for the item; keep a reference so the sink stays alive.
            item = self.toolbarButton = DispatchWithEvents(item, ButtonEvent)
            item.Caption="Python"
            item.TooltipText = "Click for Python"
            item.Enabled = True
        # And now, for the sake of demonstration, setup a hook for all new messages
        inbox = application.Session.GetDefaultFolder(constants.olFolderInbox)
        self.inboxItems = DispatchWithEvents(inbox.Items, FolderEvent)
    def OnDisconnection(self, mode, custom):
        """Called by Outlook when the addin is unloaded."""
        print "OnDisconnection"
    def OnAddInsUpdate(self, custom):
        print "OnAddInsUpdate", custom
    def OnStartupComplete(self, custom):
        print "OnStartupComplete", custom
    def OnBeginShutdown(self, custom):
        print "OnBeginShutdown", custom
def RegisterAddin(klass):
    """Write the registry entries that tell Outlook to load this addin at startup."""
    import _winreg
    addins_root = _winreg.CreateKey(
        _winreg.HKEY_CURRENT_USER,
        "Software\\Microsoft\\Office\\Outlook\\Addins")
    addin_key = _winreg.CreateKey(addins_root, klass._reg_progid_)
    # (value name, registry type, value) written under the addin's ProgID key.
    entries = (
        ("CommandLineSafe", _winreg.REG_DWORD, 0),
        ("LoadBehavior", _winreg.REG_DWORD, 3),
        ("Description", _winreg.REG_SZ, klass._reg_progid_),
        ("FriendlyName", _winreg.REG_SZ, klass._reg_progid_),
    )
    for value_name, value_type, value in entries:
        _winreg.SetValueEx(addin_key, value_name, 0, value_type, value)
def UnregisterAddin(klass):
    """Delete the addin's registry key; a missing key is silently ignored."""
    import _winreg
    addin_path = "Software\\Microsoft\\Office\\Outlook\\Addins\\" + klass._reg_progid_
    try:
        _winreg.DeleteKey(_winreg.HKEY_CURRENT_USER, addin_path)
    except WindowsError:
        # Key was never created (or already removed) - nothing to do.
        pass
if __name__ == '__main__':
    import win32com.server.register
    # Register (or with --unregister, remove) the COM server itself, then
    # update the Outlook AddIns registry entry to match.
    win32com.server.register.UseCommandLine(OutlookAddin)
    if "--unregister" in sys.argv:
        UnregisterAddin(OutlookAddin)
    else:
        RegisterAddin(OutlookAddin)
| [
"pawel.jasinski@gmail.com"
] | pawel.jasinski@gmail.com |
0d9d5c69e0b64bcac12dfcaab64cabf8c3418c54 | ddae7c01b3bc30a9a06f53a7b9f02ae332f096b8 | /project4/urls.py | 743a715759fe8aee2cf5580b054b12d240abb8b1 | [] | no_license | harshadvali11/templates | 92db65eb7cb1ae725b2e3085db66b01cda606df5 | ab6fb93ab8ddb642a9eb13ad20306b2f43cefaa0 | refs/heads/master | 2022-09-09T23:44:09.388543 | 2020-06-03T15:24:44 | 2020-06-03T15:24:44 | 268,705,736 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | """project4 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from app import views
urlpatterns = [
path('admin/', admin.site.urls),
path('temp/',views.temp,name='temp'),
path('select/',views.select,name='select'),
]
| [
"66057460+harshadvali11@users.noreply.github.com"
] | 66057460+harshadvali11@users.noreply.github.com |
c8a7d31b96a73a8e2c8054f70ecfe0934ec53a43 | 9ab6875589442c7c27f1796de06c6711ffb34d1f | /torchelastic/distributed/launch.py | ecf5cb61b3d7063b2aa7890336b394a8f7ae1875 | [
"BSD-3-Clause"
] | permissive | pytorch/elastic | 4acd9569cc19dcc8a843cc37857008bd55e6ee44 | bc88e6982961d4117e53c4c8163ecf277f35c2c5 | refs/heads/master | 2023-08-29T22:53:09.679927 | 2022-06-15T12:59:47 | 2022-06-15T12:59:47 | 212,205,845 | 777 | 110 | BSD-3-Clause | 2022-05-03T20:08:46 | 2019-10-01T21:48:15 | Python | UTF-8 | Python | false | false | 503 | py | #!/usr/bin/env/python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
os.environ["LOGLEVEL"] = "INFO"
# Since logger initialized during imoprt statement
# the log level should be set first
from torch.distributed.run import main as run_main
def main(args=None) -> None:
run_main(args)
if __name__ == "__main__":
main()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
374b320f5723b0490a6397f0304381c5211b3066 | c475cd8531a94ffae69cc92371d41531dbbddb6c | /Projects/bullet3-2.89/examples/pybullet/gym/pybullet_envs/minitaur/envs/vector_pb2.py | 400c6c80572966620ae0e1c075ec27f843221c6c | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown",
"Zlib"
] | permissive | WolfireGames/overgrowth | 72d3dd29cbd7254337265c29f8de3e5c32400114 | 594a2a4f9da0855304ee8cd5335d042f8e954ce1 | refs/heads/main | 2023-08-15T19:36:56.156578 | 2023-05-17T08:17:53 | 2023-05-17T08:20:36 | 467,448,492 | 2,264 | 245 | Apache-2.0 | 2023-05-09T07:29:58 | 2022-03-08T09:38:54 | C++ | UTF-8 | Python | false | true | 25,335 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: vector.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='vector.proto',
package='robotics.messages',
syntax='proto3',
serialized_pb=_b(
'\n\x0cvector.proto\x12\x11robotics.messages\"6\n\x08Vector4d\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\x12\t\n\x01z\x18\x03 \x01(\x01\x12\t\n\x01w\x18\x04 \x01(\x01\"6\n\x08Vector4f\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\x12\t\n\x01w\x18\x04 \x01(\x02\"+\n\x08Vector3d\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\x12\t\n\x01z\x18\x03 \x01(\x01\"+\n\x08Vector3f\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\" \n\x08Vector2d\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\" \n\x08Vector2f\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\"\x1b\n\x07Vectord\x12\x10\n\x04\x64\x61ta\x18\x01 \x03(\x01\x42\x02\x10\x01\"\x1b\n\x07Vectorf\x12\x10\n\x04\x64\x61ta\x18\x01 \x03(\x02\x42\x02\x10\x01\x62\x06proto3'
))
_VECTOR4D = _descriptor.Descriptor(
name='Vector4d',
full_name='robotics.messages.Vector4d',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(name='x',
full_name='robotics.messages.Vector4d.x',
index=0,
number=1,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='y',
full_name='robotics.messages.Vector4d.y',
index=1,
number=2,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='z',
full_name='robotics.messages.Vector4d.z',
index=2,
number=3,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='w',
full_name='robotics.messages.Vector4d.w',
index=3,
number=4,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=35,
serialized_end=89,
)
_VECTOR4F = _descriptor.Descriptor(
name='Vector4f',
full_name='robotics.messages.Vector4f',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(name='x',
full_name='robotics.messages.Vector4f.x',
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='y',
full_name='robotics.messages.Vector4f.y',
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='z',
full_name='robotics.messages.Vector4f.z',
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='w',
full_name='robotics.messages.Vector4f.w',
index=3,
number=4,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=91,
serialized_end=145,
)
_VECTOR3D = _descriptor.Descriptor(
name='Vector3d',
full_name='robotics.messages.Vector3d',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(name='x',
full_name='robotics.messages.Vector3d.x',
index=0,
number=1,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='y',
full_name='robotics.messages.Vector3d.y',
index=1,
number=2,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='z',
full_name='robotics.messages.Vector3d.z',
index=2,
number=3,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=147,
serialized_end=190,
)
_VECTOR3F = _descriptor.Descriptor(
name='Vector3f',
full_name='robotics.messages.Vector3f',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(name='x',
full_name='robotics.messages.Vector3f.x',
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='y',
full_name='robotics.messages.Vector3f.y',
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='z',
full_name='robotics.messages.Vector3f.z',
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=192,
serialized_end=235,
)
_VECTOR2D = _descriptor.Descriptor(
name='Vector2d',
full_name='robotics.messages.Vector2d',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(name='x',
full_name='robotics.messages.Vector2d.x',
index=0,
number=1,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='y',
full_name='robotics.messages.Vector2d.y',
index=1,
number=2,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=237,
serialized_end=269,
)
_VECTOR2F = _descriptor.Descriptor(
name='Vector2f',
full_name='robotics.messages.Vector2f',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(name='x',
full_name='robotics.messages.Vector2f.x',
index=0,
number=1,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(name='y',
full_name='robotics.messages.Vector2f.y',
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=271,
serialized_end=303,
)
_VECTORD = _descriptor.Descriptor(
name='Vectord',
full_name='robotics.messages.Vectord',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(name='data',
full_name='robotics.messages.Vectord.data',
index=0,
number=1,
type=1,
cpp_type=5,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=_descriptor._ParseOptions(
descriptor_pb2.FieldOptions(), _b('\020\001')),
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=305,
serialized_end=332,
)
_VECTORF = _descriptor.Descriptor(
name='Vectorf',
full_name='robotics.messages.Vectorf',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(name='data',
full_name='robotics.messages.Vectorf.data',
index=0,
number=1,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=_descriptor._ParseOptions(
descriptor_pb2.FieldOptions(), _b('\020\001')),
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=334,
serialized_end=361,
)
DESCRIPTOR.message_types_by_name['Vector4d'] = _VECTOR4D
DESCRIPTOR.message_types_by_name['Vector4f'] = _VECTOR4F
DESCRIPTOR.message_types_by_name['Vector3d'] = _VECTOR3D
DESCRIPTOR.message_types_by_name['Vector3f'] = _VECTOR3F
DESCRIPTOR.message_types_by_name['Vector2d'] = _VECTOR2D
DESCRIPTOR.message_types_by_name['Vector2f'] = _VECTOR2F
DESCRIPTOR.message_types_by_name['Vectord'] = _VECTORD
DESCRIPTOR.message_types_by_name['Vectorf'] = _VECTORF
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Vector4d = _reflection.GeneratedProtocolMessageType(
'Vector4d',
(_message.Message,),
dict(DESCRIPTOR=_VECTOR4D,
__module__='vector_pb2'
# @@protoc_insertion_point(class_scope:robotics.messages.Vector4d)
))
_sym_db.RegisterMessage(Vector4d)
Vector4f = _reflection.GeneratedProtocolMessageType(
'Vector4f',
(_message.Message,),
dict(DESCRIPTOR=_VECTOR4F,
__module__='vector_pb2'
# @@protoc_insertion_point(class_scope:robotics.messages.Vector4f)
))
_sym_db.RegisterMessage(Vector4f)
Vector3d = _reflection.GeneratedProtocolMessageType(
'Vector3d',
(_message.Message,),
dict(DESCRIPTOR=_VECTOR3D,
__module__='vector_pb2'
# @@protoc_insertion_point(class_scope:robotics.messages.Vector3d)
))
_sym_db.RegisterMessage(Vector3d)
Vector3f = _reflection.GeneratedProtocolMessageType(
'Vector3f',
(_message.Message,),
dict(DESCRIPTOR=_VECTOR3F,
__module__='vector_pb2'
# @@protoc_insertion_point(class_scope:robotics.messages.Vector3f)
))
_sym_db.RegisterMessage(Vector3f)
Vector2d = _reflection.GeneratedProtocolMessageType(
'Vector2d',
(_message.Message,),
dict(DESCRIPTOR=_VECTOR2D,
__module__='vector_pb2'
# @@protoc_insertion_point(class_scope:robotics.messages.Vector2d)
))
_sym_db.RegisterMessage(Vector2d)
Vector2f = _reflection.GeneratedProtocolMessageType(
'Vector2f',
(_message.Message,),
dict(DESCRIPTOR=_VECTOR2F,
__module__='vector_pb2'
# @@protoc_insertion_point(class_scope:robotics.messages.Vector2f)
))
_sym_db.RegisterMessage(Vector2f)
Vectord = _reflection.GeneratedProtocolMessageType(
'Vectord',
(_message.Message,),
dict(DESCRIPTOR=_VECTORD,
__module__='vector_pb2'
# @@protoc_insertion_point(class_scope:robotics.messages.Vectord)
))
_sym_db.RegisterMessage(Vectord)
Vectorf = _reflection.GeneratedProtocolMessageType(
'Vectorf',
(_message.Message,),
dict(DESCRIPTOR=_VECTORF,
__module__='vector_pb2'
# @@protoc_insertion_point(class_scope:robotics.messages.Vectorf)
))
_sym_db.RegisterMessage(Vectorf)
_VECTORD.fields_by_name['data'].has_options = True
_VECTORD.fields_by_name['data']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(),
_b('\020\001'))
_VECTORF.fields_by_name['data'].has_options = True
_VECTORF.fields_by_name['data']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(),
_b('\020\001'))
# @@protoc_insertion_point(module_scope)
| [
"max@autious.net"
] | max@autious.net |
cb0027a369d0113ba738205dedcaf8a48f3b7e4e | 82256676fd6b857b7a9fd54ab339d390c1364ab0 | /py/testdir_0xdata_only/test_parse_airline_multi_hdfs_many.py | 1e17b907d870ce16f8b97798c409591f80397d9d | [
"Apache-2.0"
] | permissive | ivanliu1989/h2o | 8d0def46c070e78718ba13761f20ef2187545543 | e00b367df0a33c400ae33bc869a236f254f625ed | refs/heads/master | 2023-04-27T20:40:16.666618 | 2014-10-23T02:12:36 | 2014-10-23T02:12:36 | 25,618,199 | 0 | 0 | Apache-2.0 | 2023-04-15T23:24:35 | 2014-10-23T03:49:40 | null | UTF-8 | Python | false | false | 3,566 | py | import unittest, sys, random, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_hosts, h2o_jobs as h2j
# When True, the cloud is built with random UDP packet drops to stress recovery.
RANDOM_UDP_DROP = False
if RANDOM_UDP_DROP:
    print "random_udp_drop!!"
# Alternate cluster (commented out):
# NAME_NODE = 'mr-0x6'
# VERSION = 'cdh4'
# HDFS namenode host and Hadoop distribution version used for imports.
NAME_NODE = 'mr-0xd6'
VERSION = 'hdp2.1'
# Number of parse iterations performed by the test.
TRIAL_MAX = 10
print "Using", VERSION, "on", NAME_NODE
class Basic(unittest.TestCase):
    def tearDown(self):
        """After each test, fail if h2o wrote errors to its sandbox logs."""
        h2o.check_sandbox_for_errors()
    @classmethod
    def setUpClass(cls):
        # No shared fixture; each test builds its own cloud.
        pass
    print "Will build clouds with incrementing heap sizes and import folder/parse"
    @classmethod
    def tearDownClass(cls):
        # the node state is gone when we tear down the cloud, so pass the ignore here also.
        h2o.tear_down_cloud(sandboxIgnoreErrors=True)
def test_parse_airline_multi_hdfs_many(self):
h2o.beta_features = True
# default
csvFilename = "hex_10"
csvFilePattern = '*' # all files in the folder
for tryHeap in [24]:
print "\n", tryHeap,"GB heap, 1 jvm per host, import mr-0x6 hdfs, then parse"
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(java_heap_GB=tryHeap, random_udp_drop=RANDOM_UDP_DROP,
use_hdfs=True, hdfs_name_node=NAME_NODE, hdfs_version=VERSION)
else:
h2o_hosts.build_cloud_with_hosts(java_heap_GB=tryHeap, random_udp_drop=RANDOM_UDP_DROP, disable_assertions=False,
use_hdfs=True, hdfs_name_node=NAME_NODE, hdfs_version=VERSION)
# don't raise exception if we find something bad in h2o stdout/stderr?
# h2o.nodes[0].sandboxIgnoreErrors = True
timeoutSecs = 500
importFolderPath = "datasets/airlines_multi"
csvPathname = importFolderPath + "/" + csvFilePattern
parseResult = h2i.import_only(path=csvPathname, schema='hdfs',
timeoutSecs=timeoutSecs, retryDelaySecs=10, pollTimeoutSecs=60)
for trial in range(TRIAL_MAX):
# each parse now just does one
csvFilePattern = "*%s.csv" % trial
# if we want multifile
# csvFilePattern = "*"
hex_key = csvFilename + "_" + str(trial) + ".hex"
csvPathname = importFolderPath + "/" + csvFilePattern
start = time.time()
# print "Don't wait for completion. Just load things up!"
print "Drat. the source file is locked if we noPoll. Would have to increment across the individual files?"
print "Drat. We can't re-import the folder, if there's a parse using one of the source files?"
parseResult = h2i.parse_only(pattern=csvFilePattern, hex_key=hex_key, noPoll=True, delete_on_done=0,
timeoutSecs=timeoutSecs, retryDelaySecs=10, pollTimeoutSecs=60)
elapsed = time.time() - start
print "parse result:", parseResult['destination_key']
print "Parse #", trial, "completed in", "%6.2f" % elapsed, "seconds.", \
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
h2o_cmd.runStoreView()
# we don't delete the hex key. it will start spilling? slow
h2j.pollWaitJobs(timeoutSecs=300, pollTimeoutSecs=30)
h2o.tear_down_cloud()
# sticky ports? wait a bit.
time.sleep(5)
if __name__ == '__main__':
h2o.unit_main()
| [
"kevin@0xdata.com"
] | kevin@0xdata.com |
e4b92222521611368b819e3349ccd7734bf50406 | 7950c4faf15ec1dc217391d839ddc21efd174ede | /leetcode-cn/sword2offer/000剑指0_Offer_04._二维数组中的查找.py | feea5d7d328dfdb05f6f63ef45fde22a96c9592e | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,662 | py | '''
from up-right to bottom-left
T: O(M + N)
S: O(1)
执行用时:48 ms, 在所有 Python3 提交中击败了17.63%的用户
内存消耗:19.1 MB, 在所有 Python3 提交中击败了45.87%的用户
通过测试用例:129 / 129
'''
class Solution:
def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
n, m = len(matrix), len(matrix[0]) if matrix else 0
i, j = 0, m - 1
while i < n and j >= 0:
if matrix[i][j] < target:
i += 1
elif matrix[i][j] > target:
j -= 1
else:
return True
return False
'''
from bottom-left to up-right
T: O(M + N)
S: O(1)
执行用时:44 ms, 在所有 Python3 提交中击败了34.66% 的用户
内存消耗:19.1 MB, 在所有 Python3 提交中击败了43.89% 的用户
通过测试用例:129 / 129
'''
class Solution:
def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
n, m = len(matrix), len(matrix[0]) if matrix else 0
i, j = n - 1, 0
while i >= 0 and j < m:
if matrix[i][j] > target:
i -= 1
elif matrix[i][j] < target:
j += 1
else:
return True
return False
'''
brute force
T: O(MN)
S: O(1)
执行用时:44 ms, 在所有 Python3 提交中击败了34.66% 的用户
内存消耗:19.1 MB, 在所有 Python3 提交中击败了45.27% 的用户
通过测试用例:129 / 129
'''
class Solution:
def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
for row in matrix:
for x in row:
if x == target:
return True
return False
'''
binary search every row
T: O(NlogM)
S: O(1)
执行用时:40 ms, 在所有 Python3 提交中击败了58.27% 的用户
内存消耗:19.1 MB, 在所有 Python3 提交中击败了26.42% 的用户
通过测试用例:129 / 129
'''
class Solution:
def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
m = len(matrix[0]) if matrix else 0
for row in matrix:
idx = bisect_left(row, target)
if idx < m and row[idx] == target:
return True
return False
'''
binary search every column
T: O(MlogN)
S: O(1)
执行用时:52 ms, 在所有 Python3 提交中击败了8.55% 的用户
内存消耗:19 MB, 在所有 Python3 提交中击败了63.71% 的用户
通过测试用例:129 / 129
'''
class Solution:
def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
n = len(matrix)
for col in zip(*matrix):
idx = bisect_left(col, target)
if idx < n and col[idx] == target:
return True
return False
'''
binary search by the longer one (row/column)
T: O( min(N,M)*log(max(N,M)) )
S: O(1)
执行用时:40 ms, 在所有 Python3 提交中击败了58.27% 的用户
内存消耗:19.2 MB, 在所有 Python3 提交中击败了12.85% 的用户
通过测试用例:129 / 129
'''
class Solution:
def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
n, m = len(matrix), len(matrix[0]) if matrix else 0
if n < m:
for row in matrix:
idx = bisect_left(row, target)
if idx < m and row[idx] == target:
return True
else:
for col in zip(*matrix):
idx = bisect_left(col, target)
if idx < n and col[idx] == target:
return True
return False
'''
binary search by the longer one (row/column)
T: O( min(N,M)*log(max(N,M)) )
S: O(1)
执行用时:40 ms, 在所有 Python3 提交中击败了58.27% 的用户
内存消耗:19.1 MB, 在所有 Python3 提交中击败了35.85% 的用户
通过测试用例:129 / 129
'''
class Solution:
def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
n, m = len(matrix), len(matrix[0]) if matrix else 0
if n > m:
matrix = zip(*matrix)
for longer in matrix:
idx = bisect_left(longer, target)
if idx < max(n, m) and longer[idx] == target:
return True
return False
'''
brute force
执行用时:48 ms, 在所有 Python3 提交中击败了17.63% 的用户
内存消耗:19.1 MB, 在所有 Python3 提交中击败了31.53% 的用户
通过测试用例:129 / 129
'''
class Solution:
def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
return any(target in row for row in matrix)
'''
divide and conquer, cut to four parts + binary search
执行用时:48 ms, 在所有 Python3 提交中击败了17.63% 的用户
内存消耗:20.2 MB, 在所有 Python3 提交中击败了5.10% 的用户
通过测试用例:129 / 129
'''
class Solution:
def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
n, m = len(matrix), len(matrix[0]) if matrix else 0
def isExist(i1, j1, i2, j2):
if i1 > i2 or j1 > j2:
return False
if i1 == i2:
# matrix[i1][j1: j2 + 1]
lo, hi = j1, j2
while lo <= hi:
mid = (lo + hi) // 2
if matrix[i1][mid] == target:
return True
elif matrix[i1][mid] > target:
hi = mid - 1
else:
lo = mid + 1
return False
elif j1 == j2:
# matrix[i1: i2 + 1][j1]
lo, hi = i1, i2
while lo <= hi:
mid = (lo + hi) // 2
if matrix[mid][j1] == target:
return True
elif matrix[mid][j1] > target:
hi = mid - 1
else:
lo = mid + 1
return False
# cut to four parts
midi, midj = (i1 + i2) // 2, (j1 + j2) // 2
if matrix[midi][midj] == target:
return True
elif matrix[midi][midj] > target:
return isExist(i1, j1, midi, midj) or \
isExist(i1, midj + 1, midi, j2) or \
isExist(midi + 1, j1, i2, midj)
else:
return isExist(midi + 1, midj + 1, i2, j2) or \
isExist(i1, midj + 1, midi, j2) or \
isExist(midi + 1, j1, i2, midj)
return isExist(0, 0, n - 1, m - 1)
| [
"laoxing201314@outlook.com"
] | laoxing201314@outlook.com |
caa343d8d4edab26a54015730302a0d32560c803 | a372a816373d63ad626a9947077e137eac2e6daf | /pyquiz/leetcode/WordLadder.py | b6f0657c1efc7f109343211ef78947d12c497129 | [] | no_license | DmitryPukhov/pyquiz | 07d33854a0e04cf750b925d2c399dac8a1b35363 | 8ae84f276cd07ffdb9b742569a5e32809ecc6b29 | refs/heads/master | 2021-06-13T14:28:51.255385 | 2021-06-13T08:19:36 | 2021-06-13T08:19:36 | 199,842,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,362 | py | import collections
from collections import deque, defaultdict
from typing import List, Set
class WordLadder:
"""
127. Word Ladder
Given two words (beginWord and endWord), and a dictionary's word list, find the length of shortest
transformation sequence from beginWord to endWord, such that:
Only one letter can be changed at a time.
Each transformed word must exist in the word list.
Note:
Return 0 if there is no such transformation sequence.
All words have the same length.
All words contain only lowercase alphabetic characters.
You may assume no duplicates in the word list.
You may assume beginWord and endWord are non-empty and are not the same.
Example 1:
Input:
beginWord = "hit",
endWord = "cog",
wordList = ["hot","dot","dog","lot","log","cog"]
Output: 5
Explanation: As one shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog",
return its length 5.
Example 2:
Input:
beginWord = "hit"
endWord = "cog"
wordList = ["hot","dot","dog","lot","log"]
Output: 0
Explanation: The endWord "cog" is not in wordList, therefore no possible transformation.
"""
def ladderLength(self, beginWord, endWord, wordList):
"""
For each word create word->transformations and transformation->word dictionaries to not calculate it every cycle
Then do BFA from beginWord until we transform it to endWord.
"""
if endWord not in wordList:
return 0
# Fill in transformations dictionaries
wtdict = defaultdict(set)
twdict = defaultdict(set)
for w in [beginWord] + wordList:
for i in range(len(beginWord)):
trans = w[:i] + '*' + w[i + 1:]
wtdict[w].add(trans)
if w != beginWord:
twdict[trans].add(w)
# BFS considering transformation dictionaries
q = deque()
q.append((beginWord, 1))
visited = set()
while q:
w, level = q.popleft()
if w == endWord:
return level
if w in visited:
continue
for t in wtdict[w]:
nextwords = twdict[t]
q.extend([(nw, level+1) for nw in nextwords])
visited.add(w)
return 0
| [
"dmitry.pukhov@gmail.com"
] | dmitry.pukhov@gmail.com |
925d5b3d47f5b7bcf1fc083d4694149fae7043f5 | 9399d687b2e41245968ba0e9d413a6789d773b1d | /CI/python-coroutine/download_server/scheduler.py | 095d52c381995a12aa0a59eb12fc4de1054779f9 | [] | no_license | jiangliu888/DemoForSpeed | be41bdb85a1d1f5ca9350a3a1f681ced5ec9b929 | 11319bc19c074327d863ac2813a04cef3487f8d6 | refs/heads/main | 2023-08-23T14:16:21.686155 | 2021-10-17T12:01:34 | 2021-10-17T12:01:34 | 388,452,435 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,573 | py | # -*- encoding=utf-8 -*-
import os
import prettytable
import utils
from const import CalcType
from modules.downloader import Downloader
from modules.hasher import Hasher
from modules.storager import Storager
class Scheduler:
"""调度模块
"""
def __init__(self):
self.downloader = Downloader()
self.hasher = Hasher()
self.storager = Storager()
def _wrap_path(self, md5):
filename = '{}.jpg'.format(md5)
STORAGE_PATH = os.path.join('.', 'images')
path = os.path.join(STORAGE_PATH, filename)
return path
def set_calc_type(self, type_):
self.downloader.set_calc_type(type_)
self.hasher.set_calc_type(type_)
self.storager.set_calc_type(type_)
def process(self):
time_statictics = {}
time_statictics['network_time'] = []
time_statictics['cpu_time'] = []
time_statictics['disk_time'] = []
timer = utils.Timer()
# 1. 加载图片url列表
url_list = utils.urllist()
# 2. 调度下载模块
timer.tick()
content_list = self.downloader.process(url_list)
time_cost = timer.tock()
time_statictics['network_time'].append(time_cost)
# 3. 调度哈希模块
timer.tick()
md5_list = self.hasher.process(content_list)
time_cost = timer.tock()
time_statictics['cpu_time'].append(time_cost)
# 4. 调度存储模块
item_list = []
for content, md5 in zip(content_list, md5_list):
path = self._wrap_path(md5)
item = (content, path)
item_list.append(item)
timer.tick()
self.storager.process(item_list)
time_cost = timer.tock()
time_statictics['disk_time'].append(time_cost)
return time_statictics
def statictics(self, single_log, multi_log, multiprocess_log, pycoroutine_log):
table = prettytable.PrettyTable(['类型', '单线程总耗时', '多线程总耗时', '多线程提升率', '多进程总耗时', '多进程提升率', '协程总耗时', '协程提升率'])
network_row = ['network']
cpu_row = ['cpu']
disk_row = ['disk']
# 单线程数据
network_row.append(single_log['network_time'][0])
cpu_row.append(single_log['cpu_time'][0])
disk_row.append(single_log['disk_time'][0])
# 多线程数据
network_row.append(multi_log['network_time'][0])
cpu_row.append(multi_log['cpu_time'][0])
disk_row.append(multi_log['disk_time'][0])
# 多线程提升率
time_ = single_log['network_time'][0] - multi_log['network_time'][0]
lift_rate = '%.4f%%' % ((time_ / single_log['network_time'][0]) * 100)
network_row.append(lift_rate)
time_ = single_log['cpu_time'][0] - multi_log['cpu_time'][0]
lift_rate = '%.4f%%' % ((time_ / single_log['cpu_time'][0]) * 100)
cpu_row.append(lift_rate)
time_ = single_log['disk_time'][0] - multi_log['disk_time'][0]
lift_rate = '%.4f%%' % ((time_ / single_log['disk_time'][0]) * 100)
disk_row.append(lift_rate)
# 多进程数据
network_row.append(multiprocess_log['network_time'][0])
cpu_row.append(multiprocess_log['cpu_time'][0])
disk_row.append(multiprocess_log['disk_time'][0])
# 多进程提升率
time_ = single_log['network_time'][0] - multiprocess_log['network_time'][0]
lift_rate = '%.4f%%' % ((time_ / single_log['network_time'][0]) * 100)
network_row.append(lift_rate)
time_ = single_log['cpu_time'][0] - multiprocess_log['cpu_time'][0]
lift_rate = '%.4f%%' % ((time_ / single_log['cpu_time'][0]) * 100)
cpu_row.append(lift_rate)
time_ = single_log['disk_time'][0] - multiprocess_log['disk_time'][0]
lift_rate = '%.4f%%' % ((time_ / single_log['disk_time'][0]) * 100)
disk_row.append(lift_rate)
# 协程运行数据
network_row.append(pycoroutine_log['network_time'][0])
cpu_row.append(pycoroutine_log['cpu_time'][0])
disk_row.append(pycoroutine_log['disk_time'][0])
# 协程运行提升率
time_ = single_log['network_time'][0] - pycoroutine_log['network_time'][0]
lift_rate = '%.4f%%' % ((time_ / single_log['network_time'][0]) * 100)
network_row.append(lift_rate)
time_ = single_log['cpu_time'][0] - pycoroutine_log['cpu_time'][0]
lift_rate = '%.4f%%' % ((time_ / single_log['cpu_time'][0]) * 100)
cpu_row.append(lift_rate)
time_ = single_log['disk_time'][0] - pycoroutine_log['disk_time'][0]
lift_rate = '%.4f%%' % ((time_ / single_log['disk_time'][0]) * 100)
disk_row.append(lift_rate)
table.add_row(network_row)
table.add_row(cpu_row)
table.add_row(disk_row)
print(table)
if __name__ == '__main__':
scheduler = Scheduler()
# 单线程运行
scheduler.set_calc_type(CalcType.SingleThread)
singlethread_time = scheduler.process()
# 多线程运行
scheduler.set_calc_type(CalcType.MultiThread)
multithread_time = scheduler.process()
# 多进程运行
scheduler.set_calc_type(CalcType.MultiProcess)
multiprocess_time = scheduler.process()
# 协程运行
scheduler.set_calc_type(CalcType.PyCoroutine)
pyprocess_time = scheduler.process()
# 合并数据
scheduler.statictics(singlethread_time, multithread_time, multiprocess_time, pyprocess_time)
| [
"admin@example.com"
] | admin@example.com |
e31fc49bffcfe984a800f9e7b2471b955301d6ae | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02901/s081848102.py | c85e2160af148705771e000edbc7bda04900a5b9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | def main():
N, M = map(int, input().split())
inf = 10**9
p = 1<<N
dp = [inf]*p
dp[0] = 0
for i in range(M):
a, b = map(int, input().split())
C = list(map(int, input().split()))
c_bit = sum([1<<(c-1) for c in C])
for j in range(p):
#print(min(dp[i][j|c_bit], dp[i][j] + a))
dp[j|c_bit] = min(dp[j|c_bit], dp[j] + a)
if dp[p-1] == inf:
print(-1)
else:
print(int(dp[p-1]))
if __name__ == "__main__":
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8009bc9f9d4141df5d051625b246946cb0fc9952 | 6c418592d5f6ab598dbba3451ab6be3be11ece0a | /env/bin/thresholder.py | 4a23a3afa73c7133de719519cd42fa85dc8f249c | [] | no_license | maxhasan882/Online | 0b9b77ed6fef8462b9d7544487e510833353462b | 53f33b8d39c6d546b7b3da13c5dc8d021f38e263 | refs/heads/master | 2022-11-27T22:05:13.740413 | 2019-12-28T05:09:20 | 2019-12-28T05:09:20 | 230,561,289 | 1 | 0 | null | 2022-11-22T01:40:38 | 2019-12-28T05:07:42 | Python | UTF-8 | Python | false | false | 1,914 | py | #!/home/hasan/PycharmProjects/wiz_sns_updated/env/bin/python
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates how a 1-bit BitmapImage can be used
# as a dynamically updated overlay
#
try:
from tkinter import *
except ImportError:
from Tkinter import *
from PIL import Image, ImageTk
import sys
#
# an image viewer
class UI(Frame):
def __init__(self, master, im, value=128):
Frame.__init__(self, master)
self.image = im
self.value = value
self.canvas = Canvas(self, width=im.size[0], height=im.size[1])
self.backdrop = ImageTk.PhotoImage(im)
self.canvas.create_image(0, 0, image=self.backdrop, anchor=NW)
self.canvas.pack()
scale = Scale(self, orient=HORIZONTAL, from_=0, to=255,
resolution=1, command=self.update_scale, length=256)
scale.set(value)
scale.bind("<ButtonRelease-1>", self.redraw)
scale.pack()
# uncomment the following line for instant feedback (might
# be too slow on some platforms)
# self.redraw()
def update_scale(self, value):
self.value = float(value)
self.redraw()
def redraw(self, event=None):
# create overlay (note the explicit conversion to mode "1")
im = self.image.point(lambda v, t=self.value: v >= t, "1")
self.overlay = ImageTk.BitmapImage(im, foreground="green")
# update canvas
self.canvas.delete("overlay")
self.canvas.create_image(0, 0, image=self.overlay, anchor=NW,
tags="overlay")
# --------------------------------------------------------------------
# main
if len(sys.argv) != 2:
print("Usage: thresholder file")
sys.exit(1)
root = Tk()
im = Image.open(sys.argv[1])
if im.mode != "L":
im = im.convert("L")
# im.thumbnail((320,200))
UI(root, im).pack()
root.mainloop()
| [
"rhmithu50@gmail.com"
] | rhmithu50@gmail.com |
37829dfedbbb50d23a8a2bfa1dacda84f5575b25 | a3d6556180e74af7b555f8d47d3fea55b94bcbda | /components/breadcrumbs/DEPS | dd3a8164073471de8250f98e1c77abfbbaf839ac | [
"BSD-3-Clause"
] | permissive | chromium/chromium | aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | refs/heads/main | 2023-08-24T00:35:12.585945 | 2023-08-23T22:01:11 | 2023-08-23T22:01:11 | 120,360,765 | 17,408 | 7,102 | BSD-3-Clause | 2023-09-10T23:44:27 | 2018-02-05T20:55:32 | null | UTF-8 | Python | false | false | 187 | include_rules = [
"+base",
"+components/crash/core/common/crash_key.h",
"+components/infobars/core",
"+components/keyed_service/core",
"+net/base/net_errors.h",
"+ui/base",
]
| [
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] | chromium-scoped@luci-project-accounts.iam.gserviceaccount.com | |
808f5e2470653a06b31ae8ad6859d95156e9b594 | dce02a47c3c7f5affc2f0628fb0c21c8582b9b12 | /src/Microsoft_LexicographicallySmallestString.py | 53abb0caea17e33a8d0445683a73cb0b7fb82d2c | [] | no_license | varshajayaraman/SheCodesInPython | c7e6a9b3441f14bf6ebe31f7cc0c1e39eb502020 | d71327e16fdf2702542ec585fd6eb48a9a0dc8d0 | refs/heads/master | 2021-06-19T19:01:41.054234 | 2021-06-11T04:19:32 | 2021-06-11T04:19:32 | 218,898,393 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | def sol(strs):
res=""
i=0
while i < len(strs)-1:
if strs[i]>strs[i+1]:
break
else:
res+=strs[i]
i+=1
if i == len(strs)-1:
return res
for j in range(i+1, len(strs)):
res+=strs[j]
return res | [
"vjayar6@uic.edu"
] | vjayar6@uic.edu |
30c605defe051db8df4ec0debf6d86f4a0faf4aa | a929ce53417526ea532607450fed0a75a1134ba3 | /albumy/extensions.py | df75477d29d3cddff3ff1fdfcc4d618f8f701161 | [
"MIT"
] | permissive | tbshow/albumy | 8da9b42f0e199313317cd3ab9ea8681c7a9db1fb | 18ccd51a8ecbeecdea0f693beb10dbba75be09e4 | refs/heads/master | 2022-11-16T12:34:14.553618 | 2020-07-04T04:11:38 | 2020-07-04T04:11:42 | 276,822,110 | 0 | 0 | MIT | 2020-07-03T06:12:32 | 2020-07-03T06:12:31 | null | UTF-8 | Python | false | false | 1,314 | py | # -*- coding: utf-8 -*-
"""
:author: Grey Li (李辉)
:url: http://greyli.com
:copyright: © 2018 Grey Li <withlihui@gmail.com>
:license: MIT, see LICENSE for more details.
"""
from flask_avatars import Avatars
from flask_bootstrap import Bootstrap
from flask_dropzone import Dropzone
from flask_login import LoginManager, AnonymousUserMixin
from flask_mail import Mail
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import CSRFProtect
bootstrap = Bootstrap()
db = SQLAlchemy()
login_manager = LoginManager()
mail = Mail()
dropzone = Dropzone()
moment = Moment()
avatars = Avatars()
csrf = CSRFProtect()
@login_manager.user_loader
def load_user(user_id):
from albumy.models import User
user = User.query.get(int(user_id))
return user
login_manager.login_view = 'auth.login'
# login_manager.login_message = 'Your custom message'
login_manager.login_message_category = 'warning'
login_manager.refresh_view = 'auth.re_authenticate'
# login_manager.needs_refresh_message = 'Your custom message'
login_manager.needs_refresh_message_category = 'warning'
class Guest(AnonymousUserMixin):
def can(self, permission_name):
return False
@property
def is_admin(self):
return False
login_manager.anonymous_user = Guest
| [
"withlihui@gmail.com"
] | withlihui@gmail.com |
b7e37eec06094261814cdf3bc44dd856aa98368d | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /ET2voBkuSPLb3mFSD_10.py | bc52bfbef9c3e6d242755af581c701c41dc92e22 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py |
def sum_every_nth(numbers, n):
l = len(numbers)
index_list = []
for i in range(l):
k = i+1
if k % n == 0:
index_list.append(i)
count = 0
for location in index_list:
count += int(numbers[location])
return(count)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
40a80f492736a92b65f5641c675ab9c35eb15034 | 2f15fdd78643f449f2749b31a3f8c84f2d4703b5 | /braintree/android_pay_card.py | 33c56f0b7be0ee70891583fddee9e98c2bebc78b | [
"MIT"
] | permissive | bhargavrpatel/braintree_python | 131fbbf5c9f1aaa0f37dec2f435e469e5f05ae05 | 89219d691eafe763a09ad28f04400c4fcef4991e | refs/heads/master | 2020-03-28T23:50:48.283637 | 2019-05-21T15:07:41 | 2019-05-21T15:07:41 | 149,311,839 | 1 | 2 | MIT | 2019-05-06T01:15:34 | 2018-09-18T15:34:12 | Python | UTF-8 | Python | false | false | 789 | py | import braintree
from braintree.resource import Resource
class AndroidPayCard(Resource):
"""
A class representing Braintree Android Pay card objects.
"""
def __init__(self, gateway, attributes):
Resource.__init__(self, gateway, attributes)
if hasattr(self, 'expired'):
self.is_expired = self.expired
if "subscriptions" in attributes:
self.subscriptions = [braintree.subscription.Subscription(gateway, subscription) for subscription in self.subscriptions]
@property
def expiration_date(self):
return self.expiration_month + "/" + self.expiration_year
@property
def last_4(self):
return self.virtual_card_last_4
@property
def card_type(self):
return self.virtual_card_type
| [
"code@getbraintree.com"
] | code@getbraintree.com |
cd325c42334ca0c47afcfd380347919d3b16607b | 3f46af2da32d9f02d1ebbdef6784ece1d64aace3 | /Production/python/PrivateSamples/EMJ_2016_mMed-1500_mDark-10_ctau-0p1_unflavored-down_cff.py | c47c46d40e0cf697bb2cd8e8fbee4e1818232468 | [] | no_license | cms-svj/TreeMaker | 53bf4b1e35d2e2a4fa99c13c2c8b60a207676b6d | 0ded877bcac801a2a394ad90ed987a20caa72a4c | refs/heads/Run2_2017 | 2023-07-19T07:14:39.175712 | 2020-10-06T21:10:26 | 2020-10-06T21:10:26 | 305,753,513 | 0 | 0 | null | 2021-01-26T18:58:54 | 2020-10-20T15:32:19 | null | UTF-8 | Python | false | false | 1,892 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-1.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-2.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-3.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-4.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-5.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-6.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-7.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-8.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-9.root',
'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-10_ctau-0p1_unflavored-down_n-500_part-10.root',
] )
| [
"enochnotsocool@gmail.com"
] | enochnotsocool@gmail.com |
170e70573fcdae520eb09532e9d757a13cf799a0 | f4afb7a16696803942a999b0687e08997defb114 | /build/msf_core/catkin_generated/generate_cached_setup.py | 25d91d593b2746aee01692a2d42d884d3e9c100c | [] | no_license | EmanueleAucone/ethz_ws | 8d4760109be3b1882d875aa28f7ecfe793b1b9e6 | 883efd2936e8f67e783790d7ac8f3a40e749d1b9 | refs/heads/main | 2023-01-22T04:03:45.341419 | 2020-11-25T10:30:49 | 2020-11-25T10:30:49 | 308,606,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,330 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/emanuele/ethz_ws/devel;/opt/ros/kinetic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/emanuele/ethz_ws/devel/.private/msf_core/env.sh')
output_filename = '/home/emanuele/ethz_ws/build/msf_core/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"emanuele.aucone95@gmail.com"
] | emanuele.aucone95@gmail.com |
23e21a35aa2873bd64b2132d86b739f15a362f81 | ffb78204e89b36509a0e76dc04c551aa56ebbee0 | /draw/ciyun_cn.py | bf607fa73b6fa7575cc2c047444c95f4ea181d02 | [] | no_license | b1668952669/python_study | c3624fcb6341341cc7baf267a7999c9246e0d309 | fa2c2b323c1a83f62d569c0576341e26f34ea456 | refs/heads/master | 2022-12-26T19:02:30.031100 | 2020-10-14T09:57:12 | 2020-10-14T09:57:12 | 302,525,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | # -*- coding: utf-8 -*-
from os import path
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import jieba
from wordcloud import WordCloud, STOPWORDS
###当前文件路径
d = path.dirname(__file__)
# Read the whole text.
file = open(path.join(d, 'alice.txt')).read()
##进行分词
#刚开始是分完词放进txt再打开却总是显示不出中文很奇怪
default_mode =jieba.cut(file)
text = " ".join(default_mode)
alice_mask = np.array(Image.open(path.join(d, "1.png")))
stopwords = set(STOPWORDS)
stopwords.add("said")
wc = WordCloud(
#设置字体,不指定就会出现乱码,这个字体文件需要下载
font_path=r'/usr/share/fonts/wqy-microhei/wqy-microhei.ttc',
background_color="white",
max_words=2000,
mask=alice_mask,
stopwords=stopwords)
# generate word cloud
wc.generate(text)
# store to file
wc.to_file(path.join(d, "qq_result.jpg"))
# show
plt.imshow(wc, interpolation='bilinear')
plt.axis("off")
plt.figure()
plt.imshow(alice_mask, cmap=plt.cm.gray, interpolation='bilinear')
plt.axis("off")
plt.show() | [
"you@example.com"
] | you@example.com |
c8e34fd1a8706819e83a5c25f9985ce4e9eb8215 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-cognitiveservices-search-customimagesearch/setup.py | 455120b62d9306c1671ae6ef4302e6df0f223100 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 2,917 | py | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-cognitiveservices-search-customimagesearch"
PACKAGE_PPRINT_NAME = "Cognitive Services Custom Image Search"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.rst', encoding='utf-8') as f:
readme = f.read()
with open('HISTORY.rst', encoding='utf-8') as f:
history = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + history,
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.cognitiveservices',
'azure.cognitiveservices.search',
]),
install_requires=[
'msrest>=0.5.0',
'azure-common~=1.1',
],
extras_require={
":python_version<'3.0'": ['azure-cognitiveservices-search-nspkg'],
}
)
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
cd28bd5ff31c255a37db6a9689eb48cc564517f2 | 49002dd74a1e00ba0d9358912881586f26ce45b9 | /tags/models.py | 80b12639fdfa3b2c35f1423b9510ffd272c3dd8b | [] | no_license | aditya2222/django-ecommerce | 9da44959405694f2f97afa753474b5f437d319ec | 57c33a384346c4be905efde804541c91a789d750 | refs/heads/master | 2020-03-16T01:48:32.653692 | 2018-09-19T16:06:11 | 2018-09-19T16:06:11 | 126,989,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | from django.db import models
from django.db.models.signals import pre_save
from ecommerce.utils import unique_slug_generator
from django.urls import reverse
from products.models import Product
# Create your models here.
class Tag(models.Model):
    """A product tag; its slug is auto-filled by the pre_save receiver below."""
    title = models.CharField(max_length=120)
    slug = models.SlugField()  # generated from title when left empty
    timestamp = models.DateTimeField(auto_now_add=True)  # set once at creation
    active = models.BooleanField(default=True)
    products = models.ManyToManyField(Product,blank=True)
    def __str__(self):
        # Admin/list display uses the human-readable title.
        return self.title
def tag_pre_save_receiver(sender, instance, *args, **kwargs):
    # pre_save hook: fill in a unique slug only when none was provided,
    # so manually-set slugs are never overwritten.
    if not instance.slug:
        instance.slug = unique_slug_generator(instance)
pre_save.connect(tag_pre_save_receiver, sender=Tag) | [
"adityasingh222247@gmail.com"
] | adityasingh222247@gmail.com |
aad632055722322373c3263a9680cb624f5c8ac4 | a74ea2dcde5d47344177606da383e0f006f2f35c | /test/unit/test_browserstack.py | 3c2d53ab901db3129f4ace09545f79478abefb7d | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | mikalindh/qweb | 050344a55c7e4e1c9070f1ecb8b0371bdbe3b435 | d497e60081cf440ebd0eb70f84f11f7ad405100a | refs/heads/master | 2023-03-25T09:30:45.082882 | 2021-03-24T09:49:50 | 2021-03-24T09:49:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | # -*- coding: utf-8 -*-
# --------------------------
# Copyright © 2014 - 2020 Qentinel Group. All rights reserved.
#
# The software code and any other material contained in this file are property
# of Qentinel Group and protected by international copyright laws.
# Any copying, re-licensing, re-distribution, development of
# derivative works, or reverse engineering are prohibited.
# ---------------------------
from QWeb.internal.browser.bs_desktop import open_browser as ob_desktop
from QWeb.internal.browser.bs_mobile import open_browser as ob_mobile
from unittest.mock import patch
from QWeb.internal.exceptions import QWebException
import pytest
@patch('QWeb.internal.browser.bs_desktop.BuiltIn.get_variable_value')
def test_desktop_bs_open_browser(patch_robot_builtin):
    """An invalid BrowserStack browser name must raise QWebException (desktop)."""
    # Robot Framework variables are mocked so no real credentials are read.
    patch_robot_builtin.return_value = 'foobar'
    with pytest.raises(QWebException):
        ob_desktop('asd', 'project_name', 'run_id_test')
@patch('QWeb.internal.browser.bs_mobile.BuiltIn.get_variable_value')
def test_mobile_bs_open_browser(patch_robot_builtin):
    """An invalid BrowserStack device name must raise QWebException (mobile)."""
    # Robot Framework variables are mocked so no real credentials are read.
    patch_robot_builtin.return_value = 'foobar'
    with pytest.raises(QWebException):
        ob_mobile('asd', 'project_name', 'run_id_test')
| [
"tuomas.koukkari@qentinel.com"
] | tuomas.koukkari@qentinel.com |
61e05c160d3d3e8a0e8883a6f682b56c9ef00fb5 | b81d6e02f061c390af2d6c06a46d2c0264c6403f | /manage.py | 61b4e483a4a795b0aa89fa79b74de93c933435a1 | [] | no_license | sadakchap/homeshop | 751ab3f4e4a3a1c150db9f62f9b089a8153dc335 | 16dc2e3fdd94f2203c94f2cf0a8c6c030647c3e2 | refs/heads/master | 2022-12-12T04:54:19.348473 | 2021-03-19T00:35:10 | 2021-03-19T00:35:10 | 198,579,813 | 0 | 0 | null | 2022-11-22T04:10:56 | 2019-07-24T07:14:58 | Python | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Standard Django manage.py entry point: point at the project settings,
    # then delegate to Django's command-line dispatcher.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'homeshop.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint; the usual cause is a missing virtualenv.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| [
"aliceprerna@gmail.com"
] | aliceprerna@gmail.com |
f913b623288923efd6184d03883fd81f38b4a1d2 | 209c876b1e248fd67bd156a137d961a6610f93c7 | /python/paddle/fluid/tests/unittests/test_fuse_gemm_epilogue_pass.py | de75cca6f45c5d72a2cf38b72fd3df83561b12a0 | [
"Apache-2.0"
] | permissive | Qengineering/Paddle | 36e0dba37d29146ebef4fba869490ecedbf4294e | 591456c69b76ee96d04b7d15dca6bb8080301f21 | refs/heads/develop | 2023-01-24T12:40:04.551345 | 2022-10-06T10:30:56 | 2022-10-06T10:30:56 | 544,837,444 | 0 | 0 | Apache-2.0 | 2022-10-03T10:12:54 | 2022-10-03T10:12:54 | null | UTF-8 | Python | false | false | 14,653 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 NVIDIA Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test cases for role makers."""
import paddle
import os
import unittest
import numpy as np
import paddle.fluid.core as core
def compare(ref, res, atol, rtol):
    """Elementwise tolerance check.

    Returns True iff every element satisfies |res - ref| <= atol + rtol*|ref|
    (the numpy.allclose formula, with the tolerance computed against the
    reference values promoted to float64).
    """
    flat_ref = np.asarray(ref).flatten()
    flat_res = np.asarray(res).flatten()
    # Tolerance derives from the reference, in double precision.
    tolerance = atol + rtol * abs(flat_ref.astype(np.float64))
    deviation = abs(flat_res - flat_ref)
    return not bool(np.any(deviation > tolerance))
def verify_node_count(graph, node_name, target_count):
    """Return True iff *graph* contains exactly *target_count* nodes named *node_name*."""
    matches = sum(1 for node in graph.nodes() if node.name() == node_name)
    return matches == target_count
class MultiFCLayer(paddle.nn.Layer):
    """Three-Linear-layer network used to exercise the gemm+epilogue fusion.

    *Activation* is the activation layer class (e.g. paddle.nn.ReLU) placed
    after linear1 and linear3, giving the pass matmul+bias(+act) patterns
    to rewrite.
    """
    def __init__(self, hidden, Activation):
        super(MultiFCLayer, self).__init__()
        self.linear1 = paddle.nn.Linear(hidden, 4 * hidden)
        self.linear2 = paddle.nn.Linear(4 * hidden, hidden)
        self.linear3 = paddle.nn.Linear(hidden, hidden)
        self.relu1 = Activation()
        self.relu2 = Activation()
        self.relu3 = Activation()
    def forward(self, x, matmul_y, ele_y):
        # linear1 -> act -> linear2, then two branches: one plain matmul with
        # matmul_y (output1), one linear3 -> act -> matmul -> add(ele_y) -> act;
        # the branches are recombined by a final element-wise add.
        output = self.linear1(x)
        output = self.relu1(output)
        output = self.linear2(output)
        output1 = paddle.matmul(output, matmul_y)
        output = self.linear3(output)
        output = self.relu2(output)
        output = paddle.matmul(output, matmul_y)
        output = paddle.add(output, ele_y)
        output = self.relu3(output)
        output = paddle.add(output, output1)
        return output
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueFWDBase(unittest.TestCase):
    """Forward-pass base case for the ``fuse_gemm_epilogue`` build pass.
    setUp() builds a static-graph MultiFCLayer model and records the loss
    WITHOUT fusion as the reference; _test_output() reruns with
    ``fuse_gemm_epilogue=True`` and checks both the numerics and the op
    counts in the rewritten graph.  Subclasses select the activation and
    tolerances via _get_act_type() / _pre_test_hooks().
    """
    def setUp(self):
        # Synthetic problem sizes: input is (batch, seqlen, hidden).
        self.batch = 64
        self.seqlen = 128
        self.hidden = 768
        paddle.enable_static()
        self.main_prog = paddle.static.Program()
        self.startup_prog = paddle.static.Program()
        with paddle.static.program_guard(self.main_prog, self.startup_prog):
            data = paddle.static.data(name="_data",
                                      shape=[-1, self.seqlen, self.hidden],
                                      dtype='float32')
            matmul_y = paddle.static.data(name="_matmul_y",
                                          shape=[1, self.hidden, self.hidden],
                                          dtype='float32')
            ele_y = paddle.static.data(name="_ele_y",
                                       shape=[
                                           self.hidden,
                                       ],
                                       dtype='float32')
            multi_layer = MultiFCLayer(self.hidden, self._get_act_type()[0])
            # fp16_guard marks this region as castable for the AMP subclasses.
            with paddle.static.amp.fp16_guard():
                out = multi_layer(data, matmul_y, ele_y)
                self.loss = paddle.mean(out)
        # Random inputs centered around zero.
        self.data_arr = np.random.random(
            (self.batch, self.seqlen, self.hidden)).astype("float32") - 0.5
        self.matmul_y_arr = np.random.random(
            (1, self.hidden, self.hidden)).astype("float32") - 0.5
        self.ele_y_arr = np.random.random(
            (self.hidden, )).astype("float32") - 0.5
        self.place = paddle.CUDAPlace(0)
        self.exe = paddle.static.Executor(self.place)
        self.exe.run(self.startup_prog)
        self._pre_test_hooks()
        self.feed = {
            "_data": self.data_arr,
            "_matmul_y": self.matmul_y_arr,
            "_ele_y": self.ele_y_arr
        }
        # Reference loss computed without the fusion pass.
        self.reference = self.exe.run(self.main_prog,
                                      feed=self.feed,
                                      fetch_list=[self.loss.name])
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    def _test_output(self):
        """Rerun with fusion enabled; compare loss and count fused ops."""
        build_strategy = paddle.static.BuildStrategy()
        build_strategy.fuse_gemm_epilogue = True
        program = paddle.static.CompiledProgram(self.main_prog)
        program = program.with_data_parallel(loss_name=self.loss.name,
                                             build_strategy=build_strategy,
                                             places=paddle.static.cuda_places())
        result = self.exe.run(program,
                              feed=self.feed,
                              fetch_list=[self.loss.name])
        self.assertTrue(
            compare(self.reference, result, self.atol, self.rtol),
            "[{}] outputs are miss-matched.".format(type(self).__name__))
        # Expect exactly 3 fused_gemm_epilogue nodes (one per Linear layer).
        self.assertTrue(
            verify_node_count(program._graph, "fused_gemm_epilogue", 3),
            "[{}] The number of fused_gemm_epilogue is miss-matched in the computing graph."
            .format(type(self).__name__))
        # Expect exactly 1 remaining stand-alone activation op.
        act_fwd_name = self._get_act_type()[1]
        self.assertTrue(
            verify_node_count(program._graph, act_fwd_name, 1),
            "[{}] The number of {} is miss-matched in the computing graph.".
            format(type(self).__name__, act_fwd_name))
    def _pre_test_hooks(self):
        # Default tolerances; subclasses override.
        self.atol = 1e-4
        self.rtol = 1e-3
    def _get_act_type(self):
        # Returns (activation layer class, forward op name).
        return paddle.nn.ReLU, "relu"
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueReluFWDFP32(TestFuseGemmEpilogueFWDBase):
    """FP32 forward test with ReLU activation."""
    def _pre_test_hooks(self):
        self.atol = 1e-3
        self.rtol = 1e-2
    def _get_act_type(self):
        return paddle.nn.ReLU, "relu"
    def test_output(self):
        self._test_output()
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueReluFWDFP16(TestFuseGemmEpilogueReluFWDFP32):
    """FP16 (AMP) variant of the ReLU forward test."""
    def _pre_test_hooks(self):
        self.atol = 1e-3
        self.rtol = 1e-2
        # Cast the fp16_guard region of the program plus its parameters,
        # then cast the host-side feed arrays to match.
        fp16_var_list = paddle.static.amp.cast_model_to_fp16(self.main_prog)
        paddle.static.amp.cast_parameters_to_fp16(
            self.place, self.main_prog, to_fp16_var_names=fp16_var_list)
        self.data_arr = self.data_arr.astype("float16")
        self.matmul_y_arr = self.matmul_y_arr.astype("float16")
        self.ele_y_arr = self.ele_y_arr.astype("float16")
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGeluFWDFP32(TestFuseGemmEpilogueFWDBase):
    """FP32 forward test with GELU activation."""
    def _pre_test_hooks(self):
        self.atol = 1e-4
        self.rtol = 1e-3
    def _get_act_type(self):
        return paddle.nn.GELU, "gelu"
    def test_output(self):
        self._test_output()
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGeluFWDFP16(TestFuseGemmEpilogueGeluFWDFP32):
    """FP16 (AMP) variant of the GELU forward test."""
    def _pre_test_hooks(self):
        self.atol = 1e-3
        self.rtol = 1e-2
        # Cast program region, parameters and host-side feeds to float16.
        fp16_var_list = paddle.static.amp.cast_model_to_fp16(self.main_prog)
        paddle.static.amp.cast_parameters_to_fp16(
            self.place, self.main_prog, to_fp16_var_names=fp16_var_list)
        self.data_arr = self.data_arr.astype("float16")
        self.matmul_y_arr = self.matmul_y_arr.astype("float16")
        self.ele_y_arr = self.ele_y_arr.astype("float16")
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueBWDBase(unittest.TestCase):
    """Backward-pass base case for the ``fuse_gemm_epilogue`` build pass.
    Same network as the FWD base, but setUp() also appends the backward
    graph and records reference gradients for every Linear weight/bias;
    _test_output() additionally checks that the grad ops were fused into
    fused_gemm_epilogue_grad nodes.
    """
    def setUp(self):
        # Synthetic problem sizes: input is (batch, seqlen, hidden).
        self.batch = 64
        self.seqlen = 128
        self.hidden = 768
        paddle.enable_static()
        self.main_prog = paddle.static.Program()
        self.startup_prog = paddle.static.Program()
        with paddle.static.program_guard(self.main_prog, self.startup_prog):
            data = paddle.static.data(name="_data",
                                      shape=[-1, self.seqlen, self.hidden],
                                      dtype='float32')
            matmul_y = paddle.static.data(name="_matmul_y",
                                          shape=[1, self.hidden, self.hidden],
                                          dtype='float32')
            ele_y = paddle.static.data(name="_ele_y",
                                       shape=[
                                           self.hidden,
                                       ],
                                       dtype='float32')
            multi_layer = MultiFCLayer(self.hidden, self._get_act_type()[0])
            with paddle.static.amp.fp16_guard():
                out = multi_layer(data, matmul_y, ele_y)
                self.loss = paddle.mean(out)
            # Append grad ops so gradients can be fetched and compared.
            paddle.static.append_backward(loss=self.loss)
        # Random inputs centered around zero.
        self.data_arr = np.random.random(
            (self.batch, self.seqlen, self.hidden)).astype("float32") - 0.5
        self.matmul_y_arr = np.random.random(
            (1, self.hidden, self.hidden)).astype("float32") - 0.5
        self.ele_y_arr = np.random.random(
            (self.hidden, )).astype("float32") - 0.5
        self.place = paddle.CUDAPlace(0)
        self.exe = paddle.static.Executor(self.place)
        self.exe.run(self.startup_prog)
        self._pre_test_hooks()
        self.feed = {
            "_data": self.data_arr,
            "_matmul_y": self.matmul_y_arr,
            "_ele_y": self.ele_y_arr
        }
        # Fetch the loss plus the weight/bias gradient of each Linear layer.
        self.fetch = [
            self.loss.name,
            '{}.w_0@GRAD'.format(multi_layer.linear1.full_name()),
            '{}.b_0@GRAD'.format(multi_layer.linear1.full_name()),
            '{}.w_0@GRAD'.format(multi_layer.linear2.full_name()),
            '{}.b_0@GRAD'.format(multi_layer.linear2.full_name()),
            '{}.w_0@GRAD'.format(multi_layer.linear3.full_name()),
            '{}.b_0@GRAD'.format(multi_layer.linear3.full_name())
        ]
        # Reference values computed without the fusion pass.
        self.outs_ref = self.exe.run(self.main_prog,
                                     feed=self.feed,
                                     fetch_list=self.fetch)
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    def _test_output(self):
        """Rerun with fusion enabled; compare all fetches and op counts."""
        build_strategy = paddle.static.BuildStrategy()
        build_strategy.fuse_gemm_epilogue = True
        program = paddle.static.CompiledProgram(self.main_prog)
        program = program.with_data_parallel(loss_name=self.loss.name,
                                             build_strategy=build_strategy,
                                             places=paddle.static.cuda_places())
        outs_res = self.exe.run(program, feed=self.feed, fetch_list=self.fetch)
        for ref, res in zip(self.outs_ref, outs_res):
            self.assertTrue(
                compare(ref, res, self.atol, self.rtol),
                "[{}] output is miss-matched.".format(type(self).__name__))
        # Expect 3 fused forward ops and 3 fused grad ops.
        self.assertTrue(
            verify_node_count(program._graph, "fused_gemm_epilogue", 3),
            "[{}] The number of fused_gemm_epilogue is miss-matched in the computing graph."
            .format(type(self).__name__))
        self.assertTrue(
            verify_node_count(program._graph, "fused_gemm_epilogue_grad", 3),
            "[{}] The number of fused_gemm_epilogue_grad is miss-matched in the computing graph."
            .format(type(self).__name__))
        # Expect 1 remaining forward activation op and 2 activation grad ops.
        _, act_fwd_name, act_bwd_name = self._get_act_type()
        self.assertTrue(
            verify_node_count(program._graph, act_fwd_name, 1),
            "[{}] The number of {} is miss-matched in the computing graph.".
            format(type(self).__name__, act_fwd_name))
        self.assertTrue(
            verify_node_count(program._graph, act_bwd_name, 2),
            "[{}] The number of {} is miss-matched in the computing graph.".
            format(type(self).__name__, act_bwd_name))
    def _pre_test_hooks(self):
        # Default tolerances; subclasses override.
        self.atol = 1e-4
        self.rtol = 1e-3
    def _get_act_type(self):
        # Returns (activation class, forward op name, backward op name).
        return paddle.nn.ReLU, "relu", "relu_grad"
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueReLUBWDFP32(TestFuseGemmEpilogueBWDBase):
    """FP32 backward test with ReLU activation."""
    def _pre_test_hooks(self):
        self.atol = 1e-4
        self.rtol = 1e-3
    def _get_act_type(self):
        return paddle.nn.ReLU, "relu", "relu_grad"
    def test_output(self):
        self._test_output()
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueReLUBWDFP16(TestFuseGemmEpilogueReLUBWDFP32):
    """FP16 (AMP) variant of the ReLU backward test."""
    def _pre_test_hooks(self):
        self.atol = 1e-3
        self.rtol = 1e-2
        # Cast program region, parameters and host-side feeds to float16.
        fp16_var_list = paddle.static.amp.cast_model_to_fp16(self.main_prog)
        paddle.static.amp.cast_parameters_to_fp16(
            self.place, self.main_prog, to_fp16_var_names=fp16_var_list)
        self.data_arr = self.data_arr.astype("float16")
        self.matmul_y_arr = self.matmul_y_arr.astype("float16")
        self.ele_y_arr = self.ele_y_arr.astype("float16")
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGeLUBWDFP32(TestFuseGemmEpilogueBWDBase):
    """FP32 backward test with GELU activation."""
    def _pre_test_hooks(self):
        self.atol = 5e-4
        self.rtol = 1e-3
    def _get_act_type(self):
        return paddle.nn.GELU, "gelu", "gelu_grad"
    def test_output(self):
        self._test_output()
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestFuseGemmEpilogueGeLUBWDFP16(TestFuseGemmEpilogueGeLUBWDFP32):
    """FP16 (AMP) variant of the GELU backward test."""
    def _pre_test_hooks(self):
        self.atol = 1e-3
        self.rtol = 1e-2
        # Cast program region, parameters and host-side feeds to float16.
        fp16_var_list = paddle.static.amp.cast_model_to_fp16(self.main_prog)
        paddle.static.amp.cast_parameters_to_fp16(
            self.place, self.main_prog, to_fp16_var_names=fp16_var_list)
        self.data_arr = self.data_arr.astype("float16")
        self.matmul_y_arr = self.matmul_y_arr.astype("float16")
        self.ele_y_arr = self.ele_y_arr.astype("float16")
if __name__ == "__main__":
    # Seed numpy so the randomly generated feeds are reproducible.
    np.random.seed(0)
    unittest.main()
| [
"noreply@github.com"
] | Qengineering.noreply@github.com |
1b7080fb26e0ce9cf9dbd7d96cff8cbea8235f30 | 63b7671b5296b97aa5271d076540e0d81b85d599 | /strongr/schedulerdomain/model/vm.py | e6068057a52774cf1b55e44b231bfcae90ef6e46 | [
"Apache-2.0"
] | permissive | bigr-erasmusmc/StrongR | bd30d7f41c1fef87bd241c0c4ea059f88c5c426e | 48573e170771a251f629f2d13dba7173f010a38c | refs/heads/master | 2021-10-10T10:17:15.344510 | 2018-11-20T10:21:50 | 2018-11-20T10:21:50 | 112,336,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | import strongr.core.gateways as gateways
from sqlalchemy import Column, ForeignKey, Integer, String, Enum, DateTime, func
from sqlalchemy.orm import relationship, synonym
from strongr.schedulerdomain.model.vmstate import VmState
Base = gateways.Gateways.sqlalchemy_base()
class Vm(Base):
    """ORM row for a scheduler-managed virtual machine.
    Updating ``state`` also stamps ``state_date`` via the property setter
    below (a portable substitute for a SQL trigger).
    """
    __tablename__ = 'vms'
    vm_id = Column(String(64), primary_key=True)
    cores = Column(Integer)  # CPU cores allocated to this VM
    ram = Column(Integer)  # RAM size; unit not recorded here -- TODO confirm (MB?)
    deadline = Column(DateTime())
    jobs = relationship('Job', back_populates='vm')  # jobs scheduled on this VM
    _state = Column('state', Enum(VmState))
    # In classical SQL we would put a trigger to update this field with NOW() if the state-field is updated.
    # SQLAlchemy has no way to write triggers without writing platform-dependent SQL at the time of writing.
    # Instead we use a setter on the state-field, this setter updates the state_date as well.
    # The disadvantage of this solution is that other apps need to implement logic like this as well making
    # the solution less portable.
    state_date = Column(DateTime())
    @property
    def state(self):
        # Read-through to the mapped column.
        return self._state
    # update state_date-field as well when we update state-field
    @state.setter
    def state(self, value):
        self._state = value
        # func.now() defers the timestamp to the database server clock.
        self.state_date = func.now()
    # create a synonym so that _state and state are considered the same field by the mapper
    state = synonym('_state', descriptor=state)
| [
"thomas@tphil.nl"
] | thomas@tphil.nl |
e1e388a78a05c376e2cbdfeb5ec293a6dca9f083 | d048a865519b5f944e1430c6181d00399c979d9c | /Assingnment/gallery/img_gallery/migrations/0001_initial.py | 8fb53d3015c4545c39b089c4691af2117f8fda08 | [] | no_license | jithinvijayan007/PaceWisdom- | 5f84261c4ba7f51e25c8c21074b48214a24cb6d2 | 1ba00814a757edb327923afcaf20fe04652efa0e | refs/heads/master | 2023-03-06T04:00:21.729404 | 2021-02-21T18:56:54 | 2021-02-21T18:56:54 | 340,974,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | # Generated by Django 3.0.3 on 2021-02-21 15:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Photo model (image + upload date + owner)."""
    initial = True
    dependencies = [
        # Depends on whichever user model the project configured (AUTH_USER_MODEL).
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Photo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='images')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"jithinvijayan007@gmail.com"
] | jithinvijayan007@gmail.com |
7bc1b6b21028f0853439b41809f0d22eaed51200 | f11ecb59dab63af605c6e5f256ee59e00447ecc1 | /412-fizz-buzz.py | 9f47ee36c12dc713615cf3877642178d6e215b00 | [] | no_license | floydchenchen/leetcode | 626d55f72ec914764385ce82b0f3c57f5a7e9de8 | 9d9e0c08992ef7dbd9ac517821faa9de17f49b0e | refs/heads/master | 2022-10-07T20:33:55.728141 | 2020-06-08T16:09:17 | 2020-06-08T16:09:17 | 269,525,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | # 412. Fizz Buzz
# Write a program that outputs the string representation of numbers from 1 to n.
#
# But for multiples of three it should output “Fizz” instead of the number and for the multiples of five output “Buzz”.
# For numbers which are multiples of both three and five output “FizzBuzz”.
#
# Example:
#
# n = 15,
#
# Return:
# [
# "1",
# "2",
# "Fizz",
# "4",
# "Buzz",
# "Fizz",
# "7",
# "8",
# "Fizz",
# "Buzz",
# "11",
# "Fizz",
# "13",
# "14",
# "FizzBuzz"
# ]
class Solution:
def fizzBuzz(self, n):
"""
:type n: int
:rtype: List[str]
"""
return ["Fizz" * (i % 3 == 0) + "Buzz" * (i % 5 == 0) or str(i) for i in range(1, n + 1)] | [
"chen2918@umn.edu"
] | chen2918@umn.edu |
88d6064b90f738ef9c08e4382d55d2de6915acf0 | 32174f2b74b286a52a2f3b0bfd120a0711bfc6dc | /src/mediafiles/urls.py | 57a892f53a61549764e3bdc61a328905c8a3fadb | [
"MIT"
] | permissive | hdknr/django-mediafiles | d13172162506cba2abdab0d85bc2815e2e24b6e6 | 7526e35eb7f532e36c95e7aa76290bb95a9ac41a | refs/heads/master | 2020-06-04T03:16:28.824865 | 2014-10-30T04:10:40 | 2014-10-30T04:10:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | from django.conf.urls import patterns, include, url
from views import *
# URL routes for media handling: thumbnails/previews/downloads by media id,
# plus admin CRUD views for galleries and their media.
# NOTE(review): patterns() is the pre-Django-1.10 style; newer Django uses a
# plain list of url()/path() entries -- confirm the project's Django version.
urlpatterns = patterns('',
    url('thumbnail/(?P<id>.+)/(?P<width>\d+)x(?P<height>\d+)',thumbnail,name='mediafiles_thumbnail'),
    url('preview/(?P<id>.+)',preview,name='mediafiles_preview'),
    url('download/(?P<id>.+)',download,name='mediafiles_download'),
    url('gallery/admin/(?P<id>\d+)/media/create',GalleryAdminMediaCreateView, name='gallery_admin_media_create'),
    url('gallery/admin/(?P<id>\d+)/media/(?P<mid>\d+)/delete',GalleryAdminMediaDeleteView, name='gallery_admin_media_delete'),
    url('gallery/admin/(?P<id>\d+)/media/(?P<mid>\d+)/image',GalleryAdminMediaImageView, name='gallery_admin_media_image'),
    url('gallery/admin/(?P<id>\d+)/media/(?P<mid>\d+)/thumb',GalleryAdminMediaThumbView, name='gallery_admin_media_thumb'),
    url('gallery/admin/(?P<id>\d+)',GalleryAdminDetailView, name='gallery_admin_detail'),
    url('gallery/admin/',GalleryAdminListView, name='gallery_admin_list'),
    )
| [
"gmail@hdknr.com"
] | gmail@hdknr.com |
cd6044a0a3f8e54b2352e2173edf15d2d0c570ee | 55e7503b0b4370c9537334359019cdc8e0e70096 | /Liao/051Pillow.py | 2863be318ecb27f0aaba22bcdd341e3e21d78976 | [] | no_license | dumin199101/PythonLearning | 4f3a86e529c3c873174cf12baf694c4d6c3ef5eb | deece6b4d0b44d4817f24e1cf9da6d69301139d4 | refs/heads/master | 2021-01-22T07:52:25.668045 | 2018-06-13T02:03:29 | 2018-06-13T02:03:29 | 92,582,921 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | #!/usr/bin/env python3
# _*_ coding=utf-8 _*_
from PIL import Image
# Open a jpg image file (note: path is relative to the current directory):
im = Image.open('test.jpg')
# Get the image dimensions:
w, h = im.size
print('Original image size: %sx%s' % (w, h))
# Scale down to 50% (thumbnail() resizes in place):
im.thumbnail((w//2, h//2))
print('Resize image to: %sx%s' % (w//2, h//2))
# Save the scaled image in JPEG format (done on the next line of the script):
im.save('thumbnail.jpg', 'jpeg') | [
"1766266374@qq.com"
] | 1766266374@qq.com |
99d67089539c2c79d725824080ed8b7654b0f8f4 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2009/kernel/default/kernel/files/scripts/bump-config.py | 02e04e83c5aae09d6cc0f57ef41f91c4685e14fb | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import pisi
# Regenerate the kernel-config patch from the current kernel build directory.
# (Python 2 script: note the print statement below.)
oldwd = os.getcwd()
# Read the kernel package metadata (name/version/release and archive name).
kpspec = pisi.specfile.SpecFile('pspec.xml')
kworkdir = kpspec.source.archive.name.split('.tar.bz2')[0]
kname = kpspec.source.name
kver = kpspec.history[0].version
krel = kpspec.history[0].release
# Build directory produced by a prior pisi build of this kernel.
kpath = "/var/pisi/%s-%s-%s/work/%s" % (kname, kver, krel, kworkdir)
if os.path.exists(kpath):
    os.chdir(kpath)
    # Diff the freshly generated .config against /dev/null to produce a
    # create-file patch and store it in the packaging tree.
    open(os.path.join(oldwd, "files/pardus/kernel-config.patch"), "w").write(os.popen("diff -u /dev/null .config").read())
    os.chdir(oldwd)
else:
    print "%s doesn't exist." % kpath
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
1977dce0a9d97752ddddd0f13ff2041d285c3f7e | 9468507c1beeb2cb69591889605ea155d2cb7a63 | /polls/urls.py | 8256069f1b75de4dabb4adbe29504e0dff123368 | [] | no_license | nimal54/drf-polls | 2375e2f5b78670de40c72b51eb616a69e7f49a65 | 9b29230998146eb225e0cffa0703d6bed1cc876a | refs/heads/master | 2020-04-25T00:21:14.952917 | 2018-03-16T11:54:53 | 2018-03-16T11:54:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | from django.urls import path
from .views import QuestionViewSet
from rest_framework.routers import DefaultRouter
# DRF router generating list/detail routes for questions at the app root.
router = DefaultRouter()
# NOTE(review): ``base_name`` was renamed to ``basename`` in DRF 3.9+;
# confirm the pinned djangorestframework version before upgrading.
router.register('', QuestionViewSet, base_name='questions')
urlpatterns = router.urls | [
"will@wsvincent.com"
] | will@wsvincent.com |
23dd4389391429f8cce0aacb515e18bd761cbdf1 | 4c62ac4d7e5e28baf41eb44a39a41225d1b56d6f | /prospection/settings.py | 9e563e2848c2859065428dc47c083fb042aef606 | [] | no_license | RogerMendez/prospection | de10fc3d67f5c80d475b68c43fd3579eed8d5fc3 | 1634ff07ac25ae3280ec4f02bd99c408ab9fec86 | refs/heads/master | 2016-08-12T13:47:51.349905 | 2016-04-04T15:42:51 | 2016-04-04T15:42:51 | 53,613,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,944 | py | """
Django settings for prospection project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# from the environment before any production deployment.
SECRET_KEY = 'r_8f_28ozie1g@60s_295_#srd+61y4w-=+ea&zbd6prxp_#y7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'users',
    'publishers',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'prospection.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'Templates'),],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'prospection.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# NOTE(review): database credentials are hard-coded in plain text; move
# them to environment variables / a secrets store.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'prospection_development',
        'USER':'postgres',
        'PASSWORD': '76176338',
        'HOST': '127.0.0.1',
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'es-BO'
TIME_ZONE = 'America/La_Paz'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = '/static/'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/media/'
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
'''EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'sieboliva@gmail.com'
EMAIL_HOST_PASSWORD = 'siebolivia2012'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
'''
# NOTE(review): SMTP credentials are committed in plain text (including the
# disabled block above); rotate and load from the environment.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'sieboliva@gmail.com'
EMAIL_HOST_PASSWORD = 'fsmfarxuvbzhrmav'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# NOTE(review): 'ADDREES' looks like a typo for 'ADDRESS'; renaming would
# break any importers, so it is only flagged here.
ADDREES = 'http://127.0.0.1:8000'
#ADDREES = 'http://192.168.1.107:8000' | [
"Roger.Mendez.R@gmail.com"
] | Roger.Mendez.R@gmail.com |
2c4e66fe5238df3f91668e2e39749c7c36ffcfd5 | 13d93c2922005af35056d015f1ae3ebebe05ee31 | /python/oreilly/cours_python/chap12/formes.py | 862c0a179aa1ad87fcd1a5f2d597996742b25e61 | [] | no_license | scls19fr/openphysic | 647cc2cdadbdafd050d178e02bc3873bd2b07445 | 67bdb548574f4feecb99b60995238f12f4ef26da | refs/heads/master | 2021-04-30T23:16:26.197961 | 2020-11-16T20:21:17 | 2020-11-16T20:21:17 | 32,207,155 | 1 | 1 | null | null | null | null | ISO-8859-1 | Python | false | false | 959 | py | #! /usr/bin/env python
# -*- coding: Latin-1 -*-
class Rectangle:
    "Rectangle with a length (L) and a width (l). Python 2 teaching example."
    def __init__(self, longueur =0, largeur =0):
        self.L = longueur
        self.l = largeur
        # Display name used by mesures(); subclasses override it.
        self.nom ="rectangle"
    def perimetre(self):
        # Returns the perimeter as a formatted "(L + l) * 2 = p" string.
        return "(%d + %d) * 2 = %d" % (self.L, self.l,
                                       (self.L + self.l)*2)
    def surface(self):
        # Returns the area as a formatted "L * l = s" string.
        return "%d * %d = %d" % (self.L, self.l, self.L*self.l)
    def mesures(self):
        # Prints a short French report of the shape's measurements.
        print "Un %s de %d sur %d" % (self.nom, self.L, self.l)
        print "a une surface de %s" % (self.surface(),)
        print "et un périmètre de %s\n" % (self.perimetre(),)
class Carre(Rectangle):
    "Square: a rectangle whose two sides are equal."
    def __init__(self, cote):
        # A square is a rectangle with length == width == cote.
        Rectangle.__init__(self, cote, cote)
        self.nom ="carré"
if __name__ == "__main__":
    # Small demo: print the measurements of a rectangle and a square.
    r1 = Rectangle(15, 30)
    r1.mesures()
    c1 = Carre(13)
    c1.mesures()
| [
"s.celles@gmail.com@41f3eeec-7763-abce-c6e2-0c955b6d8259"
] | s.celles@gmail.com@41f3eeec-7763-abce-c6e2-0c955b6d8259 |
86a766881113c16b0a2de95fe14d55daa2c7f7e7 | 14bad9231bef3ae79ba7b0c3837b34f6c89bc49b | /s1.py | 9583c5ed78a140f7258f820b9214bdda008fab5b | [] | no_license | Nagajothikp/Nagajothi | 80dc36d167a1143c203c594d25d9dc31892bd239 | 0d80c40c47d315cbcbc2d43bb2d77887d17414d4 | refs/heads/master | 2022-01-25T18:58:24.173042 | 2019-08-07T06:36:11 | 2019-08-07T06:36:11 | 194,424,732 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | string="abcd"
print(string[::-1]) | [
"kpnagajothi1999@gmail.com"
] | kpnagajothi1999@gmail.com |
e0c94600e33f8b9d33d36a64155d9371bb088189 | 349a835358dd431feb78ca0d62e1c22b3eb1b7a5 | /instatools/instaapp/insta.py | 6b137bcf1f7be3cc2d42c21668bfd29c331e609b | [] | no_license | priyankush-siloria/Insta | 44b04334ca7b6b4d195288f00daa647c0ed6f5f1 | e3a1ecfe2c6a4e646ea0bc10e836f649a36eef91 | refs/heads/master | 2020-07-07T13:18:39.976166 | 2019-08-20T11:02:27 | 2019-08-20T11:02:27 | 203,359,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,091 | py |
# Standard library
import email
import imaplib
import json
import os
import random
import smtplib
import sqlite3
import time
import time
import traceback
from sqlite3 import Error

# Third-party
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException, TimeoutException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# Added: Keys was referenced by send_keys() (Keys.SPACE) but never imported,
# raising NameError on the checkbox path.
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

# Local application
from .models import *
options = webdriver.ChromeOptions()
options.add_argument("--start-maximized")
def send_keys(element, keys):
    # Type *keys* into a form field, or toggle a checkbox.
    # NOTE(review): for checkboxes this only ever UNCHECKS (sends SPACE when
    # already checked) and ignores ``keys`` entirely; an unchecked box is left
    # untouched. Confirm that is intended -- the callers in this file only
    # pass text inputs.
    if element.get_attribute("type") == "checkbox":
        if element.get_attribute("checked"):
            element.send_keys(Keys.SPACE)
    else:
        # For text inputs: clear any existing value before typing.
        element.clear()
        element.send_keys(keys)
def user_login(driver, username, password):
    """Log *driver* into Instagram as *username*.

    Fills the login form, submits it, and dismisses the notification popup.
    Returns True on success. On any failure the WebDriver is quit (so the
    browser process is not leaked) and False is returned.
    """
    try:
        # Security fix: never echo the password to stdout/logs.
        print(username)
        driver.get('https://www.instagram.com/accounts/login/')
        # Wait for the login page logo before interacting with the form.
        WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, ("//h1[@class='NXVPg Szr5J coreSpriteLoggedOutWordmark']"))))
        send_keys(driver.find_element_by_name("username"), username)
        send_keys(driver.find_element_by_name("password"), password)
        WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, ("//button[@type='submit']"))))
        driver.find_element_by_xpath("//button[@type='submit']").click()
        # close the notification popup
        WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, ("//button[contains(text(),'Not Now')]"))))
        driver.find_element_by_xpath("//button[contains(text(),'Not Now')]").click()
        return True
    except Exception as e:
        print(str(e))
        # Bug fix: in the original, driver.quit() was dead code placed AFTER
        # ``return False``, leaking the browser on every failed login.
        driver.quit()
        return False
def get_data(insta_user,insta_pass):
    # Log in as `insta_user` and scrape profile stats (post count,
    # followers, following, pending follow requests), then persist them
    # as a UserInstaDetail row linked to the matching UserAccounts record.
    # Returns True on success, False if login failed.
    base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    driver_path = os.path.join(base_dir, 'chromedriver')
    driver = webdriver.Chrome(executable_path=driver_path,options=options)
    try:
        is_user_logged=user_login(driver,insta_user,insta_pass)
        print("is_user_logged",is_user_logged)
        if is_user_logged:
            driver.get('https://www.instagram.com')
            # Open the profile menu (avatar) in the top navigation bar.
            WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, ("//div[@class='nwXS6']"))));
            driver.find_element_by_xpath("//div[@class='nwXS6']").click()
            WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, ("//div[@class='nZSzR']/h1"))));
            login_username=driver.find_element_by_xpath("//div[@class='nZSzR']/h1").text
            print("login_username",login_username)
            username=driver.find_element_by_xpath("//h1[@class='rhpdm']").text
            print("username",username)
            # The stats list holds posts / followers / following in order.
            WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, ("//ul[@class='k9GMp ']"))));
            posts=driver.find_elements_by_xpath("//ul[@class='k9GMp ']/li")[0].text
            followers=driver.find_elements_by_xpath("//ul[@class='k9GMp ']/li")[1].text
            following=driver.find_elements_by_xpath("//ul[@class='k9GMp ']/li")[2].text
            # Open the activity page to read the pending follow-requests badge.
            WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, ("//a[@class='_0ZPOP kIKUG ']"))));
            driver.find_element_by_xpath('//a[@class="_0ZPOP kIKUG "]').click()
            follow_requests=''
            WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, ("//div[@class='JRHhD']"))));
            follow_requests=driver.find_element_by_xpath("//div[@class='JRHhD']").text
            if not follow_requests:
                follow_requests=0
            print(posts,followers,following,follow_requests)
            try:
                obj=UserAccounts.objects.get(email_account=insta_user)
            except UserAccounts.DoesNotExist:
                # NOTE(review): `obj` stays unbound on this path, so the
                # create() below would raise NameError -- confirm intended.
                pass
            try:
                UserInstaDetail.objects.create(
                insta_user=obj,
                posts=posts,
                total_followers=followers,
                total_follows=following,
                pending_follow_request=follow_requests
                )
            except:
                # Best-effort persistence: any DB error is swallowed.
                pass
            return True
        else:
            print("Error....User is not able to login")
            return False
    except Exception as e:
        raise e
        # Unreachable: the `raise` above always propagates.
        return False
        driver.quit()
    finally:
        # Always release the browser, whatever happened above.
        driver.quit()
| [
"you@example.com"
] | you@example.com |
85df5a236ac20d443220e956a0d722c99f0526b5 | 1df7ba55c4b61772c1a31c503e6b8881f1456dc5 | /untitled9/apps/courses/migrations/0012_auto_20170211_1218.py | c583d480b497bd5e2a05121592b29b4d9fca5848 | [] | no_license | fzk466569/python-django-pro | 35918756060fcae375d3c99ea1a6934949b6d605 | 9add086b7a910f255df5b192268f1e117057e053 | refs/heads/master | 2021-01-19T13:18:14.141880 | 2017-02-19T12:16:29 | 2017-02-19T12:16:29 | 82,374,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-02-11 12:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.8: re-declares the `course` foreign key
    # on CourseResource so its verbose_name is the escaped Chinese label
    # for "course name".
    dependencies = [
        ('courses', '0011_course_is_banner'),
    ]
    operations = [
        migrations.AlterField(
            model_name='courseresource',
            name='course',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courses.Course', verbose_name='\u8bfe\u7a0b\u540d\u79f0'),
        ),
    ]
| [
"fzk466569"
] | fzk466569 |
e9da4a005399d3609d065e6748fe5a15aa10506a | 6daaf3cecb19f95265188adc9afc97e640ede23c | /排序.py | cb6b07db9e984f205c218909dd2b24b41c29796d | [] | no_license | guoweifeng216/python | 723f1b29610d9f536a061243a64cf68e28a249be | 658de396ba13f80d7cb3ebd3785d32dabe4b611d | refs/heads/master | 2021-01-20T13:11:47.393514 | 2019-12-04T02:23:36 | 2019-12-04T02:23:36 | 90,457,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | #coding=utf-8
"""
@athor:weifeng.guo
@data:2018/9/28 14:41
@filename:排序
"""
from operator import itemgetter
alist = [('2', '3', '10'), ('1', '2', '3'), ('5', '6', '7'), ('2', '5', '10'), ('2', '4', '10')]
# Multi-level sort: order by the 3rd element first, then by the 2nd.
# (Comment translated from Chinese; Python 2 syntax throughout.)
print sorted(alist, cmp=None, key=itemgetter(2, 1), reverse=False)
print sorted(alist, cmp=None, key=lambda x: itemgetter(2, 1)(x), reverse=False)
# Same key fields but compared numerically instead of lexicographically
# ('10' < '3' as strings, but 10 > 3 as ints).
print sorted(alist, cmp=None, key=lambda x: map(int, itemgetter(2, 1)(x)), reverse=False)
"weifeng.guo@cnexlabs.com"
] | weifeng.guo@cnexlabs.com |
e56eec74dc030dc0c2711eb24adffbec735c8b01 | dc418f7d888ffe73b8f8df2e1fe7b7795f12518a | /code/treePlotter.py | 7590055925b488adca592c1794421e26d0ac2fc8 | [] | no_license | ForrestPi/ML | f936458fa48033940c4d1629e7bc85a0f28a601d | 4e931f2f09beecadbaa79aed5061ae11f5b5d456 | refs/heads/master | 2021-01-17T12:55:10.832957 | 2016-07-12T15:13:32 | 2016-07-12T15:13:32 | 60,149,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,820 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
font = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=14)
decisionNode = dict(boxstyle="sawtooth",fc="0.8")
leafNode = dict(boxstyle="round4",fc="0.8")
arrow_args = dict(arrowstyle="<-")
def plotNode(nodeTxt,centerPt,parentPt,nodeType):
    # Draw one annotated tree node at `centerPt` with an arrow pointing
    # from `parentPt`; `nodeType` is the matplotlib bbox style dict
    # (decisionNode or leafNode). Coordinates are axes fractions.
    createPlot.ax1.annotate(nodeTxt,fontproperties=font,xy=parentPt,xycoords='axes fraction',
                            xytext=centerPt,textcoords='axes fraction',
                            va="center",ha="center",bbox=nodeType,arrowprops=arrow_args)
def createPlot():
    # Demo plot of one decision node and one leaf node.
    # NOTE(review): shadowed by the createPlot(inTree) defined below, so
    # this zero-argument version is effectively dead code.
    fig=plt.figure(1,facecolor='white')
    fig.clf()
    createPlot.ax1 = plt.subplot(111,frameon=False)
    plotNode(U'决策节点',(0.5,0.1),(0.1,0.5),decisionNode)
    plotNode(U'叶节点',(0.8,0.1),(0.3,0.8),leafNode)
    plt.show()
def createPlot(inTree):
    # Render a whole decision tree: size the layout from the tree's leaf
    # count and depth, then recurse via plotTree from the top centre.
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    axprops = dict(xticks=[], yticks=[])
    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)    #no ticks
    #createPlot.ax1 = plt.subplot(111, frameon=False) #ticks for demo puropses
    plotTree.totalW = float(getNumLeafs(inTree))
    plotTree.totalD = float(getTreeDepth(inTree))
    # x cursor starts half a slot left of 0 so leaves land on slot centres.
    plotTree.xOff = -0.5/plotTree.totalW; plotTree.yOff = 1.0;
    plotTree(inTree, (0.5,1.0), '')
    plt.show()
def getNumLeafs(myTree):
    """Return the number of leaf (non-dict) nodes in a decision tree.

    `myTree` is a single-key dict mapping a feature name to a dict of
    branch-value -> subtree-or-label, as produced by retrieveTree().
    """
    numLeafs = 0
    # Original used myTree.keys()[0], which is Python-2-only: dict views
    # are not indexable on Python 3. next(iter(...)) works on both.
    firstStr = next(iter(myTree))
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if isinstance(secondDict[key], dict):
            # Branch leads to a subtree: count its leaves recursively.
            numLeafs += getNumLeafs(secondDict[key])
        else:
            numLeafs += 1
    return numLeafs
def getTreeDepth(myTree):
    """Return the depth of a decision tree (number of split levels).

    A tree whose branches are all labels has depth 1; each nested subtree
    adds one level.
    """
    maxDepth = 0
    # Original used myTree.keys()[0], which is Python-2-only: dict views
    # are not indexable on Python 3. next(iter(...)) works on both.
    firstStr = next(iter(myTree))
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if isinstance(secondDict[key], dict):
            thisDepth = 1 + getTreeDepth(secondDict[key])
        else:
            thisDepth = 1
        if thisDepth > maxDepth: maxDepth = thisDepth
    return maxDepth
def retrieveTree(i):
    """Return the i-th canned decision tree used by the plotting demos."""
    shallow = {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
    nested = {'no surfacing': {0: 'no', 1: {'flippers': {0: {'head': {0: 'no', 1: 'yes'}}, 1: 'no'}}}}
    return (shallow, nested)[i]
def plotMidText(cntrPt, parentPt, txtString):
    # Label a branch: draw `txtString` at the midpoint of the edge
    # between parent and child, rotated 30 degrees.
    xMid = (parentPt[0]-cntrPt[0])/2.0 + cntrPt[0]
    yMid = (parentPt[1]-cntrPt[1])/2.0 + cntrPt[1]
    createPlot.ax1.text(xMid, yMid, txtString, va="center", ha="center", rotation=30)
def plotTree(myTree, parentPt, nodeTxt):
    # Recursively lay out and draw `myTree` below `parentPt`; `nodeTxt`
    # labels the incoming edge. Layout state lives on function attributes:
    # xOff/yOff are the drawing cursors, totalW/totalD are set by createPlot.
    numLeafs = getNumLeafs(myTree)  #this determines the x width of this tree
    depth = getTreeDepth(myTree)
    # NOTE(review): dict.keys()[0] is Python-2-only syntax -- confirm the
    # intended runtime before porting.
    firstStr = myTree.keys()[0]     #the text label for this node should be this
    # Centre this subtree over the span of its leaves.
    cntrPt = (plotTree.xOff + (1.0 + float(numLeafs))/2.0/plotTree.totalW, plotTree.yOff)
    plotMidText(cntrPt, parentPt, nodeTxt)
    plotNode(firstStr, cntrPt, parentPt, decisionNode)
    secondDict = myTree[firstStr]
    plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD   # descend one level
    for key in secondDict.keys():
        if type(secondDict[key]).__name__=='dict':  # subtree vs leaf label
            plotTree(secondDict[key],cntrPt,str(key))  #recursion
        else:   # it's a leaf node: advance the x cursor and draw it
            plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
            plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
            plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
    plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD   # pop back up a level
#if you do get a dictonary you know it's a tree, and the first element will be another dict
| [
"forrest_zhu@foxmail.com"
] | forrest_zhu@foxmail.com |
5e1f9a3d1cd7ed58d4ea2d85df6a90c49a4d2d71 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/9/vkn.py | 772b386271afc30a18a93ffc226528508f70a51f | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Print the tokens enclosed in double quotes, space-joined; a bare
    # pair of quote tokens prints an empty line. Python 2 syntax.
    # NOTE(review): this only matches when the quotes are whitespace-
    # separated tokens (e.g. vKN " hello ") -- confirm the input format.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
    # Interpret `fileName` line by line: lines whose first token is the
    # opcode 'vKN' have their remaining tokens printed via printFunction;
    # any other opcode prints ERROR and stops. Python 2 syntax.
    # NOTE(review): a blank line would raise IndexError on data[0] --
    # confirm inputs never contain one.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'vKN':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
3bfa859eb68da1097de82b39eef158ca52df3a1f | 7298d1692c6948f0880e550d6100c63a64ce3ea1 | /deriva-annotations/catalog50/catalog-configs/PDB/pdbx_ion_info.py | 819386249a80989fe62fcaf387987aa437a1ee7b | [] | no_license | informatics-isi-edu/protein-database | b7684b3d08dbf22c1e7c4a4b8460248c6f0d2c6d | ce4be1bf13e6b1c22f3fccbb513824782609991f | refs/heads/master | 2023-08-16T10:24:10.206574 | 2023-07-25T23:10:42 | 2023-07-25T23:10:42 | 174,095,941 | 2 | 0 | null | 2023-06-16T19:44:43 | 2019-03-06T07:39:14 | Python | UTF-8 | Python | false | false | 3,844 | py | import argparse
from deriva.core import ErmrestCatalog, AttrDict, get_credential
import deriva.core.ermrest_model as em
from deriva.core.ermrest_config import tag as chaise_tags
from deriva.utils.catalog.manage.update_catalog import CatalogUpdater, parse_args
groups = {}
table_name = 'pdbx_ion_info'
schema_name = 'PDB'
column_annotations = {
'structure_id': {},
'id': {},
'name': {},
'numb_per_asym_unit': {},
'Owner': {}
}
column_comment = {
'structure_id': 'A reference to table entry.id.',
'id': 'type:text\nSerial number.\nexamples:1',
'name': 'type:text\nName of ion.\nexamples:MG',
'numb_per_asym_unit': 'type:int4\nNumber of ion molecules per asymmetric unit.\nexamples:1,2,3',
'Owner': 'Group that can update the record.'
}
column_acls = {}
column_acl_bindings = {}
column_defs = [
em.Column.define(
'structure_id',
em.builtin_types['text'],
nullok=False,
comment=column_comment['structure_id'],
),
em.Column.define('id', em.builtin_types['text'], nullok=False, comment=column_comment['id'],
),
em.Column.define(
'name', em.builtin_types['text'], nullok=False, comment=column_comment['name'],
),
em.Column.define(
'numb_per_asym_unit',
em.builtin_types['int4'],
nullok=False,
comment=column_comment['numb_per_asym_unit'],
),
em.Column.define('Owner', em.builtin_types['text'], comment=column_comment['Owner'],
),
]
visible_columns = {
'*': [
'RID', {
'source': [{
'outbound': ['PDB', 'pdbx_ion_info_structure_id_fkey']
}, 'RID'],
'comment': 'A reference to table entry.id.',
'markdown_name': 'structure id'
}, 'id', 'name', 'numb_per_asym_unit', ['PDB', 'pdbx_ion_info_RCB_fkey'],
['PDB', 'pdbx_ion_info_RMB_fkey'], 'RCT', 'RMT', ['PDB', 'pdbx_ion_info_Owner_fkey']
],
'entry': [
{
'source': [{
'outbound': ['PDB', 'pdbx_ion_info_structure_id_fkey']
}, 'RID'],
'comment': 'A reference to table entry.id.',
'markdown_name': 'structure id'
}, 'id', 'name', 'numb_per_asym_unit'
]
}
table_annotations = {chaise_tags.visible_columns: visible_columns, }
table_comment = 'Details of ions'
table_acls = {}
table_acl_bindings = {}
key_defs = [
em.Key.define(['RID'], constraint_names=[['PDB', 'pdbx_ion_info_RIDkey1']],
),
em.Key.define(
['structure_id', 'id'], constraint_names=[['PDB', 'pdbx_ion_info_primary_key']],
),
]
fkey_defs = [
em.ForeignKey.define(
['RCB'],
'public',
'ERMrest_Client', ['ID'],
constraint_names=[['PDB', 'pdbx_ion_info_RCB_fkey']],
),
em.ForeignKey.define(
['RMB'],
'public',
'ERMrest_Client', ['ID'],
constraint_names=[['PDB', 'pdbx_ion_info_RMB_fkey']],
),
]
table_def = em.Table.define(
table_name,
column_defs=column_defs,
key_defs=key_defs,
fkey_defs=fkey_defs,
annotations=table_annotations,
acls=table_acls,
acl_bindings=table_acl_bindings,
comment=table_comment,
provide_system=True
)
def main(catalog, mode, replace=False, really=False):
    # Create or update the PDB.pdbx_ion_info table definition in the given
    # ERMrest catalog; `mode`, `replace` and `really` are forwarded to the
    # deriva CatalogUpdater.
    updater = CatalogUpdater(catalog)
    table_def['column_annotations'] = column_annotations
    table_def['column_comment'] = column_comment
    updater.update_table(mode, schema_name, table_def, replace=replace, really=really)
if __name__ == "__main__":
    # Default deployment target; parse_args may override host/catalog id
    # from the command line before connecting.
    host = 'pdb.isrd.isi.edu'
    catalog_id = 50
    mode, replace, host, catalog_id = parse_args(host, catalog_id, is_table=True)
    catalog = ErmrestCatalog('https', host, catalog_id=catalog_id, credentials=get_credential(host))
    main(catalog, mode, replace)
| [
"brinda.vallat@rcsb.org"
] | brinda.vallat@rcsb.org |
7a42e8446c4f29d13b600eb38a85f23b73061de8 | f640fcb49bf99ebec5f34603748121fbbe9171dc | /lib_openshift/apis/apisextensions_api.py | d993d831c5339f04067ee7968dc716a0c3814209 | [] | no_license | tbielawa/lib_openshift | bea8a11c4904a7d6c815abdd2b206de5a4cc7a93 | 34ca0f6a0c5388624a040223f29552dc4c0f8c49 | refs/heads/master | 2023-06-16T22:41:15.894021 | 2016-07-11T21:26:59 | 2016-07-11T21:26:59 | 63,156,531 | 0 | 0 | null | 2016-07-12T12:35:29 | 2016-07-12T12:35:29 | null | UTF-8 | Python | false | false | 3,843 | py | # coding: utf-8
"""
ApisextensionsApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ApisextensionsApi(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """
    def __init__(self, api_client=None):
        # Prefer an explicitly supplied client; otherwise reuse (creating
        # on first use) the process-wide client held by Configuration.
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client
    def get_api_group(self, **kwargs):
        """
        get information of a group
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.get_api_group(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Reject any keyword argument the endpoint does not declare.
        all_params = []
        all_params.append('callback')
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_api_group" % key
                )
            params[key] = val
        del params['kwargs']
        resource_path = '/apis/extensions'.replace('{format}', 'json')
        path_params = {}
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'application/yaml'])
        # Authentication setting
        auth_settings = []
        # Synchronous GET unless a callback was supplied above.
        response = self.api_client.call_api(resource_path, 'GET',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type=None,
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
| [
"jdetiber@redhat.com"
] | jdetiber@redhat.com |
4de59e27dae5a22267169f15cda395acfeff2630 | 3b9d763180410bf0abf5b9c37391a64319efe839 | /toontown/suit/DistributedMintSuit.py | ea4e5dffb2a6dd4f64898cc7fb7e2feeab0c0a9e | [] | no_license | qphoton/Reverse_Engineering_Project_ToonTown | 442f15d484324be749f6f0e5e4e74fc6436e4e30 | 11468ab449060169191366bc14ff8113ee3beffb | refs/heads/master | 2021-05-08T00:07:09.720166 | 2017-10-21T02:37:22 | 2017-10-21T02:37:22 | 107,617,661 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | # File: D (Python 2.4)
from toontown.suit import DistributedFactorySuit
from direct.directnotify import DirectNotifyGlobal
class DistributedMintSuit(DistributedFactorySuit.DistributedFactorySuit):
    # Client-side mint variant of the factory suit; only overrides the
    # notify category, all behaviour comes from DistributedFactorySuit.
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedMintSuit')
| [
"Infinitywilee@rocketmail.com"
] | Infinitywilee@rocketmail.com |
2029409d84ed8500355b47b38c34f3ef01a08359 | 473568bf080e3637ee118b374f77e9f561286c6c | /InterviewCorner/HourGlassSum.py | 398c30d4e7319db8e920ee42c4d229357d020db2 | [] | no_license | VineetPrasadVerma/GeeksForGeeks | c2f7fc94b0a07ba146025ca8a786581dbf7154c8 | fdb4e4a7e742c4d67015977e3fbd5d35b213534f | refs/heads/master | 2020-06-02T11:23:11.421399 | 2020-01-07T16:51:18 | 2020-01-07T16:51:18 | 191,138,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | arr = [
[1, 1, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[0, 0, 2, 4, 4, 0],
[0, 0, 0, 2, 0, 0],
[0, 0, 1, 2, 4, 0]
]
lst_hourglass_sum = []
def find_hour_glass_sum(upper_limit, lower_limit, grid=None, out=None):
    """Append the hourglass sums for rows [upper_limit, lower_limit) of a grid.

    An hourglass is a 3-wide window where full rows alternate with a
    centre-only row (top/bottom rows contribute 3 cells, the middle row
    contributes its centre cell). One sum is appended per horizontal
    window position.

    Backward-compatible generalisation: `grid` defaults to the module's
    `arr` and `out` to `lst_hourglass_sum`, matching the original
    behaviour; the window count is derived from the row width instead of
    the hard-coded 4.
    """
    if grid is None:
        grid = arr
    if out is None:
        out = lst_hourglass_sum
    width = len(grid[upper_limit])
    for left in range(width - 2):
        total = 0
        for offset, row in enumerate(range(upper_limit, lower_limit)):
            if offset % 2 == 0:
                # Even rows of the window: all three cells count.
                total += sum(grid[row][left:left + 3])
            else:
                # Odd rows: only the centre cell (the hourglass "neck").
                total += grid[row][left + 1]
        out.append(total)
# Slide the 3-row window down the 6x6 grid; each call appends the four
# hourglass sums for that band to lst_hourglass_sum (16 values total).
for i in range(4):
    find_hour_glass_sum(i, i+3)
print(lst_hourglass_sum)
"vineetpd1996@gmail.com"
] | vineetpd1996@gmail.com |
5ad94a29bd0907446a9d0f90e76f1aeacadf8912 | bb151500b0fc5bb9ef1b1a9e5bba98e485b4b34d | /problemSet/593D-Happy_Tree_Party.py | 1cffb9c69b44fe7fcb384b86014bc74d21c8e1d5 | [] | no_license | yamaton/codeforces | 47b98b23da0a3a8237d9021b0122eaa498d98628 | e0675fd010df852c94eadffdf8b801eeea7ad81b | refs/heads/master | 2021-01-10T01:22:02.338425 | 2018-11-28T02:45:04 | 2018-11-28T03:21:45 | 45,873,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | """
Codeforces Round #329 (Div. 2)
Problem 593 D. Happy Tree Party
@author yamaton
@date 2015-11-04
"""
import itertools as it
import functools
import operator
import collections
import math
import sys
def solve(triples):
    # TODO: unimplemented stub -- always returns None. `triples` holds the
    # (u, v, weight) tree edges read in main().
    pass
def print_stderr(*args, **kwargs):
    # print() clone that writes to stderr, so debug output does not
    # pollute the judged stdout.
    print(*args, file=sys.stderr, **kwargs)
def main():
    # Read the problem input: n and m, then n-1 weighted edges and m
    # operations, and delegate to solve().
    # NOTE(review): the first input line is consumed twice (once as a lone
    # n, then again as "n m"); for this problem's single-header format one
    # of these reads looks wrong -- confirm against the statement.
    n = int(input())
    [n, m] = [int(i) for i in input().strip().split()]
    triples = [tuple(int(i) for i in input().strip().split()) for _ in range(n-1)]
    operations = [tuple(int(i) for i in input().strip().split()) for _ in range(m)]
    result = solve(triples)
    # print(result)
main()
| [
"yamaton@gmail.com"
] | yamaton@gmail.com |
b321a58832872eba95d18547d884255292ba442a | 6a3dfbf1c0398731520dc34663423e18149a8976 | /web/pgadmin/browser/server_groups/servers/databases/schemas/tests/test_schema_delete.py | 53f33c2d45d0f4e6c9546878070462a2e9151a49 | [
"PostgreSQL"
] | permissive | Kibetchirchir/pgadmin4 | 29272912aa5ee32d33d0f8267e86ed958e4f2dd7 | fe264aafa7e2fb8eced49a5c24e13484a3552fdf | refs/heads/master | 2022-05-24T17:52:55.485701 | 2020-04-28T05:34:43 | 2020-04-28T05:34:43 | 259,569,015 | 1 | 0 | NOASSERTION | 2020-04-28T07:52:51 | 2020-04-28T07:52:51 | null | UTF-8 | Python | false | false | 2,865 | py | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import uuid
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as schema_utils
class SchemaDeleteTestCase(BaseTestGenerator):
    """Regression test: create a schema, then delete it via the REST API."""
    scenarios = [
        # Fetching default URL for extension node.
        ('Check Schema Node URL', dict(url='/browser/schema/obj/'))
    ]
    def setUp(self):
        # Create a uniquely named schema in the most recently added test
        # database so runTest has something to delete.
        self.database_info = parent_node_dict["database"][-1]
        self.db_name = self.database_info["db_name"]
        # Change the db name, so that schema will create in newly created db
        self.schema_name = "schema_get_%s" % str(uuid.uuid4())[1:8]
        connection = utils.get_db_connection(self.db_name,
                                             self.server['username'],
                                             self.server['db_password'],
                                             self.server['host'],
                                             self.server['port'])
        self.schema_details = schema_utils.create_schema(connection,
                                                         self.schema_name)
    def runTest(self):
        """ This function will delete schema under database node. """
        server_id = self.database_info["server_id"]
        db_id = self.database_info["db_id"]
        db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
                                                 server_id, db_id)
        if not db_con['data']["connected"]:
            raise Exception("Could not connect to database to delete the"
                            " schema.")
        # schema_details is (oid, name) as returned by create_schema().
        schema_id = self.schema_details[0]
        schema_name = self.schema_details[1]
        # Sanity check: the schema created in setUp must still exist.
        schema_response = schema_utils.verify_schemas(self.server,
                                                      self.db_name,
                                                      schema_name)
        if not schema_response:
            raise Exception("Could not find the schema to delete.")
        response = self.tester.delete(self.url + str(utils.SERVER_GROUP) +
                                      '/' + str(server_id) + '/' +
                                      str(db_id) + '/' + str(schema_id),
                                      follow_redirects=True)
        self.assertEquals(response.status_code, 200)
    def tearDown(self):
        # Nothing to clean up: runTest removes the schema it created.
        pass
| [
"dpage@pgadmin.org"
] | dpage@pgadmin.org |
1f0831c7a14bbfae8f99544b605c157d603541ca | 22c5fc7dd52149ebd4338a487ae9ab0db0e43f01 | /tests/test_keep_largest_connected_component.py | 5e6dd716b12478f0f51a3c819106a6af3d59a08f | [
"Apache-2.0"
] | permissive | precision-medicine-um/MONAI-Deep_Learning | 3d3f547dd9815152561a6853f8d4727b0e5ca4c4 | d94c4d3a2c465717ba3fae01b7acea7fada9885b | refs/heads/master | 2022-12-28T07:04:07.768415 | 2020-10-17T13:11:56 | 2020-10-17T13:11:56 | 305,346,962 | 3 | 0 | Apache-2.0 | 2022-12-27T15:44:13 | 2020-10-19T10:30:07 | Python | UTF-8 | Python | false | false | 15,637 | py | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from parameterized import parameterized
from monai.transforms import KeepLargestConnectedComponent
grid_1 = torch.tensor([[[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [1, 2, 1, 0, 0], [1, 2, 0, 1, 0], [2, 2, 0, 0, 2]]]])
grid_2 = torch.tensor([[[[0, 0, 0, 0, 1], [0, 0, 1, 1, 1], [1, 0, 1, 1, 2], [1, 0, 1, 2, 2], [0, 0, 0, 0, 1]]]])
grid_3 = torch.tensor(
[
[
[
[1.0, 1.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 1.0],
],
],
[
[
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 0.0, 1.0, 1.0, 0.0],
[1.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
],
]
)
TEST_CASE_1 = [
"value_1",
{"independent": False, "applied_labels": 1},
grid_1,
torch.tensor([[[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [0, 2, 1, 0, 0], [0, 2, 0, 1, 0], [2, 2, 0, 0, 2]]]]),
]
TEST_CASE_2 = [
"value_2",
{"independent": False, "applied_labels": [2]},
grid_1,
torch.tensor([[[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [1, 2, 1, 0, 0], [1, 2, 0, 1, 0], [2, 2, 0, 0, 0]]]]),
]
TEST_CASE_3 = [
"independent_value_1_2",
{"independent": True, "applied_labels": [1, 2]},
grid_1,
torch.tensor([[[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [0, 2, 1, 0, 0], [0, 2, 0, 1, 0], [2, 2, 0, 0, 0]]]]),
]
TEST_CASE_4 = [
"dependent_value_1_2",
{"independent": False, "applied_labels": [1, 2]},
grid_1,
torch.tensor([[[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [1, 2, 1, 0, 0], [1, 2, 0, 1, 0], [2, 2, 0, 0, 2]]]]),
]
TEST_CASE_5 = [
"value_1",
{"independent": True, "applied_labels": [1]},
grid_2,
torch.tensor([[[[0, 0, 0, 0, 1], [0, 0, 1, 1, 1], [0, 0, 1, 1, 2], [0, 0, 1, 2, 2], [0, 0, 0, 0, 0]]]]),
]
TEST_CASE_6 = [
"independent_value_1_2",
{"independent": True, "applied_labels": [1, 2]},
grid_2,
torch.tensor([[[[0, 0, 0, 0, 1], [0, 0, 1, 1, 1], [0, 0, 1, 1, 2], [0, 0, 1, 2, 2], [0, 0, 0, 0, 0]]]]),
]
TEST_CASE_7 = [
"dependent_value_1_2",
{"independent": False, "applied_labels": [1, 2]},
grid_2,
torch.tensor([[[[0, 0, 0, 0, 1], [0, 0, 1, 1, 1], [0, 0, 1, 1, 2], [0, 0, 1, 2, 2], [0, 0, 0, 0, 1]]]]),
]
TEST_CASE_8 = [
"value_1_connect_1",
{"independent": False, "applied_labels": [1], "connectivity": 1},
grid_1,
torch.tensor([[[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [0, 2, 1, 0, 0], [0, 2, 0, 0, 0], [2, 2, 0, 0, 2]]]]),
]
TEST_CASE_9 = [
"independent_value_1_2_connect_1",
{"independent": True, "applied_labels": [1, 2], "connectivity": 1},
grid_1,
torch.tensor([[[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [0, 2, 1, 0, 0], [0, 2, 0, 0, 0], [2, 2, 0, 0, 0]]]]),
]
TEST_CASE_10 = [
"dependent_value_1_2_connect_1",
{"independent": False, "applied_labels": [1, 2], "connectivity": 1},
grid_1,
torch.tensor([[[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [1, 2, 1, 0, 0], [1, 2, 0, 0, 0], [2, 2, 0, 0, 0]]]]),
]
TEST_CASE_11 = [
"onehot_independent_batch_2_apply_label_1_connect_1",
{"independent": True, "applied_labels": [1], "connectivity": 1},
grid_3,
torch.tensor(
[
[
[
[1.0, 1.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 1.0],
],
],
[
[
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
],
]
),
]
TEST_CASE_12 = [
"onehot_independent_batch_2_apply_label_1_connect_2",
{"independent": True, "applied_labels": [1], "connectivity": 2},
grid_3,
torch.tensor(
[
[
[
[1.0, 1.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 1.0],
],
],
[
[
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
],
]
),
]
TEST_CASE_13 = [
"onehot_independent_batch_2_apply_label_1_2_connect_2",
{"independent": True, "applied_labels": [1, 2], "connectivity": 2},
grid_3,
torch.tensor(
[
[
[
[1.0, 1.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
],
],
[
[
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
],
]
),
]
TEST_CASE_14 = [
"onehot_dependent_batch_2_apply_label_1_2_connect_2",
{"independent": False, "applied_labels": [1, 2], "connectivity": 2},
grid_3,
torch.tensor(
[
[
[
[1.0, 1.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 1.0],
],
],
[
[
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
],
]
),
]
TEST_CASE_15 = [
"onehot_dependent_batch_2_apply_label_1_2_connect_1",
{"independent": False, "applied_labels": [1, 2], "connectivity": 1},
grid_3,
torch.tensor(
[
[
[
[1.0, 1.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
],
],
[
[
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
],
]
),
]
VALID_CASES = [
TEST_CASE_1,
TEST_CASE_2,
TEST_CASE_3,
TEST_CASE_4,
TEST_CASE_5,
TEST_CASE_6,
TEST_CASE_7,
TEST_CASE_8,
TEST_CASE_9,
TEST_CASE_10,
TEST_CASE_11,
TEST_CASE_12,
TEST_CASE_13,
TEST_CASE_14,
TEST_CASE_15,
]
ITEST_CASE_1 = ["no_applied_labels_for_single_channel", {"independent": False}, grid_1, TypeError]
ITEST_CASE_2 = ["no_applied_labels_for_multi_channel", {"independent": False}, grid_3, TypeError]
INVALID_CASES = [ITEST_CASE_1, ITEST_CASE_2]
class TestKeepLargestConnectedComponent(unittest.TestCase):
@parameterized.expand(VALID_CASES)
def test_correct_results(self, _, args, tensor, expected):
converter = KeepLargestConnectedComponent(**args)
if torch.cuda.is_available():
result = converter(tensor.clone().cuda())
assert torch.allclose(result, expected.cuda())
else:
result = converter(tensor.clone())
assert torch.allclose(result, expected)
@parameterized.expand(INVALID_CASES)
def test_raise_exception(self, _, args, tensor, expected_error):
with self.assertRaises(expected_error):
converter = KeepLargestConnectedComponent(**args)
if torch.cuda.is_available():
_ = converter(tensor.clone().cuda())
else:
_ = converter(tensor.clone())
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | precision-medicine-um.noreply@github.com |
4944988e1174b41fc6e136632d567e3e7c6af551 | 185dd45fff20fa7fa9b559f3653e542b3f5ff67a | /userbot/modules/cybermisc.py | 85df6c4a9816d6de2829c6b479071b774e59e9e9 | [
"MIT"
] | permissive | TrendingTechnology/CyberUserBot | ba9a9f47565f136648ca268db889557652015de5 | cfd96d5026bf78678abcea3f51fbdde328f23fd7 | refs/heads/master | 2023-06-25T12:34:33.640280 | 2021-07-29T13:37:13 | 2021-07-29T13:37:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,432 | py | # Copyright 2021 (C) FaridDadashzade
#
# CyberUserBot - Faridxz
#
# oğurlayan peysərdi #
import requests
import re
import datetime
import logging
import bs4
import os
import asyncio
import time
import html
from telethon import *
from telethon import events
from telethon import utils
from telethon.tl import functions
from datetime import datetime
from userbot.cmdhelp import CmdHelp
from userbot import bot
from telethon.tl.types import UserStatusEmpty, UserStatusLastMonth, UserStatusLastWeek, UserStatusOffline, UserStatusOnline, UserStatusRecently, ChatBannedRights, ChannelParticipantsKicked
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.functions.contacts import BlockRequest, UnblockRequest
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from telethon.tl.types import MessageEntityMentionName
from asyncio import sleep
from userbot.events import register
from userbot import BOTLOG_CHATID, BOTLOG, SUDO_ID
async def get_user_from_event(event):
args = event.pattern_match.group(1).split(':', 1)
extra = None
if event.reply_to_msg_id and not len(args) == 2:
previous_message = await event.get_reply_message()
user_obj = await event.client.get_entity(previous_message.from_id)
extra = event.pattern_match.group(1)
elif len(args[0]) > 0:
user = args[0]
if len(args) == 2:
extra = args[1]
if user.isnumeric():
user = int(user)
if not user:
await event.edit(f"**Xahiş edirəm bir istifadəçiyə cavab verin\nvə ya istifadəçi adı qeyd edin.**")
return
if event.message.entities is not None:
probable_user_mention_entity = event.message.entities[0]
if isinstance(probable_user_mention_entity,
MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
user_obj = await event.client.get_entity(user_id)
return user_obj
try:
user_obj = await event.client.get_entity(user)
except Exception as err:
return await event.edit("Xəta baş verdi! \n **XƏTA**\n", str(err))
return user_obj, extra
async def get_user_from_id(user, event):
if isinstance(user, str):
user = int(user)
try:
user_obj = await event.client.get_entity(user)
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return user_obj
try:
from userbot import client2, client3
except BaseException:
client2 = client3 = None
@register(outgoing=True, disable_errors=True, pattern=r"^\.gkick(?: |$)(.*)")
@register(incoming=True, from_users=SUDO_ID, pattern="^.cgkick$")
async def gspide(rk):
lazy = rk
sender = await lazy.get_sender()
me = await lazy.client.get_me()
if not sender.id == me.id:
rkp = await lazy.edit("`İstifadəçi bütün qruplardan atılır..`")
else:
rkp = await lazy.edit("`İstifadəçi bütün qruplardan atılır...`")
me = await rk.client.get_me()
await rkp.edit(f"**Hazırlanır...**")
my_mention = "[{}](tg://user?id={})".format(me.first_name, me.id)
f"@{me.username}" if me.username else my_mention
await rk.get_chat()
a = b = 0
if rk.is_private:
user = rk.chat
reason = rk.pattern_match.group(1)
else:
rk.chat.title
try:
user, reason = await get_user_from_event(rk)
except BaseException:
pass
try:
if not reason:
reason = 'Gizli'
except BaseException:
return await rkp.edit(f"**Xəta!\nNaməlum istifadəçi.**")
if user:
if user.id == 1527722982:
return await rkp.edit(f"`Xəta!`\n`Bunu C Y B Σ R UserBot sahibinə edə bilmərəm!`")
try:
await rk.client(BlockRequest(user))
await rk.client(UnblockRequest(user))
except BaseException:
pass
testrk = [d.entity.id for d in await rk.client.get_dialogs() if (d.is_group or d.is_channel)]
for i in testrk:
try:
await rk.client.edit_permissions(i, user, view_messages=False)
await rk.client.edit_permissions(i, user, send_messages=True)
a += 1
await rkp.edit(f"**İstifadəçi qrup/kanallardan atılır.\n{a} qrup/kanaldan atıldı...**")
except BaseException:
b += 1
else:
await rkp.edit(f"**Bir istifadəçiyə cavab verin.**")
return await rkp.edit(f"**[{user.first_name}](tg://user?id={user.id}) {a} qrup/kanallardan atıldı.**")
@register(outgoing=True, disable_errors=True, incoming=True, func=lambda e: e.mentioned)
async def ltgm(event):
hmm = await event.get_chat()
if BOTLOG_CHATID:
sender = await event.get_sender()
await asyncio.sleep(5)
if not event.is_private and not (await event.get_sender()).bot:
await event.client.send_message(
BOTLOG_CHATID,
f"#TAG \n<b>Mesajı göndərən : </b><a href = 'tg://user?id={sender.id}'> {sender.first_name}</a>\
\n<b>Qrup : </b><code>{hmm.title}</code>\
\n<b>Mesaj : </b><a href = 'https://t.me/c/{hmm.id}/{event.message.id}'> link</a>",
parse_mode="html",
link_preview=True,
)
e = await event.client.get_entity(int(BOTLOG_CHATID))
fwd_message = await event.client.forward_messages(
e,
event.message,
silent=True
)
else:
if event.is_private:
if not (await event.get_chat()).bot:
await event.client.send_message(
BOTLOG_CHATID,
f"#TAG \n<b>Mesajı göndərən : </b><a href = 'tg://user?id={sender.id}'> {sender.first_name}</a>\
\n<b>ID : </b><code>{sender.id}</code>",
parse_mode="html",
link_preview=True,
)
e = await event.client.get_entity(int(BOTLOG_CHATID))
fwd_message = await event.client.forward_messages(
e,
event.message,
silent=True
)
@register(outgoing=True, pattern="^.pm ?(.*)")
async def pm(event):
p = event.pattern_match.group(1)
m = p.split(" ")
chat_id = m[0]
try:
chat_id = int(chat_id)
except BaseException:
pass
msg = ""
mssg = await event.get_reply_message()
if event.reply_to_msg_id:
await event.client.send_message(chat_id, mssg)
await event.edit("**C Y B Ξ R mesajınızı göndərdi ✔️**")
for i in m[1:]:
msg += i + " "
if msg == "":
return
try:
await event.client.send_message(chat_id, msg)
await event.edit("**C Y B Ξ R mesajınızı göndərdi ✔️**")
except BaseException:
await event.edit("@TheCyberUserBot mesajınızı göndərə bilmədi :(")
@register(outgoing=True, pattern="^.undelete(?: |$)(.*)")
async def undelete(event):
if event.fwd_from:
return
c = await event.get_chat()
if c.admin_rights or c.creator:
a = await bot.get_admin_log(event.chat_id, limit=5, search="", edit=False, delete=True)
for i in a:
await event.reply(i.original.action.message)
else:
await event.edit("Bu əmri yerinə yetirmək üçün admin olmalısınız!")
await asyncio.sleep(3)
await event.delete()
@register(outgoing=True, groups_only=True, disable_errors=True, pattern=r"^\.unbanall(?: |$)(.*)")
async def _(cyber):
await cyber.edit("`Qadağan olunmuş istifadəçiləri axtarıram...`")
p = 0
(await cyber.get_chat()).title
async for i in cyber.client.iter_participants(
cyber.chat_id,
filter=ChannelParticipantsKicked,
aggressive=True,
):
try:
await cyber.client.edit_permissions(cyber.chat_id, i, view_messages=True)
p += 1
except BaseException:
pass
await cyber.edit("`Qadağan olunmuş istifadəçilər siyahıdan silindi...`")
@register(outgoing=True, pattern="^.sendbot (.*)")
async def sendbot(cyber):
if cyber.fwd_from:
return
chat = str(cyber.pattern_match.group(1).split(' ', 1)[0])
link = str(cyber.pattern_match.group(1).split(' ', 1)[1])
if not link:
return await cyber.edit("`Bağışlayın, heçnə tapa bilmədim.`")
botid = await cyber.client.get_entity(chat)
await cyber.edit("```Hazırlanır...```")
async with bot.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=botid))
msg = await bot.send_message(chat, link)
response = await response
await bot.send_read_acknowledge(conv.chat_id)
except YouBlockedUserError:
await cyber.reply(f"`Xahiş edirəm` {chat} `-u blokdan çıxarın və yenidən yoxlayın.`")
return
except :
await cyber.edit("`Belə bir bot yoxdur :(`")
await sleep(2)
return await cyber.delete()
await cyber.edit(f"`Göndərilən mesaj` : {link}"
f"\n`Kimə` : {chat}")
await bot.send_message(cyber.chat_id, response.message)
await bot.send_read_acknowledge(cyber.chat_id)
""" prosesi yerine yetirdikden sonra silmesi ucun """
await cyber.client.delete_messages(conv.chat_id,
[msg.id, response.id])
Help = CmdHelp('cybermisc')
Help.add_command('undelete', None, 'Bir qrupda silinmiş 5 mesajı göndərər.')
Help.add_command('unbanall', None, 'Qrupda qadağan edilmiş bütün istifadəçilərin qadağasını silər.')
Help.add_command('sendbot', '<@botun-istifadeci-adi> <mesaj>', 'Yazdığınız əmri qeyd etdiyiniz bota göndərər və botun cavabını atar')
Help.add()
Help = CmdHelp('pm')
Help.add_command('pm', '<@istifadeci-adi> <mesaj>', 'Qeyd etdiyiniz mesajı istədiyiniz şəxsə göndərər.')
Help.add()
| [
"noreply@github.com"
] | TrendingTechnology.noreply@github.com |
c03346107f1b81fd275332881194d8c389f4f034 | 307d3837d31f9e3728af2b62ca51ebf63fe6ec6b | /hall_of_fame/lysuk96/ETC/LTC_771.py | b7aea3d0e755392baf615e32bfc11226a7635af2 | [] | no_license | ellynhan/challenge100-codingtest-study | 905043497d154b8a7333ca536e536d013f6e7454 | bcdc6d04f13b12ba80b42e066f9d244d7c2cc698 | refs/heads/master | 2023-09-01T14:10:13.481013 | 2023-08-27T14:38:52 | 2023-08-27T14:38:52 | 401,561,230 | 162 | 176 | null | 2023-09-09T14:56:25 | 2021-08-31T03:30:36 | C++ | UTF-8 | Python | false | false | 83 | py | def numJewelsInStones(self, J:str, S:str) -> int:
return sum(s in J for s in S) | [
"wown252@naver.com"
] | wown252@naver.com |
edfad5971dddb8103a563afe3f46f6723ef8c121 | 1273032fadcc1abc6e660a0e076b3ce3b3f893ab | /goccina.com/news/migrations/0007_news_slug.py | 983e617db8e7506c2233352d66476d661df39d7d | [] | no_license | munisisazade/heroku_app | ed998ba147aea6fdef9884f26f2563c6f0a2741c | 65ee5b5b434a1c70bc84d5b7bc63c35c2a6aedb8 | refs/heads/master | 2021-01-10T23:13:06.104198 | 2016-10-11T15:33:40 | 2016-10-11T15:33:40 | 70,607,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-09-23 23:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0006_auto_20160924_0233'),
]
operations = [
migrations.AddField(
model_name='news',
name='slug',
field=models.CharField(blank=True, max_length=255, null=True, verbose_name='Slug'),
),
]
| [
"munisisazade@gmail.com"
] | munisisazade@gmail.com |
aa99931fa026da073c3ccbdea753003a3ddf62ec | 59239e9aecc4b0611cd1f82dad74a13b21c0a97e | /rest/Api/imanwser.py | d4b75c496cfc85b66207de2b6c8ad645710390dc | [] | no_license | charlesXu86/FAQ | 53a8160fef2920c1e5005ea4f55a642071986494 | d5e253f44377f63b3af9accd9eab39fc599ed7a7 | refs/heads/master | 2023-01-10T10:45:21.403238 | 2020-11-11T13:40:23 | 2020-11-11T13:40:23 | 278,859,443 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py | # -*- coding: utf-8 -*-
"""
@Author : Xu
@Software: PyCharm
@File : imanwser.py
@Time : 2020/7/11 6:17 下午
@Desc : im
"""
from model.FAQ import FAQ
def get_imanwser(msg):
result = {
'domain': '',
'content': '',
'type': 0,
'classId': 0,
'confidence': 0.978787,
'errorTime': 0,
'questionId': 0,
'transferFlag': 0,
'relatedQuestion': [],
'questionList': [],
'link': 'zrg'
}
robotUserData = {
"showEvaluate": 0,
"answer_type": "faq",
"is_many_answer": False,
"highLight": 0, # 是否需要前端进行高亮显示,一般在触发敏感词严重时会触发。
"sensitive_hit": "警告", # 敏感词,触发了哪个敏感词就列出来
"docid": 260801,
"search_id": "2156_1001_594726674", # 机器人标准问的Id,用于对机器人进行评价
"raw_query": ""
}
robot = FAQ(usedVec=False)
anwser = robot.answer(msg, 'simple_pos')
result['user_query'] = msg
robotUserData['raw_query'] = msg
result['content'] = anwser
result['robotUserData'] = robotUserData
return result
def get_imanwser2(msg):
result = {
'content': '',
'confidence': 0.978787
}
robot = FAQ(usedVec=False)
anwser = robot.answer(msg, 'simple_pos')
result['user_query'] = msg
result['content'] = anwser
return result | [
"charlesxu86@163.com"
] | charlesxu86@163.com |
b2aff70c810164f73e9a845e63406d1878b09603 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/nos/v6_0_2f/brocade_firmware_rpc/activate_status/input/__init__.py | 5e225c3a63434f93fb5ae9625fe499cce76048a9 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,831 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class input(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-firmware - based on the path /brocade_firmware_rpc/activate-status/input. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__rbridge_id',)
_yang_name = 'input'
_rest_name = 'input'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__rbridge_id = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9]+(-[0-9]+)?(,[0-9]+(-[0-9]+)?)*'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'all'}),], is_leaf=True, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u"Please enter 'all' for activating all nodes in the logical-chassis or individual rbridge-ids of the form 1,2,3-6"}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='rbridge-ids-all-type', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_firmware_rpc', u'activate-status', u'input']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'activate-status', u'input']
def _get_rbridge_id(self):
"""
Getter method for rbridge_id, mapped from YANG variable /brocade_firmware_rpc/activate_status/input/rbridge_id (rbridge-ids-all-type)
"""
return self.__rbridge_id
def _set_rbridge_id(self, v, load=False):
"""
Setter method for rbridge_id, mapped from YANG variable /brocade_firmware_rpc/activate_status/input/rbridge_id (rbridge-ids-all-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_rbridge_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rbridge_id() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9]+(-[0-9]+)?(,[0-9]+(-[0-9]+)?)*'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'all'}),], is_leaf=True, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u"Please enter 'all' for activating all nodes in the logical-chassis or individual rbridge-ids of the form 1,2,3-6"}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='rbridge-ids-all-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """rbridge_id must be of a type compatible with rbridge-ids-all-type""",
'defined-type': "brocade-firmware:rbridge-ids-all-type",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9]+(-[0-9]+)?(,[0-9]+(-[0-9]+)?)*'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'all'}),], is_leaf=True, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u"Please enter 'all' for activating all nodes in the logical-chassis or individual rbridge-ids of the form 1,2,3-6"}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='rbridge-ids-all-type', is_config=True)""",
})
self.__rbridge_id = t
if hasattr(self, '_set'):
self._set()
def _unset_rbridge_id(self):
self.__rbridge_id = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9]+(-[0-9]+)?(,[0-9]+(-[0-9]+)?)*'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'all'}),], is_leaf=True, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u"Please enter 'all' for activating all nodes in the logical-chassis or individual rbridge-ids of the form 1,2,3-6"}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='rbridge-ids-all-type', is_config=True)
rbridge_id = __builtin__.property(_get_rbridge_id, _set_rbridge_id)
_pyangbind_elements = {'rbridge_id': rbridge_id, }
| [
"badaniya@brocade.com"
] | badaniya@brocade.com |
090cb207ec1894ba12c2bc9f032823a404b11034 | b3c47795e8b6d95ae5521dcbbb920ab71851a92f | /AtCoder/AtCoder Beginner Contest 261/C.py | 80c46b4e0f48dc7c68108583ac091a34d9dde988 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | Wizmann/ACM-ICPC | 6afecd0fd09918c53a2a84c4d22c244de0065710 | 7c30454c49485a794dcc4d1c09daf2f755f9ecc1 | refs/heads/master | 2023-07-15T02:46:21.372860 | 2023-07-09T15:30:27 | 2023-07-09T15:30:27 | 3,009,276 | 51 | 23 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | from collections import defaultdict
n = int(raw_input())
d = defaultdict(int)
for i in xrange(n):
name = raw_input()
if name not in d:
d[name] += 1
print name
else:
print "%s(%d)" % (name, d[name])
d[name] += 1
'''
^^^^^TEST^^^^
5
newfile
newfile
newfolder
newfile
newfolder
---------
newfile
newfile(1)
newfolder
newfile(2)
newfolder(1)
$$$TEST$$$$
^^^^TEST^^^^
11
a
a
a
a
a
a
a
a
a
a
a
------------
a
a(1)
a(2)
a(3)
a(4)
a(5)
a(6)
a(7)
a(8)
a(9)
a(10)
$$$$TEST$$$$
'''
| [
"noreply@github.com"
] | Wizmann.noreply@github.com |
f97fc7bc26ed55e11155f9b04cd3b80b9d43a42d | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/baiduads/imagesegment/model/update_image_segment_response_wrapper_body.py | 9612937e3f8c852bc61890fa9a48e8d8df79d772 | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 11,216 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from baiduads.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from baiduads.exceptions import ApiAttributeError
def lazy_import():
from baiduads.imagesegment.model.image_segment_type import ImageSegmentType
globals()['ImageSegmentType'] = ImageSegmentType
class UpdateImageSegmentResponseWrapperBody(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': ([ImageSegmentType],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""UpdateImageSegmentResponseWrapperBody - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ([ImageSegmentType]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""UpdateImageSegmentResponseWrapperBody - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ([ImageSegmentType]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| [
"jiangyuan04@baidu.com"
] | jiangyuan04@baidu.com |
3246316975880ba09b2b8cf825732fd6fea112a2 | f4c0172e70ca5ffbe01695245e82a28291f88d04 | /v0.5.3-all/input_data.py | d80d7a4534edc1daf6653dc858591f3c3196b1fa | [] | no_license | huangxinkid/DeepLearning_Wavelet-LSTM | a84e667d5f2db477ac5a9993d8ae329ec9fd115f | b726f99a8631fc48e6943655ace222b0f6b0290b | refs/heads/master | 2020-03-24T07:11:52.832149 | 2018-05-30T18:43:38 | 2018-05-30T18:43:38 | 142,556,218 | 0 | 1 | null | 2018-07-27T09:21:18 | 2018-07-27T09:21:18 | null | UTF-8 | Python | false | false | 5,692 | py | # coding:utf-8
import os.path
import sys
import re
import os
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from MyController import Algorithm_CWT
from Model.Seg import SegFile
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#python pkl 文件读写
import pickle as pickle
import json
def myJsonLoad(filePath):
'''把文件打开从字符串转换成数据类型'''
with open(filePath,'rb') as load_file:
load_dict = json.load(load_file)
return load_dict
class MyData():
def __init__(self):
self.data_filePath = []
self.data_fileName = []
self.data_tpye = []
self.data = []
self.labels = []
# 遍历指定目录,显示目录下的所有文件名
def eachFile(filepath):
pathDir = os.listdir(filepath)
data = MyData()
for allDir in pathDir:
child = os.path.join('%s/%s' % (filepath, allDir))
if os.path.isfile(child):
data.data_filePath.append(child)
data.data_fileName.append(allDir)
theTpye = re.split('\.',allDir)[0]
# print(theTpye)
data.data_tpye.append( theTpye )
# # 显示
# for i in array:
# print(i)
return data
def myReadFile(py_data):
# 新建一个Session
with tf.Session() as sess:
# path = py_data.data_filePath[0]
for path in py_data.data_filePath:
# 读取文件
image_raw_data = tf.gfile.FastGFile(path, 'rb').read()
# 解码
img_data = tf.image.decode_jpeg(image_raw_data)
# print(img_data)
# 转灰度图
# img_data = sess.run(tf.image.rgb_to_grayscale(img_data))
# 改变图片尺寸
resized = tf.image.resize_images(img_data, [28, 28], method=0)
# 设定 shape
# resized = tf.reshape(resized, [28, 28, 1]) #最后一维代表通道数目,如果是rgb则为3
resized = tf.reshape(resized, [28, 28, 3]) #最后一维代表通道数目,如果是rgb则为3
# 标准化
standardization_image = tf.image.per_image_standardization(resized)#标准化
# print(standardization_image)
# print(standardization_image.eval())
resized = tf.reshape(standardization_image, [-1]) #最后一维代表通道数目,如果是rgb则为3
# resized = tf.reshape(resized, [-1]) #最后一维代表通道数目,如果是rgb则为3
## 链接
## resized = tf.expand_dims(resized, 0) # 增加一个维度
## print(resized)
## print(py_data.data)
## test_data = tf.concat(0, [test_data, resized])
py_data.data.append(resized.eval())
'''
# #验证数据转换正确
resized = tf.reshape(py_data.data[0], [28, 28, 3])
resized = np.asarray(resized.eval(), dtype='uint8')
plt.imshow(resized)
plt.show()
'''
def saveData(py_data, filePath_data, filePath_labels):
    """Pickle ``py_data.data`` (as a numpy array) and ``py_data.labels`` to
    the given paths, removing any stale files first.

    Args:
        py_data: object exposing ``data`` (sequence convertible by
            ``np.array``) and ``labels`` attributes.
        filePath_data (str): destination for the pickled data array.
        filePath_labels (str): destination for the pickled labels.
    """
    # Fix: dropped the dead ``pass`` statement and the dead triple-quoted
    # string that previously preceded this code; behavior is unchanged.
    data = np.array(py_data.data)
    labels = py_data.labels
    # Remove stale outputs so old pickles are never left behind.
    for path in (filePath_data, filePath_labels):
        if os.path.exists(path):
            os.remove(path)
    with open(filePath_data, 'wb') as f:
        pickle.dump(data, f)
    with open(filePath_labels, 'wb') as f:
        pickle.dump(labels, f)
    print('\ndone!')
def run(filePath_loadData, filePath_data, filePath_labels):
    """Scan ``filePath_loadData`` (path must NOT end with '/') and print the
    discovered file names.

    The read/convert/save pipeline is currently disabled; the save paths are
    accepted but unused until those calls are re-enabled.
    """
    loadData = eachFile(filePath_loadData)
    # myReadFile(loadData)
    # saveData(loadData, filePath_data, filePath_labels)
    print(loadData.data_fileName)
def opeanFile(fileName):
    """Load a ``.seg`` file and run the wavelet transform on tape #1,
    printing progress and the resulting matrix dimensions.

    (Name typo "opeanFile" kept: external callers use it.)
    """
    segFile = SegFile()
    if segFile.loadFile(fileName) != 0:
        print('error!')
        return
    print('ok')
    print(len(segFile.dataList[1].data))
    print('ok')
    cwtmatr = Algorithm_CWT.MyWavelets(segFile.dataList[1].data, 128)
    print(type(cwtmatr))
    print(len(cwtmatr))
    print(len(cwtmatr[0]))
if __name__ == "__main__":
    # Report the interpreter's default encoding (message text is Chinese).
    print('目前系统的编码为:',sys.getdefaultencoding())
    ########################################################################
    ''' 训练数据生成 '''
    # Training-data generation: load the tag file and print the first four
    # items of every tag entry, then open the first .seg file.
    path = 'C:/锚索测量数据库/LSTMs训练数据/'
    path_TagJson = 'C:/锚索测量数据库/LSTMs训练数据/tag.json'
    TagDict = myJsonLoad(path_TagJson)
    for i in TagDict:
        for j in range(1,5):
            print(i['items'][str(j)])
    opeanFile(path + '1.seg')
    # run( path, 'Model/train_seg_data.plk', 'Model/train_seg_labels.plk')
    # NOTE: the path passed to run() must not end with '/'
| [
"hello.sea@qq.com"
] | hello.sea@qq.com |
d1495d37ceae7e3add58730e45b460716c5aef2f | 8e52db8ea2d4911ab852420e17b40ac9c0d6099b | /app/apps/coreExtend/context_processors.py | 825742d8964ccb45a7983f03bbb8f40dbc6e31a3 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | hilam/alifewellplayed.com | 11519cfea2507e0bc98393ae31ded1b1a7eb68f5 | 008645d1ccf1774f9fd3a4440cd82a9fc17e944e | refs/heads/master | 2021-05-06T11:16:49.863554 | 2017-12-03T03:50:18 | 2017-12-03T03:50:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | from django.conf import settings
import datetime
def template_settings(request):
    """Django context processor: expose the site-wide settings plus the
    current request's host information to every template."""
    host = request.get_host()
    context = {
        'network_name': settings.SITE_NAME,
        'network_desc': settings.SITE_DESC,
        'network_author': settings.SITE_AUTHOR,
        'network_url': settings.SITE_URL,
        'network_register': settings.ALLOW_NEW_REGISTRATIONS,
    }
    context['BASE_URL'] = 'http://' + host
    context['BASE_HOST'] = host.split(':')[0]
    return context
def template_times(request):
    """Django context processor: expose the current time ("today") and the
    time 24 hours ago ("yesterday") to every template."""
    times = {}
    times['today'] = datetime.datetime.now()
    times['yesterday'] = datetime.datetime.now() - datetime.timedelta(days=1)
    return times
| [
"underlost@gmail.com"
] | underlost@gmail.com |
efe7f75a141578bfe254481977e20d289245b4f6 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2527/60829/277886.py | 63da376f91a582d2e5ce16635bb2d6e52a6c1726 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | x=list(input())
# Read three integer parameters from stdin.
a=int(input())
b=int(input())
c=int(input())
d=[]
res=[]
# Filter the previously-read list ``x``; when a == 1 an extra condition on
# position 2 applies.
# NOTE(review): ``x`` comes from ``list(input())`` above, so each ``y`` is a
# single character — ``y[2]``/``y[3]``/``y[4]`` will raise IndexError, and
# ``y[2]==1`` compares a str to an int (always False). Verify intent.
for i in range(0,len(x)):
    y=x[i]
    if a==1:
        if y[2]==1 and y[3]<b and y[4]<c:
            res.append(y)
    else:
        if y[3]<b and y[4]<c:
            res.append(y)
# NOTE(review): ``d`` is rebound from a list to a dict here; ``d[i][1]`` was
# presumably meant to be ``res[i][1]``, and dicts have no ``.sort()`` — this
# block will raise at runtime as written.
d={}
for i in range(0,len(res)):
    d[res[i][0]]=d[i][1]
d.sort()
print(d)
"1069583789@qq.com"
] | 1069583789@qq.com |
424bcd83af4331477aaa3e80b33dd5e98903daa6 | d3a204a438d7f2d439b3f18be9cf1271f5ab475f | /project1_gpu/transformer_model.py | 4d2a48f72b58a478d3ec398189f3156529324d23 | [] | no_license | saga9017/laboratory | f2f0d07fb691bd89e6f614cca7e377e82da5f656 | 77c53c36a99c0da17f23df235edacbcf83469b42 | refs/heads/master | 2021-05-16T19:32:23.477377 | 2020-03-27T06:44:16 | 2020-03-27T06:44:16 | 250,438,442 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,834 | py | import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from datetime import datetime
import numpy as np
import unicodedata
import re
import random
import math
import os
from preprocessing import matrix_max_len_re_label_dic
# Pin all CUDA work to GPU 0 and report availability.
os.environ["CUDA_VISIBLE_DEVICES"] = '0' # GPU number to use
print('gpu :', torch.cuda.is_available())
# Use float64 everywhere and enable autograd anomaly detection (slow; for debugging).
torch.set_default_dtype(torch.double)
torch.autograd.set_detect_anomaly(True)
batch_size = 128
unknown_number = 1
# Fix the RNG for reproducible parameter initialization.
torch.manual_seed(10)
# Pretrained embedding matrix, max sequence length, and index->label mapping
# produced by the preprocessing module.
embedded_matrix, max_len, re_label_dic =matrix_max_len_re_label_dic()
def norm_forward(X, gamma, beta):
    """Layer-normalize ``X`` over its last (feature) axis.

    ``X`` is (batch, seq, features); ``gamma``/``beta`` scale and shift the
    normalized values. 1e-8 guards against division by zero.
    """
    mean = X.mean(dim=2, keepdim=True)
    variance = X.var(dim=2, keepdim=True)
    normalized = (X - mean) / torch.sqrt(variance + 1e-8)
    return gamma * normalized + beta
def norm_forward_in_attention(X, gamma, beta):
    """Normalize attention scores over their last axis, then scale/shift
    per head.

    ``X`` is (8, batch, q_len, k_len) — the head count is hard-coded to 8,
    matching the rest of this module. ``gamma``/``beta`` are (8, 1) and are
    broadcast against the flattened per-head scores.
    """
    mean = X.mean(dim=3, keepdim=True)
    variance = X.var(dim=3, keepdim=True)
    normalized = (X - mean) / torch.sqrt(variance + 1e-8)
    flat = normalized.view(8, -1)
    scaled = gamma * flat + beta
    return scaled.view(8, X.shape[1], X.shape[2], X.shape[3])
def batchnorm_forward(X, gamma, beta):
    """Batch-normalize ``X`` over its first (batch) axis, then scale by
    ``gamma`` and shift by ``beta``. 1e-8 guards against division by zero."""
    mean = torch.mean(X, dim=0)
    variance = torch.var(X, dim=0)
    normalized = (X - mean) / torch.sqrt(variance + 1e-8)
    return gamma * normalized + beta
def sigmoid(z):
    """Elementwise logistic function: 1 / (1 + e^-z)."""
    exp_neg = torch.exp(-z)
    return 1 / (1 + exp_neg)
class Encoder(nn.Module):
    """One Transformer encoder layer: 8-head self-attention followed by a
    position-wise feed-forward network, each wrapped in an add-&-normalize
    step.

    NOTE(review): the head count (8) is hard-coded throughout, and every
    parameter is moved to the GPU at construction — this class requires CUDA.
    """
    def __init__(self, hidden_dim=300, hidden_dim_=512): # default=512
        # Assign instance variables
        super(Encoder, self).__init__()
        self.hidden = hidden_dim_
        # Softmax over the last axis of the (head, batch, q, k) score tensor.
        self.softmax = nn.Softmax(3)
        # Q/K/V/output projections for the attention sub-layer.
        self.WQ_e_L1 = Parameter(torch.randn(hidden_dim_, hidden_dim).cuda())
        self.WK_e_L1 = Parameter(torch.randn(hidden_dim_, hidden_dim).cuda())
        self.WV_e_L1 = Parameter(torch.randn(hidden_dim_, hidden_dim).cuda())
        self.WO_e_L1 = Parameter(torch.randn(hidden_dim_, hidden_dim).cuda())
        # Feed-forward network: hidden -> 4*hidden -> hidden.
        self.W1_e = Parameter(torch.randn(hidden_dim, 4 * hidden_dim).cuda())
        self.b1_e = Parameter(torch.randn(4 * hidden_dim).cuda())
        self.W2_e = Parameter(torch.randn(4 * hidden_dim, hidden_dim).cuda())
        self.b2_e = Parameter(torch.randn(hidden_dim).cuda())
        # Per-head scale/shift used to normalize attention scores.
        self.gamma_a1 = Parameter(torch.ones((8, 1)).cuda())
        self.beta_a1 = Parameter(torch.zeros((8, 1)).cuda())
        # Scale/shift for the two add-&-norm steps.
        self.gamma1 = Parameter(torch.tensor(1.0).cuda())
        self.beta1 = Parameter(torch.tensor(0.0).cuda())
        self.gamma2 = Parameter(torch.tensor(1.0).cuda())
        self.beta2 = Parameter(torch.tensor(0.0).cuda())
    def multi_head_self_attention(self, x): # x : (batch_size, input_len, hidden_dim)
        """Scaled dot-product self-attention with 8 heads; returns a tensor
        shaped (batch_size, input_len, hidden_dim)."""
        d_k = x.shape[-1]
        # Project and split into 8 heads: (head, batch, len, hidden_/8).
        q=torch.matmul(x, self.WQ_e_L1.transpose(0,1)).view(x.shape[0], x.shape[1], -1, 8).transpose(3,2).transpose(2,1).transpose(1,0)
        k=torch.matmul(x, self.WK_e_L1.transpose(0,1)).view(x.shape[0], x.shape[1], -1, 8).transpose(3,2).transpose(2,1).transpose(1,0)
        v=torch.matmul(x, self.WV_e_L1.transpose(0,1)).view(x.shape[0], x.shape[1], -1, 8).transpose(3,2).transpose(2,1).transpose(1,0)
        # Scaled scores, normalized per head, then softmax over keys.
        e_pre=torch.matmul(q.clone(), k.clone().transpose(2,3)) / math.sqrt(d_k)
        e = norm_forward_in_attention(e_pre, self.gamma_a1, self.beta_a1)
        alpha=self.softmax(e)
        head1=torch.matmul(alpha.clone(), v.clone())
        # Concatenate heads back along the feature axis and project out.
        a = torch.cat((head1[0], head1[1], head1[2], head1[3], head1[4], head1[5], head1[6], head1[7]), 2)
        result = torch.matmul(a, self.WO_e_L1)
        return result # (input_len, hidden)
    def add_Norm(self, x, y, gamma, beta): # x : (input_len, hidden)
        """Residual connection followed by layer normalization."""
        e = norm_forward(x + y, gamma, beta)
        return e
    def ffn1(self, x): # feed forward network x : (batch_size, input_len, hidden)
        """Position-wise feed-forward network: linear -> ReLU -> linear."""
        linear1 = torch.matmul(x, self.W1_e) + self.b1_e # (batch_size, input_len, 4*hidden)
        relu = torch.max(torch.zeros_like(linear1).cuda(), linear1) # (batch_size, input_len, 4*hidden)
        linear2 = torch.matmul(relu, self.W2_e) + self.b2_e # (batch_size, input_len, hidden)
        return linear2 # (input_len, 4*hidden)
    def forward(self, x):
        """Attention sub-layer + add&norm, then FFN sub-layer + add&norm."""
        x1=self.multi_head_self_attention(x)
        x2=self.add_Norm(x1, x, self.gamma1, self.beta1)
        x3=self.ffn1(x2)
        x4=self.add_Norm(x3, x2, self.gamma2, self.beta2)
        return x4
class transformer(nn.Module):
    """Transformer-encoder classifier: 6 stacked :class:`Encoder` layers with
    dense residual inputs, pooled through ``V_e`` and projected by ``V_d``
    into ``word_dim2`` classes.

    Relies on the module-level globals ``embedded_matrix``, ``max_len``,
    ``re_label_dic`` and ``batch_size``; requires CUDA.
    """
    def __init__(self, word_dim2, hidden_dim=300, hidden_dim_=512, label_smoothing=0.1): # default=512
        # Assign instance variables
        super(transformer, self).__init__()
        self.hidden = hidden_dim_
        self.word_dim2=word_dim2
        # NOTE(review): label_smoothing is stored but never applied in bptt.
        self.label_smoothing=label_smoothing
        # Pooling vector over the hidden axis and the class projection.
        self.V_e=Parameter(torch.randn(hidden_dim, 1).cuda())
        self.V_d = Parameter(torch.randn(max_len, word_dim2).cuda())
        self.Loss = 0
        self.softmax = nn.Softmax(2)
        # Six independent encoder layers.
        self.encoder1=Encoder(hidden_dim, hidden_dim_)
        self.encoder2 = Encoder(hidden_dim, hidden_dim_)
        self.encoder3 = Encoder(hidden_dim, hidden_dim_)
        self.encoder4 = Encoder(hidden_dim, hidden_dim_)
        self.encoder5 = Encoder(hidden_dim, hidden_dim_)
        self.encoder6 = Encoder(hidden_dim, hidden_dim_)
        # Scale/shift for the final normalization before softmax.
        self.gamma = Parameter(torch.tensor(1.0).cuda())
        self.beta = Parameter(torch.tensor(0.0).cuda())
    def input_embedding(self, x): # x: (batch, input_len, )
        """Look up pretrained embeddings for the token-id tensor ``x``."""
        return embedded_matrix[x] # (input_len, hidden_dim)
    def Positional_Encoding(self, x): # x : (batch_size, input_len, hidden_dim) or (batch_size, output_len, hidden_dim)
        """Add sinusoidal positional encodings (Vaswani et al.) to ``x``.

        NOTE(review): the table is rebuilt with Python loops on every call —
        expensive; it could be precomputed once per sequence length.
        """
        table = torch.zeros_like(x[0]).cuda()
        a, b = table.shape
        for pos in range(a):
            for i in range(b):
                if i % 2 == 0:
                    table[pos][i] = math.sin(pos / math.pow(10000, i / b))
                else:
                    table[pos][i] = math.cos(pos / math.pow(10000, (i - 1) / b))
        return x + table
    def forward_propagation(self, x):
        """Embed, encode through 6 layers with dense residuals, pool, and
        return class probabilities shaped (batch, word_dim2)."""
        x1 = self.input_embedding(x) # (input_len, hidden)
        x2 = self.Positional_Encoding(x1) # (input_len, hidden)
        ########################################################
        # Each layer receives the sum of all previous layer outputs.
        x3=self.encoder1.forward(x2+x1)
        x4 = self.encoder2.forward(x3+x2+x1)
        x5 = self.encoder3.forward(x4+x3+x2+x1)
        x6 = self.encoder4.forward(x5+x4+x3+x2+x1)
        x7 = self.encoder5.forward(x6+x5+x4+x3+x2+x1)
        x8 = self.encoder6.forward(x7+x6+x5+x4+x3+x2+x1)
        x9 = torch.matmul(x8, self.V_e)
        ########################################################
        x10 = self.softmax(norm_forward(x9.transpose(1,2).matmul(self.V_d), self.gamma, self.beta)).view(x9.shape[0], -1)
        return x10
    def bptt(self, x, y): # (batch_size, out_len)
        """Mean negative log-likelihood of the gold labels ``y``."""
        x10 = self.forward_propagation(x)
        loss=torch.sum(-torch.log(torch.gather(x10, 1, y.view(-1,1))))/x.shape[0]
        return loss
    def predict(self, x):
        """Argmax prediction mapped through ``re_label_dic`` to label ids.

        NOTE(review): duplicates forward_propagation's body — could call it
        directly; kept identical here.
        """
        x1 = self.input_embedding(x) # (input_len, hidden)
        x2 = self.Positional_Encoding(x1) # (input_len, hidden)
        ########################################################
        x3 = self.encoder1.forward(x2 + x1)
        x4 = self.encoder2.forward(x3 + x2 + x1)
        x5 = self.encoder3.forward(x4 + x3 + x2 + x1)
        x6 = self.encoder4.forward(x5 + x4 + x3 + x2 + x1)
        x7 = self.encoder5.forward(x6 + x5 + x4 + x3 + x2 + x1)
        x8 = self.encoder6.forward(x7 + x6 + x5 + x4 + x3 + x2 + x1)
        x9 = torch.matmul(x8, self.V_e)
        ########################################################
        x10 = self.softmax(norm_forward(x9.transpose(1, 2).matmul(self.V_d), self.gamma, self.beta)).view(x9.shape[0],
                                                                                                          -1)
        output=torch.tensor([re_label_dic[i.item()] for i in torch.argmax(x10, 1)]).cuda()
        return output
    # Performs one step of SGD.
    def numpy_sdg_step(self, x, y, learning_rate):
        """One optimizer step on batch (x, y); returns the batch loss.

        NOTE(review): a fresh Adam optimizer is built every step, so Adam's
        moment estimates never accumulate — effectively plain SGD with
        Adam's first-step scaling. Confirm whether this is intentional.
        """
        # Calculate the gradients
        optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)
        loss = self.bptt(x, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        return loss
    def epoch_eval(self, x_dev, y_dev):
        """Accuracy of :meth:`predict` over (x_dev, y_dev), batched."""
        score=torch.tensor(0.0)
        if len(y_dev) % batch_size == 0:
            total_step = int(len(y_dev) / batch_size)
        else:
            total_step = int(len(y_dev) / batch_size) + 1
        for i in range(total_step):
            if i == total_step - 1:
                # Last batch may be short; take the remainder.
                flags=(self.predict(x_dev[i * batch_size:])==y_dev[i * batch_size:])
                score+=torch.sum(flags)
            else:
                flags = (self.predict(x_dev[i * batch_size:(i + 1) * batch_size]) == y_dev[i * batch_size:(i + 1) * batch_size])
                score += torch.sum(flags)
        return score/len(y_dev)
    def train_with_batch(self, X_train, y_train, X_dev, y_dev, nepoch=100, evaluate_loss_after=5):
        """Train for ``nepoch`` epochs with a Noam-style learning-rate
        schedule, printing progress every 10 steps and dev accuracy每 epoch.

        NOTE(review): ``evaluate_loss_after`` and ``losses`` are unused.
        Returns the mean loss over the final reporting window.
        """
        # We keep track of the losses so we can plot them later
        losses = []
        num_examples_seen = 1
        Loss_len = 0
        if len(y_train) % batch_size == 0:
            total_step = int(len(y_train) / batch_size)
        else:
            total_step = int(len(y_train) / batch_size) + 1
        for epoch in range(nepoch):
            # For each training example...
            for i in range(total_step):
                # One SGD step
                # Noam schedule: warmup over 400 steps, then inverse sqrt decay.
                lrate = math.pow(self.hidden, -0.5) * min(math.pow(num_examples_seen, -0.5),
                                                          num_examples_seen * math.pow(400, -1.5))
                if i == total_step - 1:
                    self.Loss += self.numpy_sdg_step(X_train[i * batch_size:],
                                                     y_train[i * batch_size:], lrate )
                else:
                    self.Loss += self.numpy_sdg_step(X_train[i * batch_size:(i + 1) * batch_size],
                                                     y_train[i * batch_size:(i + 1) * batch_size], lrate )
                Loss_len += 1
                if num_examples_seen == total_step*nepoch:
                    # Final step: record the loss to return.
                    last_loss= self.Loss.item() / Loss_len
                elif num_examples_seen % 10 == 0:
                    # Periodic progress report (message text includes Korean).
                    time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    print(time, ' ', int(100 * num_examples_seen / (total_step * nepoch)), end='')
                    print('% 완료!!!', end='')
                    print(' lr :', lrate , ' loss :', self.Loss.item() / Loss_len)
                    self.Loss = 0
                    Loss_len = 0
                num_examples_seen += 1
            print('dev accuracy :', self.epoch_eval(X_dev, y_dev).item())
        return last_loss
"saga9017@korea.ac.kr"
] | saga9017@korea.ac.kr |
85d39ae7b28e171faad85e8dc46454c04082b5b2 | 9e765b38a03c2996e221a42c2a0dbc0fe02824cb | /cracking_the_coding_interview_qs/17.26/sparse_similarity.py | 8f2387f5edc2669ca227f3afc0c75688a110fa18 | [
"Apache-2.0"
] | permissive | angelusualle/algorithms | f709b4ae0c3275cece204d5fb56fd6ec34b4683b | 86286a49db2a755bc57330cb455bcbd8241ea6be | refs/heads/main | 2023-07-02T19:25:11.720114 | 2021-08-12T16:33:00 | 2021-08-12T16:33:00 | 269,791,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 599 | py | from collections import defaultdict
# O(DW + PW): D documents, W words per document, P similar document pairs.
def sparse_similarity(doc_words):
    """Compute Jaccard similarity between every pair of documents that share
    at least one word.

    Args:
        doc_words: mapping of document id -> set of words in that document.

    Returns:
        list of strings formatted as "<doc> to <doc> : <similarity>";
        each similar pair appears twice (once per direction).
    """
    # Inverted index: word -> documents containing it.
    word_to_docs = defaultdict(set)
    for doc_id, words in doc_words.items():
        for word in words:
            word_to_docs[word].add(doc_id)
    similarities = []
    for doc_id, words in doc_words.items():
        # Candidate neighbors: any other document sharing a word.
        neighbors = set()
        for word in words:
            neighbors.update(word_to_docs[word])
        neighbors.discard(doc_id)
        for other in neighbors:
            overlap = len(words & doc_words[other])
            union = len(words | doc_words[other])
            similarities.append('%s to %s : %f' % (doc_id, other, overlap / union))
    return similarities
| [
"angelusualle@gmail.com"
] | angelusualle@gmail.com |
d01244a9f5f1e964c98b499b578a85e9ac829603 | 5a545262f7c053c1cfd1f7984664e3220c745161 | /exit_queue_tests/exit_queue_tester.py | 2be58b9aa2c02c4672079958833c81544f2b89c8 | [
"MIT"
] | permissive | ethereum/research | 2c523e5796cfdb6055e0107dc1768fbf164ecad0 | bb873f8ad0e673803ec6a55be26678e1f99b9ece | refs/heads/master | 2023-09-04T19:11:51.507361 | 2023-08-30T01:52:05 | 2023-08-30T01:52:05 | 42,808,596 | 1,774 | 603 | MIT | 2023-04-21T07:20:21 | 2015-09-20T10:13:12 | Python | UTF-8 | Python | false | false | 4,081 | py | from numpy.random import poisson
import math
# Target active staker size
TARGET_AMOUNT_STAKING = 312500
# Average time staking before withdrawal
AVG_STAKING_TIME = 500
# How many withdrawals are permitted in
# one day given a certain validator count?
def withdrawals_per_day(validators, total_eth_exiting):
    """Daily withdrawal capacity: 1% of the validator count (floored).

    ``total_eth_exiting`` is accepted for signature compatibility with the
    alternative rate formulas experimented with previously, but the current
    rule ignores it.
    """
    daily_capacity = validators // 100
    return daily_capacity
def get_max_staker_size(target=None):
    """Get the size of the largest staker under a Zipf-like distribution.

    Assumes a power-law-ish distribution where staker sizes are the largest
    size divided by powers of two; finds the largest ``size`` whose implied
    total stake stays below ``target``.

    Args:
        target (int): total amount staking to fit under; defaults to the
            module-level TARGET_AMOUNT_STAKING. (Generalized from the
            original zero-argument version — backward compatible.)

    Returns:
        int: the largest staker size.
    """
    if target is None:
        target = TARGET_AMOUNT_STAKING

    def get_sum(sz):
        # Total stake implied by a largest staker of size ``sz``:
        # sum over inc = 1, 2, 4, ... of (sz // inc) * inc.
        tot = 0
        inc = 1
        while sz // inc:
            tot += (sz // inc) * inc
            inc *= 2
        return tot

    # Binary-descent search for the largest size with get_sum(size) < target.
    size = 0
    offset = target
    while offset:
        if get_sum(size + offset) < target:
            size += offset
        else:
            offset //= 2
    return size
# As a simplification, we make all stakers have validator sizes
# be close to the max size divided by a power of two
STAKER_SIZES = [get_max_staker_size()]
while STAKER_SIZES[-1] > 1:
    STAKER_SIZES.append(STAKER_SIZES[-1] // 2)
# Active and not yet exiting stakers (size -> count)
stakers = {}
# Exiting stakers (size -> count)
exiting = {}
# The exit queue: list of (size, count, day-entered) tuples, FIFO
exit_queue = []
# Total eth exiting
total_eth_exiting = 0
# How much of the first exiter's deposit we have processed
processing_current = 0
# Fill the staker set initially: 2^i stakers (Poisson-sampled) of the
# i-th largest size, matching the Zipf-ish assumption above.
for i, sz in enumerate(STAKER_SIZES):
    stakers[sz] = poisson(2**i)
    sz //= 2
# Count withdrawn stakers of each size, and total delays
# incurred by them, so we can eventually compute the average
withdrawn = {}
tot_delays = {}
print("Total staking ETH:", sum(k * v for k,v in stakers.items()))
for day in range(10000):
    # Deposit new stakers at the rate needed to maintain the equilibrium size
    for i, sz in enumerate(STAKER_SIZES):
        stakers[sz] = stakers.get(sz, 0) + poisson(2**i / AVG_STAKING_TIME)
        sz //= 2
    # Each staker has a 1/AVG_STAKING_TIME probability of deciding to leave each day
    for k in stakers.keys():
        exit_count = min(poisson(stakers[k] / AVG_STAKING_TIME), stakers[k])
        if exit_count > 0:
            exit_queue.append((k, exit_count, day))
            stakers[k] -= exit_count
            exiting[k] = exiting.get(k, 0) + exit_count
            total_eth_exiting += exit_count * k
    total_validators = sum(k * v for k,v in stakers.items()) + sum(k * v for k,v in exiting.items())
    # Process the queue
    queue_to_empty_today = withdrawals_per_day(total_validators, total_eth_exiting)
    while queue_to_empty_today > 0 and len(exit_queue) > 0:
        key, exit_count, exit_day = exit_queue[0]
        # Partially process the first exiter (exit next loop)
        if key * exit_count > queue_to_empty_today + processing_current:
            processing_current += queue_to_empty_today
            queue_to_empty_today = 0
        # Finish processing the first exiter (continue next loop)
        # NOTE(review): processing_current is zeroed BEFORE it is used in the
        # subtraction below, so the already-processed portion is not credited
        # — looks like the two statements are in the wrong order; confirm.
        else:
            processing_current = 0
            queue_to_empty_today -= key * exit_count - processing_current
            exit_queue.pop(0)
            exiting[key] -= exit_count
            total_eth_exiting -= exit_count * key
            withdrawn[key] = withdrawn.get(key, 0) + exit_count
            tot_delays[key] = tot_delays.get(key, 0) + (day - exit_day) * exit_count
    # Print a status report once every 1000 simulated days.
    if day % 1000 == 999:
        print("Report for day %d: %d total validators %d ETH in exit queue" % ((day+1), total_validators, total_eth_exiting))
        print("Total delays in days")
        for key in STAKER_SIZES:
            print("%d: % .3f (min %.3f)" % (key, (tot_delays.get(key, 0) / withdrawn.get(key, 0.0001)), key / withdrawals_per_day(TARGET_AMOUNT_STAKING, key)))
| [
"v@buterin.com"
] | v@buterin.com |
c0f361d63969b7199316fa90ed6ae5fbb8b313bc | d0efa2026b7ed22ff4f9aa76c27ae2474c30f26d | /test/test_fraud_registration_response.py | a1fe7795c438367145d0919119d7d783346461d5 | [] | no_license | begum-akbay/Python | 2075650e0ddbf1c51823ebd749742646bf221603 | fe8b47e29aae609b7510af2d21e53b8a575857d8 | refs/heads/master | 2023-03-28T00:11:00.997194 | 2021-03-25T16:38:17 | 2021-03-25T16:38:17 | 351,499,957 | 0 | 0 | null | 2021-03-25T16:38:17 | 2021-03-25T16:15:16 | Python | UTF-8 | Python | false | false | 1,178 | py | # coding: utf-8
"""
Payment Gateway API Specification.
The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. # noqa: E501
The version of the OpenAPI document: 21.1.0.20210122.001
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.fraud_registration_response import FraudRegistrationResponse # noqa: E501
from openapi_client.rest import ApiException
class TestFraudRegistrationResponse(unittest.TestCase):
    """Unit-test stubs for the FraudRegistrationResponse model."""

    def setUp(self):
        # No shared fixtures are needed for these stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testFraudRegistrationResponse(self):
        """Test FraudRegistrationResponse"""
        # FIXME: construct the model with mandatory attributes set to
        # example values, e.g.:
        # model = openapi_client.models.fraud_registration_response.FraudRegistrationResponse()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| [
"emargules@bluepay.com"
] | emargules@bluepay.com |
0c5e91d431b0fb904a58137a798ae36099255259 | 578690b6babcbd691d8b6a96c65c9a17dcbbf27f | /build/catkin_generated/generate_cached_setup.py | 599e598cfa02738d95dc98f0cf8e325d40026202 | [
"MIT"
] | permissive | Sinchiguano/Multi-agent-system | d48c97b5c564cced68de096e0de2f05776d78e20 | 2b54ca6d08ea6fb42050aaf12788a34a53da60a1 | refs/heads/master | 2020-03-31T13:00:35.497228 | 2018-12-12T12:33:32 | 2018-12-12T12:33:32 | 152,238,167 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,291 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in "/home/casch/turtle/devel;/opt/ros/kinetic".split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script
# Generate the cached environment script for this workspace and write it out.
code = generate_environment_script('/home/casch/turtle/devel/env.sh')
output_filename = '/home/casch/turtle/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    #print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))
# Mark the generated script as executable for its owner.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"cesarsinchiguano@hotmail.es"
] | cesarsinchiguano@hotmail.es |
9babd344a68af296a4766860b116d07415079ed0 | 9c756eefb3a62841b516e6a7495020750cb2c5b7 | /scripts/train_lang_identifier.py | b3bdef265b2a2970cb8f8d79b8c1d89ea49f8865 | [
"Apache-2.0"
] | permissive | chartbeat-labs/textacy | d17c03d243b3af66798bef4b3adc574300c7724e | f08ecbc46020f514b8cbb024778ec4f80456291f | refs/heads/main | 2023-09-02T18:11:56.339778 | 2023-04-03T00:19:55 | 2023-04-03T00:19:55 | 51,014,761 | 2,152 | 310 | NOASSERTION | 2023-06-20T15:12:19 | 2016-02-03T16:52:45 | Python | UTF-8 | Python | false | false | 23,927 | py | """
### env
textacy==0.9.0
...
### fetch source data
- **Tatoeba:** A crowd-sourced collection of sentences and their translations into many languages. Style is relatively informal; subject matter is a variety of everyday things and goings-on. Source: https://tatoeba.org/eng/downloads.
- **Leipzig Corpora:** A collection of corpora for many languages pulling from comparable sources -- specifically, 10k Wikipedia articles from official database dumps and 10k news articles from either RSS feeds or web scrapes, when available. Style is relatively formal; subject matter is a variety of notable things and goings-on. Source: http://wortschatz.uni-leipzig.de/en/download
- **UDHR:** The UN's Universal Declaration of Human Rights document, translated into hundreds of languages and split into paragraphs. Style is formal; subject matter is fundamental human rights to be universally protected. Source: https://unicode.org/udhr/index.html
- **Twitter:** A collection of tweets in each of ~70 languages, posted in July 2014, with languages assigned through a combination of models and human annotators. Style is informal; subject matter is whatever Twitter was going on about back then, who could say. Source: https://blog.twitter.com/engineering/en_us/a/2015/evaluating-language-identification-performance.html
- **DSLCC**: Two collections of short excerpts of journalistic texts in a handful of language groups that are highly similar to each other. Style is relatively formal; subject matter is current events. Source: http://ttg.uni-saarland.de/resources/DSLCC/
"""
import argparse
import collections
import logging
import operator
import os
import random
import re
import statistics
import sys
import tarfile
from pprint import pprint
import joblib
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.metrics
import sklearn.model_selection
import tqdm
import twitter # python-twitter on pypi
import yaml
from cytoolz import itertoolz
import textacy
import textacy.datasets
import textacy.io
import textacy.preprocessing
logging.basicConfig(level=logging.INFO)
def main():
    """Download/load the language-ID source corpora, aggregate and sample a
    balanced training set, train a textacy ``LangIdentifier`` pipeline, and
    save the fitted model plus evaluation artifacts under ``root_dirpath``.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "--root_dirpath",
        required=True,
        help="path to root directory under which datasets and models are saved",
    )
    parser.add_argument(
        "--twitter_creds",
        required=True,
        help="path to file containing twitter API credentials",
    )
    parser.add_argument(
        "--min_len",
        type=int,
        default=25,
        help="minimum number of alphanumeric characters in a text "
        "for it to be included in the training dataset",
    )
    parser.add_argument(
        "--min_obs",
        type=int,
        default=300,
        help="minimum number of observations -- (text, lang) pairs -- in a language "
        "for it to be included in the training dataset",
    )
    parser.add_argument(
        "--version",
        type=str,
        required=True,
        help="semantic version number to assign to trained model, e.g. '1.0'",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        default=False,
        help="if specified, force downloads of all datasets, "
        "even if they already exist on disk under ``root_dirpath``",
    )
    args = parser.parse_args()
    # One sub-directory per source corpus, plus one for saved models.
    root_dirpath = textacy.utils.to_path(args.root_dirpath).resolve()
    iso_639_dirpath = root_dirpath.joinpath("iso-639")
    twitter_dirpath = root_dirpath.joinpath("twitter")
    tatoeba_dirpath = root_dirpath.joinpath("tatoeba")
    wili_dirpath = root_dirpath.joinpath("wili")
    udhr_dirpath = root_dirpath.joinpath("udhr")
    dslcc_dirpath = root_dirpath.joinpath("dslcc")
    models_dirpath = root_dirpath.joinpath("models")
    # download, load, and summarize datasets
    # "sh" (Serbo-Croatian) is excluded from the ISO-639 mapping.
    download_iso_639_data(iso_639_dirpath, force=args.force)
    iso_639_data = load_iso_639_data(iso_639_dirpath, exclude=("sh",))
    print(sorted(iso_639_data.items())[:5], "...")
    download_twitter_data(twitter_dirpath, args.twitter_creds, force=args.force)
    twitter_data = load_twitter_data(
        twitter_dirpath, set(iso_639_data.values()), min_len=args.min_len
    )
    summarize_dataset(twitter_data)
    download_tatoeba_data(tatoeba_dirpath, force=args.force)
    tatoeba_data = load_tatoeba_data(tatoeba_dirpath, iso_639_data, min_len=args.min_len)
    summarize_dataset(tatoeba_data)
    download_wili_data(wili_dirpath, force=args.force)
    wili_data = load_wili_data(wili_dirpath, iso_639_data, min_len=args.min_len)
    summarize_dataset(wili_data)
    download_udhr_data(udhr_dirpath, force=args.force)
    udhr_data = load_udhr_data(
        udhr_dirpath, set(iso_639_data.values()), min_len=args.min_len
    )
    summarize_dataset(udhr_data)
    download_dslcc_data(dslcc_dirpath, force=args.force)
    dslcc_data = load_dslcc_data(
        dslcc_dirpath, set(iso_639_data.values()), min_len=args.min_len
    )
    summarize_dataset(dslcc_data)
    # HACK HACK HACK
    # Leipzig corpora must already exist at this hard-coded local path.
    leipzig_dirpath = textacy.utils.to_path(
        "/Users/burtondewilde/Desktop/datasets/language_identification/leipzig-corpora"
    ).resolve()
    if leipzig_dirpath.is_dir():
        leipzig_data = load_leipzig_data(
            leipzig_dirpath, iso_639_data, min_len=args.min_len
        )
        summarize_dataset(leipzig_data)
    else:
        logging.warning("leipzig data hack unavailable, sorry")
        leipzig_data = []
    # aggregate and sample datasets
    # Per-source sample sizes below balance corpus styles; English gets an
    # extra 10k Tatoeba boost.
    datasets = (
        udhr_data
        + wili_data
        + get_random_sample(tatoeba_data, 420000, stratify=True, random_state=42)
        + get_random_sample(leipzig_data, 480000, stratify=True, random_state=42)
        + get_random_sample(
            twitter_data, len(twitter_data), stratify=True, random_state=42
        )
        + get_random_sample(dslcc_data, 20000, stratify=True, random_state=42)
        + get_random_sample(
            [item for item in tatoeba_data if item[1] == "en"],
            10000,
            stratify=False,
            random_state=42,
        )
    )
    datasets = get_random_sample(datasets, 3000000, stratify=True)
    # Drop languages with fewer than --min_obs observations.
    valid_langs = {
        lang
        for lang, count in collections.Counter(
            lang for _, lang in datasets
        ).most_common()
        if count >= args.min_obs
    }
    datasets = [text_lang for text_lang in datasets if text_lang[1] in valid_langs]
    chars_set = {char for text, _ in datasets for char in text}
    langs_set = {lang for _, lang in datasets}
    num_langs = len(langs_set)
    print("# unique chars:", len(chars_set))
    print("# unique langs:", num_langs)
    summarize_dataset(datasets)
    train_items, test_items = sklearn.model_selection.train_test_split(
        datasets, test_size=0.2, random_state=42
    )
    print(train_items[0])
    print("# train items:", len(train_items))
    print("# test items:", len(test_items))
    # fit and validate a model
    model_id = "lang-identifier-v{}-sklearn-v{}".format(
        args.version, sklearn.__version__[:4]
    )
    print("training model {} ...".format(model_id))
    lid = textacy.lang_utils.LangIdentifier(data_dir=models_dirpath)
    lid.init_pipeline()
    print(lid.pipeline.steps)
    lid.pipeline.fit(
        [text for text, _ in train_items], y=[lang for _, lang in train_items],
    )
    true, preds = test_model(
        lid.pipeline, test_items, filepath=models_dirpath.joinpath(model_id + ".txt")
    )
    try:
        ax = plot_confusion_matrix(
            true, preds, normalize=True, title=None, cmap=plt.cm.Blues, annotate=False
        )
        ax.get_figure().savefig(
            models_dirpath.joinpath(model_id + "-confusion-matrix.png")
        )
    except Exception:
        pass  # well, at least we tried
    joblib.dump(lid.pipeline, models_dirpath.joinpath(model_id + ".pkl.gz"), compress=3)
def download_iso_639_data(dirpath, force=False):
    """Download the official ISO 639 code table as a TSV, mapping all
    language code variations (639-1, 639-2, 639-3) to each other.

    Args:
        dirpath (str or :class:`pathlib.Path`): directory to save into.
        force (bool): if True, re-download even if the file already exists.

    References:
        https://iso639-3.sil.org/code_tables/639/data
    """
    source_url = "https://iso639-3.sil.org/sites/iso639-3/files/downloads/iso-639-3.tab"
    textacy.io.download_file(
        source_url, filename="iso-639-3.tsv", dirpath=dirpath, force=force,
    )
def load_iso_639_data(dirpath, exclude=None):
    """Load the ISO 639 table from disk and build a {639-3 id: 639-1 code}
    mapping, keeping only rows that have a 639-1 code.

    Args:
        dirpath (str or :class:`pathlib.Path`): directory containing
            "iso-639-3.tsv".
        exclude (Set[str]): 639-1 codes to omit from the mapping.

    Returns:
        Dict[str, str]
    """
    dirpath = textacy.utils.to_path(dirpath).resolve()
    fieldnames = [
        "Id",
        "Part2B",
        "Part2T",
        "Part1",
        "Scope",
        "Language_Type",
        "Ref_Name",
        "Comment",
    ]
    rows = textacy.io.read_csv(
        dirpath.joinpath("iso-639-3.tsv").resolve(),
        delimiter="\t",
        fieldnames=fieldnames,
        quoting=1,
    )
    lang_map = {}
    for row in rows:
        part1 = row.get("Part1")
        if not part1:
            continue
        if exclude is not None and part1 in exclude:
            continue
        lang_map[row["Id"]] = part1
    return lang_map
def download_twitter_data(dirpath, creds_fpath, force=False):
    """
    Download two collections of (lang, tweet_id) pairs from Twitter --
    a "uniformly sampled" collection of ~120k tweets over all languages and
    a "recall oriented" collection of ~1.5k tweets per language --
    then fetch available tweets' data from the Twitter API.
    Args:
        dirpath (str or :class:`pathlib.Path`): directory for the TSVs and tweets.jsonl
        creds_fpath (str or :class:`pathlib.Path`): YAML file of Twitter API credentials
        force (bool): if True, re-download even if files already exist
    References:
        https://blog.twitter.com/engineering/en_us/a/2015/evaluating-language-identification-performance.html
    TODO: Ideally, use a tweet search endpoint and filter by language,
    then just iterate over all ISO-639-1 language codes.
    """
    dirpath = textacy.utils.to_path(dirpath).resolve()
    url_fnames = [
        (
            "https://raw.githubusercontent.com/mitjat/langid_eval/master/uniformly_sampled.tsv",
            "uniformly_sampled.tsv",
        ),
        (
            "https://raw.githubusercontent.com/mitjat/langid_eval/master/recall_oriented.tsv",
            "recall_oriented.tsv",
        ),
    ]
    # download tweet ids first
    for url, fname in url_fnames:
        textacy.io.download_file(url, filename=fname, dirpath=dirpath, force=force)
    # download full tweets data next
    tweets_fpath = dirpath.joinpath("tweets.jsonl")
    if tweets_fpath.is_file() and force is False:
        logging.info("tweets data already downloaded to %s", tweets_fpath)
        return
    # load twitter ids data from disk
    tweet_lang_ids = []
    for fname in ["uniformly_sampled.tsv", "recall_oriented.tsv"]:
        tweet_lang_ids.extend(
            textacy.io.read_csv(
                dirpath.joinpath(fname),
                delimiter="\t",
                fieldnames=["lang", "status_id"],
                quoting=1,
            )
        )
    logging.info("loaded %s tweet ids from disk", len(tweet_lang_ids))
    # parse status ids
    status_ids = set()
    for row in tweet_lang_ids:
        try:
            status_ids.add(int(row["status_id"]))
        # there are a small handful of bad status ids, shrug
        except ValueError:
            pass
    logging.info("... of which %s had valid, unique ids", len(status_ids))
    status_ids = list(status_ids)
    # instantiate twitter api client
    with textacy.utils.to_path(creds_fpath).resolve().open(mode="rt") as f:
        creds = yaml.safe_load(f.read())
    api = twitter.Api(sleep_on_rate_limit=True, **creds)
    # get tweets data in chunks (the statuses endpoint takes up to 100 ids per call)
    chunk_size = 100
    pbar = tqdm.tqdm(total=len(status_ids), unit="tweets")
    tweets = []
    try:
        for chunk_ids in itertoolz.partition_all(chunk_size, status_ids):
            chunk_tweets = api.GetStatuses(
                chunk_ids, trim_user=True, include_entities=True, map=False
            )
            tweets.extend(chunk_tweets)
            pbar.update(len(chunk_ids))
    except Exception:
        # keep whatever was fetched so far; partial data is still written below
        logging.exception("encountered an error while downloading tweets")
    finally:
        pbar.close()
    tweets = [tweet.AsDict() for tweet in tweets]
    logging.info("downloaded data for %s tweets", len(tweets))
    textacy.io.write_json(tweets, tweets_fpath, mode="wt", lines=True)
def load_twitter_data(dirpath, langs, min_len=25):
    """
    Load tweets downloaded by :func:`download_twitter_data` as (text, lang) pairs,
    stripping URLs from tweet text and filtering out short / unwanted-language tweets.
    Args:
        dirpath (str): directory containing "tweets.jsonl"
        langs (Set[str]): language codes to keep
        min_len (int): minimum text length in *chars* (alphanumeric chars only)
    Returns:
        List[Tuple[str, str]]: (tweet text, language code) pairs
    """
    dirpath = textacy.utils.to_path(dirpath).resolve()
    raw_tweets = textacy.io.read_json(
        dirpath.joinpath("tweets.jsonl"), mode="rt", lines=True
    )
    tweets = []
    for tweet in raw_tweets:
        # totally remove any URLS from tweet text
        # NOTE(review): each entry in tweet["urls"] is assumed to be a dict whose
        # values are URL strings appearing verbatim in the text -- verify schema
        for url in tweet.get("urls", []):
            for item in url.values():
                tweet["text"] = tweet["text"].replace(item, "")
        tweets.append(tweet)
    ds = [
        (tweet["text"], tweet["lang"])
        for tweet in tweets
        if tweet["lang"] in langs
        and itertoolz.count(char for char in tweet["text"] if char.isalnum()) >= min_len
    ]
    return ds
def download_tatoeba_data(dirpath, force=False):
    """
    Download and unpack the Tatoeba sentences archive.
    Args:
        dirpath (str or :class:`pathlib.Path`): directory to download/extract into
        force (bool): if True, re-download even if the archive already exists
    References:
        https://tatoeba.org/eng/downloads
    """
    url = "http://downloads.tatoeba.org/exports/sentences.tar.bz2"
    fpath = textacy.io.download_file(url, dirpath=dirpath, force=force)
    # download_file returns a falsy value when nothing new was fetched
    if fpath:
        textacy.io.unpack_archive(fpath, extract_dir=dirpath)
def load_tatoeba_data(dirpath, iso_lang_map, min_len=25):
    """
    Load Tatoeba sentences as (text, lang) pairs, translating ISO 639-3 codes
    to ISO 639-1 via *iso_lang_map* and dropping short sentences.
    Args:
        dirpath (str or :class:`pathlib.Path`): directory containing "sentences.csv"
        iso_lang_map (Dict[str, str]): {ISO 639-3 code => ISO 639-1 code}
        min_len (int): minimum text length in *chars* (alphanumeric chars only)
    Returns:
        List[Tuple[str, str]]: (sentence text, ISO 639-1 code) pairs
    """
    dirpath = textacy.utils.to_path(dirpath).resolve()
    rows = textacy.io.read_csv(
        dirpath.joinpath("sentences.csv"),
        fieldnames=["sent_id", "iso-639-3", "text"],
        delimiter="\t",
        quoting=1,
    )
    langs = set(iso_lang_map.keys())
    ds = [
        (row["text"], iso_lang_map[row["iso-639-3"]])
        for row in rows
        if row["iso-639-3"] in langs
        and itertoolz.count(char for char in row["text"] if char.isalnum()) >= min_len
    ]
    return ds
def download_wili_data(dirpath, force=False):
    """
    Download and unpack the WiLI-2018 language-identification dataset.
    Args:
        dirpath (str or :class:`pathlib.Path`): directory to download/extract into
        force (bool): if True, re-download even if the archive already exists
    References:
        https://zenodo.org/record/841984
    """
    url = "https://zenodo.org/record/841984/files/wili-2018.zip?download=1"
    fpath = textacy.io.download_file(url, dirpath=dirpath, force=force)
    # download_file returns a falsy value when nothing new was fetched
    if fpath:
        textacy.io.unpack_archive(fpath, extract_dir=dirpath)
def load_wili_data(dirpath, iso_lang_map, min_len=25):
    """
    Load the WiLI-2018 train and test splits as (text, lang) pairs,
    translating dataset language codes to ISO 639-1 via *iso_lang_map*
    and dropping short texts.
    Args:
        dirpath (str): directory containing x_{train,test}.txt / y_{train,test}.txt
        iso_lang_map (Dict[str, str]): {WiLI language code => ISO 639-1 code}
        min_len (int): minimum text length in *chars* (alphanumeric chars only)
    Returns:
        List[Tuple[str, str]]: (text, ISO 639-1 code) pairs over both splits
    References:
        https://zenodo.org/record/841984
    """
    dirpath = textacy.utils.to_path(dirpath).resolve()
    # hoisted out of the loop: the mapping keys are invariant across subsets
    # (the original rebuilt this set on every iteration)
    langs_set = set(iso_lang_map.keys())
    ds = []
    for subset in ("train", "test"):
        text_lines = textacy.io.read_text(
            dirpath.joinpath("x_{}.txt".format(subset)), lines=True
        )
        lang_lines = textacy.io.read_text(
            dirpath.joinpath("y_{}.txt".format(subset)), lines=True
        )
        # texts and labels are parallel, line-aligned files
        texts = (line.strip() for line in text_lines)
        langs = (line.strip() for line in lang_lines)
        ds.extend(
            (text, iso_lang_map[lang])
            for text, lang in zip(texts, langs)
            if lang in langs_set
            and itertoolz.count(char for char in text if char.isalnum()) >= min_len
        )
    return ds
def download_udhr_data(dirpath, force=False):
    """
    Download the Universal Declaration of Human Rights dataset
    via textacy's built-in UDHR dataset wrapper.
    Args:
        dirpath (str or :class:`pathlib.Path`): directory to download into
        force (bool): if True, re-download even if data already exists
    """
    ds = textacy.datasets.UDHR(data_dir=dirpath)
    ds.download(force=force)
def load_udhr_data(dirpath, langs, min_len=25):
    """
    Load UDHR translations as (text, lang) pairs, splitting each document
    into per-line snippets and dropping short / unwanted-language snippets.
    Args:
        dirpath (str or :class:`pathlib.Path`): directory with downloaded UDHR data
        langs (Set[str]): language codes to keep
        min_len (int): minimum snippet length in *chars* (alphanumeric chars only)
    Returns:
        List[Tuple[str, str]]: (snippet, language code) pairs
    """
    ds = textacy.datasets.UDHR(data_dir=dirpath)
    data = [
        (snippet, meta["lang"])
        for text, meta in ds.records()
        # one training example per line of each translated document
        for snippet in text.split("\n")
        if meta["lang"] in langs
        and itertoolz.count(char for char in snippet if char.isalnum()) >= min_len
    ]
    return data
def download_dslcc_data(dirpath, force=False):
    """
    Download two multilingual collections of short excerpts of journalistic texts,
    focused on language groups that are very similar and thus more difficult
    to correctly identify.
    Args:
        dirpath (str or :class:`pathlib.Path`): directory to download/extract into
        force (bool): if True, re-download even if archives already exist
    References:
        http://ttg.uni-saarland.de/resources/DSLCC/
    """
    dirpath = textacy.utils.to_path(dirpath).resolve()
    # fetch DSLCC v3 and v4, each into its own subdirectory
    for version in [3, 4]:
        name = "dslcc{}".format(version)
        url = "http://scholar.harvard.edu/files/malmasi/files/{}.zip".format(name)
        fpath = textacy.io.download_file(url, dirpath=dirpath, force=force)
        if fpath:
            textacy.io.unpack_archive(fpath, extract_dir=dirpath.joinpath(name))
def load_dslcc_data(dirpath, langs, min_len=25):
    """
    Load DSLCC v3/v4 train and dev splits as (text, lang) pairs, truncating
    language codes to their 2-letter prefix and dropping short texts.
    Args:
        dirpath (str or :class:`pathlib.Path`)
        langs (Set[str]): 2-letter language codes to keep
        min_len (int): minimum text length in *chars*
    Returns:
        List[Tuple[str, str]]: unique (text, lang) pairs, sorted by lang
    """
    # Fix: accept plain string paths like every other load_* helper here;
    # the original called dirpath.joinpath(...) directly, which raises
    # AttributeError when dirpath is a str (as its own docstring advertises).
    dirpath = textacy.utils.to_path(dirpath).resolve()
    data = []
    fstubs = [
        "dslcc3/train/task1-train.txt",
        "dslcc3/train/task1-dev.txt",
        "dslcc4/DSL-TRAIN.txt",
        "dslcc4/DSL-DEV.txt",
    ]
    for fstub in fstubs:
        filepath = dirpath.joinpath(fstub)
        lines = textacy.io.read_text(filepath, mode="rt", encoding="utf-8", lines=True)
        for line in lines:
            if not line.strip():
                continue
            try:
                text, lang = line.split("\t")
            except ValueError:
                # line did not contain exactly one tab separator
                logging.warning("bad line in data")
                continue
            if len(text) >= min_len and lang[:2] in langs:
                data.append((text, lang[:2]))
    # de-duplicate, then order by language code for stable output
    return sorted(set(data), key=operator.itemgetter(1))
def load_leipzig_data(dirpath, iso_lang_map, min_len=25):
    """
    Load Leipzig corpora sentences as (text, lang) pairs, reading directly
    from the downloaded .tar.gz archives without extracting them to disk.
    Args:
        dirpath (str): directory containing the Leipzig *.tar.gz archives
        iso_lang_map (Dict[str, str]): {corpus language code => ISO 639-1 code}
        min_len (int): minimum text length in *chars*
    Returns:
        List[Tuple[str, str]]: (sentence text, ISO 639-1 code) pairs
    References:
        http://wortschatz.uni-leipzig.de/en/download
    TODO: figure out a good way of downloading leipzig data programatically?
    TODO: only use news data, maybe?
    """
    ds = []
    for filepath in sorted(textacy.io.get_filepaths(dirpath, match_regex=r"\.tar\.gz$")):
        # archive filenames start with the corpus language code, e.g. "eng_news_..."
        fname = os.path.basename(filepath)
        lang = fname.split("_")[0]
        try:
            lang = iso_lang_map[lang]
        except KeyError:
            # language has no ISO 639-1 equivalent; skip the whole archive
            continue
        with tarfile.open(filepath, mode="r") as tf:
            for member in tf:
                # only the "*-sentences.txt" member holds the sentence data
                if re.search(r".*?-sentences\.txt$", member.name):
                    with tf.extractfile(member) as f:
                        for line in f:
                            # each line is "<index>\t<sentence>"
                            idx, text = line.decode("utf-8").split(sep="\t", maxsplit=1)
                            text = textacy.preprocessing.normalize_whitespace(text)
                            if len(text) >= min_len:
                                ds.append((text.strip(), lang))
    return ds
def summarize_dataset(ds):
    """
    Print summary statistics for a (text, lang) dataset: size, a few sample
    items, text-length stats, and per-language counts.
    Args:
        ds (List[Tuple[str, str]]): (text, language code) pairs
    """
    n_obs = len(ds)
    print("# observations:", n_obs)
    pprint(ds[:3])
    min_text_len = min(len(text) for text, _ in ds)
    print("...")
    print("min text len:", min_text_len)
    try:
        mean_text_len = statistics.mean(len(text) for text, _ in ds)
        stdev_text_len = statistics.stdev(len(text) for text, _ in ds)
        print("mean text len: {:.1f} +/- {:.1f}".format(mean_text_len, stdev_text_len))
    except NameError:
        # the statistics module may not have been imported; skip mean/stdev
        pass
    max_text_len = max(len(text) for text, _ in ds)
    print("max text len:", max_text_len)
    lang_counts = collections.Counter(lang for _, lang in ds)
    # show the 15 most and 15 least common languages
    top_counts = ", ".join(
        "'{}': {}".format(lang, count) for lang, count in lang_counts.most_common(15)
    )
    bot_counts = ", ".join(
        "'{}': {}".format(lang, count)
        for lang, count in sorted(
            lang_counts.items(), key=operator.itemgetter(1), reverse=True
        )[-15:]
    )
    print("# languages:", len(lang_counts))
    print("{} ... {}".format(top_counts, bot_counts))
def get_random_sample(seq, n, stratify=True, random_state=None):
    """
    Draw a random sample of up to *n* items from *seq*.
    Args:
        seq (Sequence[Tuple]): items whose second element is the group label
        n (int): desired sample size (result may be smaller if seq is small)
        stratify (bool): if True, sample roughly evenly across group labels
        random_state (int): seed for reproducible sampling
    Returns:
        list: sampled items, shuffled (stratified) or in random.sample order
    """
    random.seed(a=random_state)
    # guard: the stratified branch would otherwise divide by zero on empty input
    if not seq:
        return []
    if stratify is True:
        # group examples by label (2nd tuple element), preserving first-seen
        # label order -- stdlib replacement for itertoolz.groupby
        grped = collections.defaultdict(list)
        for example in seq:
            grped[example[1]].append(example)
        # aim for an even share per group, but at least 1 item from each
        n_per_grp = max(int(round(n / len(grped))), 1)
        sample = [
            example
            for examples in grped.values()
            for example in random.sample(examples, min(len(examples), n_per_grp))
        ]
        # shuffle so groups are interleaved, then trim to the requested size
        random.shuffle(sample)
        return sample[:n]
    else:
        return random.sample(seq, min(len(seq), n))
def test_model(model, ds_test, filepath=None):
    """
    Evaluate *model* on held-out (text, lang) pairs, print a classification
    report, and optionally save the report to *filepath*.
    Args:
        model: fitted pipeline exposing a ``predict([text])`` method
        ds_test (List[Tuple[str, str]]): (text, true language) pairs
        filepath (str or :class:`pathlib.Path`): where to save the report text
    Returns:
        Tuple[list, list]: (true labels, predicted labels)
    """
    exceptions = collections.Counter()
    true = []
    preds = []
    with tqdm.tqdm(total=len(ds_test)) as pbar:
        for text, lang in ds_test:
            pbar.update(1)
            true.append(lang)
            try:
                lang = model.predict([text])[0]
                preds.append(lang)
            except Exception:
                # count the failure under its *true* label and predict the
                # sentinel "un" (unknown) so true/preds stay aligned
                exceptions.update([lang])
                preds.append("un")
    print("# exceptions :", len(exceptions))
    if len(exceptions):
        print(exceptions.most_common())
    classification_report = sklearn.metrics.classification_report(true, preds)
    print(classification_report)
    if filepath:
        filepath = textacy.utils.to_path(filepath).resolve()
        with filepath.open(mode="wt", encoding="utf-8") as f:
            f.write(classification_report)
    return true, preds
def plot_confusion_matrix(
    y_true, y_pred, normalize=False, title=None, cmap=plt.cm.Blues, annotate=False
):
    """
    Plot a confusion matrix over all labels seen in y_true/y_pred.
    Args:
        y_true (list): true labels
        y_pred (list): predicted labels
        normalize (bool): if True, show per-row (per-true-class) fractions
        title (str): optional axes title
        cmap: matplotlib colormap for the heatmap
        annotate (bool): if True, write each cell's value into the plot
    Returns:
        matplotlib Axes holding the plot
    """
    cm = sklearn.metrics.confusion_matrix(y_true, y_pred)
    classes = sklearn.utils.multiclass.unique_labels(y_true, y_pred)
    if normalize:
        # NOTE(review): rows with zero support divide by zero here -- confirm
        # upstream guarantees every true class appears at least once
        cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
    fig, ax = plt.subplots(figsize=(12, 12), constrained_layout=True)
    im = ax.imshow(cm, interpolation="nearest", cmap=cmap)
    ax.figure.colorbar(im, ax=ax, shrink=0.5)
    if title:
        ax.set_title(title)
    # formatting
    ax.set(
        xticks=np.arange(cm.shape[1]),
        yticks=np.arange(cm.shape[0]),
        xticklabels=classes,
        yticklabels=classes,
        ylabel="true",
        xlabel="pred",
    )
    ax.tick_params(axis="x", which="major", labelrotation=90)
    ax.tick_params(axis="both", which="major", labelsize="x-small")
    # annotation
    if annotate is True:
        fmt = ".2f" if normalize else "d"
        # threshold picks white vs. black text for contrast against the heatmap
        thresh = cm.max() / 2
        for i in range(cm.shape[0]):
            for j in range(cm.shape[1]):
                ax.text(
                    j,
                    i,
                    format(cm[i, j], fmt),
                    ha="center",
                    va="center",
                    color="white" if cm[i, j] > thresh else "black",
                )
    return ax
if __name__ == "__main__":
sys.exit(main())
| [
"burtdewilde@gmail.com"
] | burtdewilde@gmail.com |
2a0e897eb6010e63a6c52b6b416a1314f113e5f1 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part001908.py | 3700d31361a5cebcba18b3adf131fc03892afed3 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,500 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher31208(CommutativeMatcher):
    """
    Machine-generated matchpy matcher (do not edit by hand): a singleton
    commutative matcher whose generated `get_match_iter` walks an inlined
    pattern-matching state machine over Add/Mul/Pow subjects, yielding
    (pattern_index, substitution) pairs for pattern 0 ("e*x**r").
    """
    _instance = None
    patterns = {
        0: (0, Multiset({0: 1}), [
            (VariableWithCount('i2.2.1.2.2.2.1.0', 1, 1, None), Add)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Add
    max_optional_count = 0
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # lazily construct and cache the singleton instance
        if CommutativeMatcher31208._instance is None:
            CommutativeMatcher31208._instance = CommutativeMatcher31208()
        return CommutativeMatcher31208._instance
    @staticmethod
    def get_match_iter(subject):
        # generated decision tree: each "State NNNNN" comment marks a node;
        # subjects is a work queue of remaining operands to match
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 31207
        subst1 = Substitution(subst0)
        try:
            subst1.try_add_variable('i2.2.1.2.2.2.1.1.0', S(1))
        except ValueError:
            pass
        else:
            pass
            # State 31209
            subst2 = Substitution(subst1)
            try:
                subst2.try_add_variable('i2.2.1.2.2.2.1.1.2', S(1))
            except ValueError:
                pass
            else:
                pass
                # State 31210
                if len(subjects) >= 1:
                    tmp3 = subjects.popleft()
                    subst3 = Substitution(subst2)
                    try:
                        subst3.try_add_variable('i2.2.1.2.2.2.1', tmp3)
                    except ValueError:
                        pass
                    else:
                        pass
                        # State 31211
                        if len(subjects) == 0:
                            pass
                            # 0: e*x**r
                            yield 0, subst3
                    subjects.appendleft(tmp3)
            if len(subjects) >= 1 and isinstance(subjects[0], Pow):
                tmp5 = subjects.popleft()
                subjects6 = deque(tmp5._args)
                # State 31212
                if len(subjects6) >= 1:
                    tmp7 = subjects6.popleft()
                    subst2 = Substitution(subst1)
                    try:
                        subst2.try_add_variable('i2.2.1.2.2.2.1', tmp7)
                    except ValueError:
                        pass
                    else:
                        pass
                        # State 31213
                        subst3 = Substitution(subst2)
                        try:
                            subst3.try_add_variable('i2.2.1.2.2.2.1.1.2', 1)
                        except ValueError:
                            pass
                        else:
                            pass
                            # State 31214
                            if len(subjects6) == 0:
                                pass
                                # State 31215
                                if len(subjects) == 0:
                                    pass
                                    # 0: e*x**r
                                    yield 0, subst3
                        if len(subjects6) >= 1:
                            tmp10 = subjects6.popleft()
                            subst3 = Substitution(subst2)
                            try:
                                subst3.try_add_variable('i2.2.1.2.2.2.1.1.2', tmp10)
                            except ValueError:
                                pass
                            else:
                                pass
                                # State 31214
                                if len(subjects6) == 0:
                                    pass
                                    # State 31215
                                    if len(subjects) == 0:
                                        pass
                                        # 0: e*x**r
                                        yield 0, subst3
                            subjects6.appendleft(tmp10)
                    subjects6.appendleft(tmp7)
                subjects.appendleft(tmp5)
        if len(subjects) >= 1 and isinstance(subjects[0], Mul):
            tmp12 = subjects.popleft()
            associative1 = tmp12
            associative_type1 = type(tmp12)
            subjects13 = deque(tmp12._args)
            # delegate matching of the Mul's operands to a nested matcher
            matcher = CommutativeMatcher31217.get()
            tmp14 = subjects13
            subjects13 = []
            for s in tmp14:
                matcher.add_subject(s)
            for pattern_index, subst1 in matcher.match(tmp14, subst0):
                pass
                if pattern_index == 0:
                    pass
                    # State 31224
                    if len(subjects) == 0:
                        pass
                        # 0: e*x**r
                        yield 0, subst1
            subjects.appendleft(tmp12)
        return
        yield
from matchpy.matching.many_to_one import CommutativeMatcher
from .generated_part001909 import *
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
8a017d59c6a3aae183377f5536bffeb884407ec1 | 382e9ce9b99ce514ca16e08c029c1d0e9e14daef | /Python3/206. 反转链表 copy.py | fe988b891116477c04219ac5948443906ce3e3b2 | [] | no_license | a289237642/myLeetCode | e3e757f66af84f4e5b635c75f56424f92dd12b39 | 70ae207af6272ce5b8d246e3d7f092108a5272d7 | refs/heads/master | 2021-01-06T14:10:26.155778 | 2020-07-14T04:56:08 | 2020-07-14T04:56:08 | 241,354,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py | # Definition for singly-linked list.
class ListNode:
    # Singly linked list node: a payload value plus a reference to the next node.
    def __init__(self, x):
        self.val = x  # node payload
        self.next = None  # successor node; None marks the tail
class Solution:
    def reverseList(self, head: ListNode) -> ListNode:
        """Reverse a singly linked list in place and return the new head.

        Iterative pointer reversal: walk the list once, redirecting each
        node's `next` to the already-reversed prefix. O(n) time, O(1) space.
        """
        reversed_head = None
        node = head
        while node is not None:
            following = node.next  # remember the rest of the list
            node.next = reversed_head  # hook current node onto the reversed prefix
            reversed_head = node
            node = following
        return reversed_head
def main():
    """Build the list 1->2->3->4->5, reverse it, and print before/after."""
    # build a 5-node list by hand
    head = cur = ListNode(1)
    cur.next = ListNode(2)
    cur = cur.next
    cur.next = ListNode(3)
    cur = cur.next
    cur.next = ListNode(4)
    cur = cur.next
    cur.next = ListNode(5)
    def print_all(head: ListNode) -> None:
        # walk the list, printing "v ->" per node, ending with NULL
        while head:
            print(head.val, '->', end='')
            head = head.next
        print('NULL')
    print_all(head)
    s = Solution()
    res = s.reverseList(head)
    print_all(res)
if __name__ == "__main__":
main()
| [
"a289237642@163.com"
] | a289237642@163.com |
383231ffdea33c2c4585c837f01e0ce08ac5d8dd | d2845579ea6aa51a2e150f0ffe6ccfda85d035ce | /kernel/components/boosting/horzsecureboost/horz_secureboosting_arbiter.py | e83061cb0e3f057a976ff5cde2226a0fc6884aae | [
"Apache-2.0"
] | permissive | as23187/WeFe | d8de9ff626f9f3e5d98e0850b0b717a80fd73e72 | ba92871d4b1d2eef6c606c34795f4575e84703bd | refs/heads/main | 2023-08-22T12:01:06.718246 | 2021-10-28T01:54:05 | 2021-10-28T01:54:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,544 | py | # Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from numpy import random
from common.python.utils import log_utils
from kernel.components.binning.horzfeaturebinning.horz_split_points import HorzFeatureBinningServer
from kernel.components.boosting import BoostingTree
from kernel.components.boosting.horzsecureboost.horz_decision_tree_arbiter import HorzDecisionTreeArbiter
from kernel.components.boosting.horzsecureboost.horz_secureboosting_aggregator import SecureBoostArbiterAggregator
from kernel.optimizer.convergence import converge_func_factory
from kernel.transfer.variables.transfer_class.horz_secure_boost_transfer_variable \
import HorzSecureBoostingTransferVariable
from kernel.utils import consts
LOGGER = log_utils.get_logger()
class HorzSecureBoostingArbiter(BoostingTree):
    def __init__(self):
        """Initialize arbiter-side state for horizontal secure boosting."""
        super(HorzSecureBoostingArbiter, self).__init__()
        self.mode = consts.HORZ
        self.feature_num = 0  # set later from clients via sync_feature_num()
        self.role = consts.ARBITER
        self.transfer_inst = HorzSecureBoostingTransferVariable()
        self.check_convergence_func = None  # built in fit() when n_iter_no_change
        self.tree_dim = None  # trees per boosting round (multi-class => n classes)
        self.aggregator = SecureBoostArbiterAggregator()
        self.global_loss_history = []  # per-epoch aggregated loss, for callbacks
        # federated_binning obj
        self.binning_obj = HorzFeatureBinningServer()
    def sample_valid_feature(self):
        """Randomly mark a subsample of feature ids as valid for one tree.

        Returns a boolean mask of length feature_num with at least one True.
        """
        # numpy-style choice without replacement (`random` here is numpy.random)
        chosen_feature = random.choice(range(0, self.feature_num),
                                       max(1, int(self.subsample_feature_rate * self.feature_num)), replace=False)
        valid_features = [False for i in range(self.feature_num)]
        for fid in chosen_feature:
            valid_features[fid] = True
        return valid_features
    def sync_feature_num(self):
        """Collect feature counts from all parties and assert they all agree."""
        feature_num_list = self.transfer_inst.feature_number.get(idx=-1, suffix=('feat_num',))
        for num in feature_num_list[1:]:
            assert feature_num_list[0] == num
        return feature_num_list[0]
    def sync_stop_flag(self, stop_flag, suffix):
        """Broadcast the early-stopping flag to all parties (idx=-1 => everyone)."""
        self.transfer_inst.stop_flag.remote(stop_flag, idx=-1, suffix=suffix)
    def sync_current_loss(self, suffix):
        """Gather per-party losses and return the sample-weighted average."""
        loss_status_list = self.transfer_inst.loss_status.get(idx=-1, suffix=suffix)
        total_loss, total_num = 0, 0
        for l_ in loss_status_list:
            # each party reports its mean loss and how many samples it covers
            total_loss += l_['cur_loss'] * l_['sample_num']
            total_num += l_['sample_num']
        LOGGER.debug('loss status received, total_loss {}, total_num {}'.format(total_loss, total_num))
        return total_loss / total_num
    def sync_tree_dim(self):
        """Collect tree dimensions from all parties and assert they all agree."""
        tree_dims = self.transfer_inst.tree_dim.get(idx=-1, suffix=('tree_dim',))
        dim0 = tree_dims[0]
        for dim in tree_dims[1:]:
            assert dim0 == dim
        return dim0
    def check_convergence(self, cur_loss):
        """Return True when the configured convergence criterion is met."""
        LOGGER.debug('checking convergence')
        return self.check_convergence_func.is_converge(cur_loss)
    def generate_flowid(self, round_num, tree_num):
        """Build a dotted flow id "<flowid>.<round>.<tree>" for one tree's run."""
        LOGGER.info("generate flowid, flowid {}".format(self.flowid))
        return ".".join(map(str, [self.flowid, round_num, tree_num]))
    def label_alignment(self) -> List:
        """Union all parties' label sets into one sorted global mapping.

        Broadcasts {label: index} back to every party and returns it.
        """
        labels = self.transfer_inst.local_labels.get(idx=-1, suffix=('label_align',))
        label_set = set()
        for local_label in labels:
            label_set.update(local_label)
        global_label = list(label_set)
        # sort so every party derives the identical label -> index mapping
        global_label = sorted(global_label)
        label_mapping = {v: k for k, v in enumerate(global_label)}
        self.transfer_inst.label_mapping.remote(label_mapping, idx=-1, suffix=('label_mapping',))
        return label_mapping
    def federated_binning(self):
        """Run the server side of federated feature binning (split-point averaging)."""
        self.binning_obj.average_run()
    def send_valid_features(self, valid_features, epoch_idx, t_idx):
        """Broadcast the sampled feature mask for tree (epoch_idx, t_idx) to all parties."""
        self.transfer_inst.valid_features.remote(valid_features, idx=-1, suffix=('valid_features', epoch_idx, t_idx))
    def fit(self, data_inst, valid_inst=None):
        """Arbiter-side training loop: coordinate binning, label alignment,
        per-round tree building, loss aggregation and early stopping.

        data_inst / valid_inst are unused on the arbiter; data stays with the parties.
        """
        self.federated_binning()
        # initializing
        self.feature_num = self.sync_feature_num()
        self.tree_dim = 1
        if self.task_type == consts.CLASSIFICATION:
            label_mapping = self.label_alignment()
            LOGGER.debug('label mapping is {}'.format(label_mapping))
            # multi-class => one tree per class per round; binary => single tree
            self.tree_dim = len(label_mapping) if len(label_mapping) > 2 else 1
        if self.n_iter_no_change:
            self.check_convergence_func = converge_func_factory("diff", self.tol)
        LOGGER.debug('begin to fit a boosting tree')
        for epoch_idx in range(self.num_trees):
            for t_idx in range(self.tree_dim):
                valid_feature = self.sample_valid_feature()
                self.send_valid_features(valid_feature, epoch_idx, t_idx)
                flow_id = self.generate_flowid(epoch_idx, t_idx)
                new_tree = HorzDecisionTreeArbiter(self.tree_param, valid_feature=valid_feature, epoch_idx=epoch_idx,
                                                   tree_idx=t_idx, flow_id=flow_id)
                new_tree.fit()
            # aggregate the parties' losses into one global value for this epoch
            global_loss = self.aggregator.aggregate_loss(suffix=(epoch_idx,))
            self.global_loss_history.append(global_loss)
            LOGGER.debug('cur epoch global loss is {},epoch_idx is {}'.format(global_loss, epoch_idx))
            metric_meta = {'abscissa_name': 'iters', 'ordinate_name': 'loss', 'metric_type': 'LOSS'}
            self.callback_metric(metric_name='loss',
                                 metric_namespace='train',
                                 metric_meta=metric_meta,
                                 metric_data=(epoch_idx, global_loss))
            if self.n_iter_no_change:
                # tell every party whether to stop early based on loss convergence
                should_stop = self.aggregator.broadcast_converge_status(self.check_convergence, (global_loss,),
                                                                        suffix=(epoch_idx,))
                LOGGER.debug('stop flag sent')
                if should_stop:
                    break
            self.tracker.add_task_progress(1)
        # report the best (minimum) loss seen over all epochs
        self.callback_metric("loss",
                             "train",
                             {'abscissa_name': 'iters', 'ordinate_name': 'loss', 'metric_type': 'LOSS'},
                             metric_data=("Best", min(self.global_loss_history)))
        LOGGER.debug('fitting horz decision tree done')
def predict(self, data_inst):
LOGGER.debug('start predicting')
| [
"winter.zou@welab-inc.com"
] | winter.zou@welab-inc.com |
d646981c44a020b77fa4740fc263ac396d9be23d | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/request/AlipayMobilePublicMenuGetRequest.py | 9cd9fca9efe5d03dc782b5de4ca34947c3c417a0 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 3,173 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayMobilePublicMenuGetRequest(object):
    """
    Generated SDK request wrapper for the 'alipay.mobile.public.menu.get' API.

    Holds the request's common gateway parameters (version, terminal info,
    URLs, business model payload) and serializes them via get_params().
    """
    def __init__(self, biz_model=None):
        self._biz_model = biz_model  # business payload; must expose to_alipay_dict()
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None  # async notification callback URL
        self._return_url = None  # synchronous redirect URL
        self._udf_params = None  # user-defined extra text params (dict)
        self._need_encrypt = False
    @property
    def biz_model(self):
        return self._biz_model
    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value
    @property
    def version(self):
        return self._version
    @version.setter
    def version(self, value):
        self._version = value
    @property
    def terminal_type(self):
        return self._terminal_type
    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value
    @property
    def terminal_info(self):
        return self._terminal_info
    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value
    @property
    def prod_code(self):
        return self._prod_code
    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value
    @property
    def notify_url(self):
        return self._notify_url
    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value
    @property
    def return_url(self):
        return self._return_url
    @return_url.setter
    def return_url(self, value):
        self._return_url = value
    @property
    def udf_params(self):
        return self._udf_params
    @udf_params.setter
    def udf_params(self, value):
        # silently ignore non-dict assignments (generated-SDK convention)
        if not isinstance(value, dict):
            return
        self._udf_params = value
    @property
    def need_encrypt(self):
        return self._need_encrypt
    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value
    def add_other_text_param(self, key, value):
        """Attach one extra key/value text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value
    def get_params(self):
        """Serialize all set fields into the flat gateway parameter dict."""
        params = dict()
        params[P_METHOD] = 'alipay.mobile.public.menu.get'
        params[P_VERSION] = self.version
        if self.biz_model:
            # biz_content is the JSON-encoded business payload, keys sorted
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params
    def get_multipart_params(self):
        """Return file-upload parameters; this API uploads no files."""
        multipart_params = dict()
        return multipart_params
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
35ba3b1715093fb966f96ff2d6575a4a330714a5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02613/s533268389.py | 55553dc522e04c9c67b08c00fbde302fb1cc5c78 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | N = int(input())
S = [input() for _ in range(N)]
dic = {"AC":0, "WA":0, "TLE":0, "RE":0}
for i in range(N):
dic[S[i]] += 1
print("AC x {}".format(dic["AC"]))
print("WA x {}".format(dic["WA"]))
print("TLE x {}".format(dic["TLE"]))
print("RE x {}".format(dic["RE"]))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4b49df01e8cff7cd872cc4fdd703ee17d9f854de | d7e098fe9086a5a1a437e96e8ad977ad95f5457d | /lunch/lunch/lunch/settings/base.py | 9769cfaf61e1ab197d14a8dea7d2389d6630640c | [] | no_license | victorzhangw/django-tutorial-for-programmers | 6c03cca7ecdbe870d4769a0ff31d903f5d1e2553 | 98b734b83189e69dfdf39a0bda3376769d2ecc68 | refs/heads/master | 2021-01-15T14:11:41.015793 | 2014-10-20T02:57:13 | 2014-10-20T02:57:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,820 | py | """
Django settings for lunch project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
from django.core.urlresolvers import reverse_lazy
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# Hosts allowed to serve this site; empty is only valid with DEBUG on.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    # project apps
    'pages',
    'events',
    'stores',
    'base',
    # third-party helpers
    'braces',
    'crispy_forms',
    # django contrib apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'lunch.urls'
WSGI_APPLICATION = 'lunch.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# where to send users after login; lazy so URLconf need not be loaded yet
LOGIN_REDIRECT_URL = reverse_lazy('home')
# template pack used by django-crispy-forms
CRISPY_TEMPLATE_PACK = 'bootstrap3'
"uranusjr@gmail.com"
] | uranusjr@gmail.com |
c7740205e18c19615aa6a9809aeadbeba39e5275 | c78a135789f13c257da664bfbbe9a50e0aa34fc2 | /code/testing.py | f55e8bf3cc14b1181b60946345825c43c908d6d7 | [
"MIT"
] | permissive | mkness/TheCannon | f6049a7dab16ab38b32b228cdc68dfadc3d1df72 | da1aa0baa3d42e79b6cd1e511da477a021547936 | refs/heads/master | 2021-01-17T04:02:11.378483 | 2017-11-08T23:54:05 | 2017-11-08T23:54:05 | 22,962,109 | 6 | 1 | null | 2014-12-15T13:55:49 | 2014-08-14T17:14:40 | Python | UTF-8 | Python | false | false | 1,035 | py | def main():
nobservations = 4
a, b, c = 3.0, 2.0, 1.0
f, x, y, z = generate_data(nobservations, a, b, c)
# print 'Linear results (should be {}, {}, {}):'.format(a, b, c)
# print linear_invert(f, x, y, z)
#
# print 'Non-linear results (should be {}, {}, {}):'.format(a, b, c)
# print nonlinear_invert(f, x, y, z)
def generate_data(nobservations, a, b, c, noise_level=0.01):
    """Generate noisy observations of func() at random unit-cube points.

    Args:
        nobservations: number of (x, y, z) sample points to draw
        a, b, c: true model coefficients passed through to func()
        noise_level: standard deviation of the additive Gaussian noise
    Returns:
        (f, x, y, z) arrays of length nobservations
    """
    x, y, z = np.random.random((3, nobservations))
    # Fix: draw noise with sigma = noise_level directly. The original
    # multiplied by noise_level twice (noise_level * normal(0, noise_level)),
    # which silently gave sigma = noise_level**2.
    noise = np.random.normal(0, noise_level, nobservations)
    f = func(x, y, z, a, b, c) + noise
    return f, x, y, z
def func(x, y, z, a, b, c):
    """Evaluate the model a^2 + x*b^2 + y*a*b*cos(c) + z*a*b*sin(c).

    Works elementwise on scalars or numpy arrays.
    """
    ab = a * b
    return a ** 2 + x * b ** 2 + y * ab * np.cos(c) + z * ab * np.sin(c)
def nonlinear_invert(f, x, y, z):
    """Recover (a, b, c) from observations f at points (x, y, z) via curve_fit.

    Returns the fitted parameter array [a, b, c].
    """
    def wrapped_func(observation_points, a, b, c):
        # curve_fit expects f(xdata, *params); unpack the stacked coordinates
        x, y, z = observation_points
        return func(x, y, z, a, b, c)
    xdata = np.vstack([x, y, z])
    model, cov = opt.curve_fit(wrapped_func, xdata, f)
    return model
main()
#nonlinear_invert(0.98,1,2,3)
| [
"whatever"
] | whatever |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.