blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a917b534a35cd0562c3df78398f0284e51011e1d | e1e01cec9ec9f89a7e4598ffda8b5282f076ba65 | /ci/trade/forms.py | 42949a740242c34d4a8ae127777190f19fb36d9b | [
"BSD-3-Clause"
] | permissive | muchu1983/CollectiveIntelligence | bf7e2ba0cb6fa94f74037de4400e46d64fc2ec1b | 9484a59257f277edf53124fbc256d4b570a3caef | refs/heads/master | 2021-03-30T15:49:58.898808 | 2017-06-17T05:49:51 | 2017-06-17T05:49:51 | 75,293,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | # -*- coding: utf-8 -*-
"""
Copyright © 2017, MuChu Hsu
Contributed by Muchu Hsu (muchu1983@gmail.com)
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
from django import forms
from trade.models import CIDeal
class CIDealForm(forms.ModelForm):
class Meta:
model = CIDeal
fields = ("strPaymentType",)
| [
"muchu1983@gmail.com"
] | muchu1983@gmail.com |
605c0f015a6c57a16c52404e6488f6ed2eb950af | 0ed751d915cc982401989fddb5205a7bea8fbe8a | /open_display_preview.py | 1753f2e89e9a0f9d57cf0dd23fb29601039ce040 | [] | no_license | slzatz/mylistmanager3 | 5d5a5b980f9b0c2a09fce36842d64a5dd88400e3 | 0d7c43a0f45164c6519ec95fd0388c1d1f6b0285 | refs/heads/master | 2020-05-21T22:10:10.684513 | 2019-06-05T14:40:06 | 2019-06-05T14:40:06 | 28,277,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,806 | py | #!bin/python
'''
curses script that is called by listmanager_cli.py do_open method
To handle the edge case of the last page, we could add page_max_rows
which would always be the same as max_rows except for the last page
Not sure it's worth it so haven't implemented it
Note that when you press an arrow key getch sees three keys in rapid succession as follows:
\033
[
A, B, C or D
Below are the basic colors supported by curses expressed as:
curses.init_pair(2, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
curses.color_pair(2)|curses.A_BOLD
0:black, 1:red, 2:green, 3:yellow, 4:blue, 5:magenta, 6:cyan, and 7:white
Other ways to change text:
A_BLINK Blinking text
A_BOLD Extra bright or bold text
A_DIM Half bright text
A_REVERSE Reverse-video text
A_STANDOUT The best highlighting mode available
A_UNDERLINE Underlined text
'''
import sys
import curses
from datetime import datetime
import time
import json
import textwrap
from lmdb_p import *
actions = {'n':'note', 't':'title', 's':'star', 'c':'completed', '\n':'select', 'q':None}
keys = {'B':'j', 'A':'k', 'C':'l', 'D':'h'}
def open_display_preview(c_title):
screen = curses.initscr()
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_BLUE, curses.COLOR_WHITE)
curses.init_pair(4, 15, -1)
color_map = {'{blue}':3, '{red}':1, '{green}':2,'{white}':4}
curses.curs_set(0)
curses.cbreak() # respond to keys without needing Enter
curses.noecho()
size = screen.getmaxyx()
screen.nodelay(True)
#normal = curses.A_NORMAL
half_width = size[1]//2
win = curses.newwin(size[0]-2, half_width-1, 1, 1)
win2 = curses.newwin(size[0]-2, half_width-1, 1, half_width+1)
page = 0
row_num = 1
max_chars_line = half_width - 5
max_rows = size[0]-4
tasks = remote_session.query(Task).join(Context).\
filter(Context.title==c_title, Task.deleted==False).\
order_by(desc(Task.modified)).all()
last_page = len(tasks)//max_rows
last_page_max_rows = len(tasks)%max_rows
def draw_note(task):
win2.clear()
win2.box()
note = task.note if task.note else ""
paras = note.splitlines()
n = 1
for para in paras:
# this handles blank lines
if not para:
n+=1
continue
for line in textwrap.wrap(para, max_chars_line):
if n > max_rows:
break
try:
win2.addstr(n, 3, line) #(y,x)
except Exception as e:
pass
n+=1
win2.refresh()
def draw():
win.clear()
win.box()
page_tasks = tasks[max_rows*page:max_rows*(page+1)]
n = 1
for i,task in enumerate(page_tasks, page*max_rows+1):
if n+2 == size[0]:
break
c = ' [c]' if task.completed else ''
font = curses.color_pair(2)|curses.A_BOLD if task.star else curses.A_NORMAL
win.addstr(n, 2, f"{i}. {task.title[:max_chars_line-7]}{c}", font) #(y,x)
n+=1
win.refresh()
screen.clear()
screen.addstr(0,0, f"Hello Steve. screen size = x:{size[1]},y:{size[0]} max_rows = {max_rows} last_page = {last_page}", curses.A_BOLD)
s = "j:page down k: page up h:page left l:page right n:edit [n]ote t:edit [t]itle s:[s]elect and ENTER/RETURN no action"
if len(s) > size[1]:
s = s[:size[1]-1]
screen.addstr(size[0]-1, 0, s, curses.color_pair(3)|curses.A_BOLD)
screen.refresh()
draw()
draw_note(tasks[0])
win.addstr(row_num, 1, ">") #j
win.refresh()
accum = []
arrow = False
page_max_rows = max_rows if last_page else last_page_max_rows
while 1:
n = screen.getch()
if n == -1:
continue
c = chr(n)
if arrow:
accum.append(c)
if len(accum) == 2:
c = keys.get(accum[-1], 'z')
accum = []
arrow = False
elif c == '\x1b': #o33:
arrow = True
continue
if c in ['s', 'n', 't', 'c', '\n', 'q']:
curses.nocbreak()
screen.keypad(False)
curses.echo()
curses.endwin()
task = tasks[(page*max_rows)+row_num-1]
return {'action':actions[c], 'task_id':task.id}
elif c == 'k':
win.addstr(row_num, 1, " ") #k
row_num-=1
if row_num==0:
page = (page - 1) if page > 0 else last_page
draw()
page_max_rows = max_rows if not page==last_page else last_page_max_rows
row_num = page_max_rows
win.addstr(row_num, 1, ">") #k
win.refresh()
task = tasks[page*max_rows+row_num-1]
draw_note(task)
elif c == 'j':
win.addstr(row_num, 1, " ") #j
row_num+=1
if row_num==page_max_rows+1:
page = (page + 1) if page < last_page else 0
draw()
row_num = 1
page_max_rows = max_rows if not page==last_page else last_page_max_rows
win.addstr(row_num, 1, ">") #j
win.refresh()
task = tasks[page*max_rows+row_num-1]
draw_note(task)
elif c == 'h':
win.addstr(row_num, 1, " ") #j
page = (page - 1) if page > 0 else last_page
draw()
row_num = 1
win.addstr(row_num, 1, ">") #j
win.refresh()
task = tasks[page*max_rows]
draw_note(task)
page_max_rows = max_rows if not page==last_page else last_page_max_rows
elif c == 'l':
win.addstr(row_num, 1, " ") #j
page = (page + 1) if page < last_page else 0
draw()
row_num = 1
win.addstr(row_num, 1, ">") #j
win.refresh()
task = tasks[page*max_rows]
draw_note(task)
page_max_rows = max_rows if not page==last_page else last_page_max_rows
screen.move(0, size[1]-50)
screen.clrtoeol()
screen.addstr(0, size[1]-50, f"task num = {row_num}; char = {c}",
curses.color_pair(3)|curses.A_BOLD)
screen.refresh()
#size_current = screen.getmaxyx()
#if size != size_current:
# size = size_current
# screen.addstr(0,0, f"screen size = x:{size[1]},y:{size[0]} max_rows = {max_rows}", curses.A_BOLD)
time.sleep(.05)
| [
"slzatz@gmail.com"
] | slzatz@gmail.com |
8ccd28c39c51fd5a2bc1a78ab5638bfe860f2824 | 0c84154dac47431b8e58b52cae40002b11ebadc3 | /venv/bin/twistd | 766aebb434aa331fdad6aff976ee02fac1f8bea8 | [] | no_license | CarlLOL01/Python_Coding | 61671ed6f4361a2377f3f67a542ec05e2d3ea7f4 | cb6a1194e65fad30b1fde5713bc1fd8e51a21f77 | refs/heads/master | 2022-05-11T18:56:43.515378 | 2019-03-25T04:29:49 | 2019-03-25T04:29:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | #!/Users/xiaoqiang/PycharmProjects/Python_Coding/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==18.9.0','console_scripts','twistd'
__requires__ = 'Twisted==18.9.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('Twisted==18.9.0', 'console_scripts', 'twistd')()
)
| [
"451553616@qq.com"
] | 451553616@qq.com | |
d4fca2e085a4e154869d56a349bc5af7e1569408 | 89baae8446058753cc12baf6ae6d8b598382f113 | /calculate.py | 0918160831dd1ef13f4d86045e0d81a86a59a487 | [
"MIT"
] | permissive | weimingyue/area-calculator | 3df954dc0eef5db9afca21642060139be10c1fb8 | 9651151bc7df757cb85eb9cbf732a669a96cbd59 | refs/heads/master | 2021-09-21T14:22:32.281798 | 2018-08-27T09:34:49 | 2018-08-27T09:34:49 | 272,177,372 | 1 | 0 | MIT | 2020-06-14T10:16:19 | 2020-06-14T10:16:19 | null | UTF-8 | Python | false | false | 2,865 | py | """Module to calculate."""
from pyproj import Proj
import json
from shapely.geometry import shape
from shapely.geometry import LineString
from shapely.geometry import Point
class Calculate():
"""Class to calculate."""
@staticmethod
def area(pointsOfArea, ellps='WGS84'):
"""Method to calculate area."""
# Get geojson from request:
geo = json.loads(pointsOfArea["geojson"])
# Get lng and lat from geojson coordinates:
lng, lat = zip(*geo["coordinates"][0])
# Project area of interest (By pyproj):
pa = Proj("+proj=aea ", ellps=ellps)
x, y = pa(lng, lat)
# Calculate area(by shapely):
cop = {"type": "Polygon", "coordinates": [zip(x, y)]}
area = shape(cop).area
# Preperate result in dict:
result = {"area": area}
return result
@staticmethod
def distance(pointsOfArea, ellps='WGS84'):
"""Method to calculate distance between given points."""
# Get geojson from request:
geo = json.loads(pointsOfArea["geojson"])
# Get lng and lat from geojson coordinates:
lng, lat = zip(*geo["coordinates"][0])
# Project area of interest (By pyproj):
p = Proj(proj='utm', ellps=ellps)
x, y = p(lng, lat)
# Calculate length(by shapely):
cop = {"type": "Polygon", "coordinates": [zip(x, y)]}
points = LineString(cop["coordinates"][0])
distance = points.length
# Preperate result in dict:
result = {"distance": distance}
return result
@staticmethod
def circumference(pointsOfArea, ellps='WGS84'):
"""Method to calculate circumference of the area."""
# Get geojson from request:
geo = json.loads(pointsOfArea["geojson"])
# Get lng and lat from geojson coordinates:
lng, lat = zip(*geo["coordinates"][0])
# Project area of interest (By pyproj):
p = Proj(proj='utm', ellps=ellps)
x, y = p(lng, lat)
# Calculate circumference(by shapely):
cop = {"type": "Polygon", "coordinates": [zip(x, y)]}
circumference = shape(cop).length
# Preperate result in dict:
result = {"circumference": circumference}
return result
@staticmethod
def contains(pointsOfArea, verifiedPoint):
"""Method for checking if the point is in a given area."""
"""Takes pointsOfArea and verifiedPoint as parameters."""
"""Location is format geojson."""
"""verifiedPoint is point for example: (19.937032, 50.061587)"""
# Get geojson from request:
geo = json.loads(pointsOfArea["geojson"])
# Checking if the point is in a given area(by shapely):
points = shape(geo).contains(Point(verifiedPoint))
result = {"contains": points}
return result
| [
"kontokamilsiwek@gmail.com"
] | kontokamilsiwek@gmail.com |
9fa75951501a62fdc4a6b2ce4a9554f7eb73da63 | 608c79a10f1216c8abec61c40ab983a216f0af92 | /bombril/logging/__init__.py | b78b216ff46fa2fe6cc5f8e487593616bbe8d25c | [
"WTFPL"
] | permissive | embatbr/bombril | 8030af6f99ea93284f71417ede33fa932ba56ac1 | 68139e46ff459d1be404002091662b591d55ae90 | refs/heads/master | 2021-04-26T21:52:21.394982 | 2019-01-18T18:47:25 | 2019-01-18T18:47:25 | 71,689,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,549 | py | #! coding: utf-8
import logging
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s] [%(levelname)s] [%(name)s:%(lineno)d] %(message)s',
datefmt="%Y-%m-%d %H:%M:%S %z"
)
def get_logger(name, level_name='INFO'):
logger = logging.getLogger(name)
level = getattr(logging, level_name, logging.NOTSET)
logger.setLevel(level)
# TODO remove the duplication
def log_critical(self):
func = self.critical
def _internal(msg, *args):
func(msg.format(*args))
return _internal
logger.critical = log_critical
def log_error(self):
func = self.error
def _internal(msg, *args):
func(msg.format(*args))
return _internal
logger.error = log_error(logger)
def log_warning(self):
func = self.warning
def _internal(msg, *args):
func(msg.format(*args))
return _internal
logger.warning = log_warning(logger)
def log_info(self):
func = self.info
def _internal(msg, *args):
func(msg.format(*args))
return _internal
logger.info = log_info(logger)
def log_debug(self):
func = self.debug
def _internal(msg, *args):
func(msg.format(*args))
return _internal
logger.debug = log_debug(logger)
def log_exception(self):
def _internal(err):
self.error('{}: {}', err.__class__.__name__, str(err))
return _internal
logger.log_exception = log_exception(logger)
return logger
| [
"eduardo.tenorio@creditas.com.br"
] | eduardo.tenorio@creditas.com.br |
ac5a480407e106574a0a8970c2af653101f0d6d4 | 687dd2573c9d7f240301abaf877751212d462c80 | /train.py | 001810ac4efbe5b8e8c29a7ae73ef42080ad22cb | [
"MIT"
] | permissive | goutamdadhich/Flower_Image_Classifier | 5990c09258fdb7dfa1294366fab8cb56b26d709b | 02559492bd97f2a6475c7f479345a0aff72b1e81 | refs/heads/master | 2022-07-09T21:12:40.263860 | 2020-05-17T07:14:25 | 2020-05-17T07:14:25 | 264,602,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,921 | py | # -*- coding: utf-8 -*-
"""
@author: Goutam Dadhich
@title: Image Classifier training file (train.py)
"""
# ------------------------------------------------------------------------------- #
# Import Libraries
# ------------------------------------------------------------------------------- #
import argparse
import torch
from collections import OrderedDict
from os.path import isdir
from torch import nn
from torch import optim
from torchvision import datasets, transforms, models
# ------------------------------------------------------------------------------- #
# Define Functions
# ------------------------------------------------------------------------------- #
# Function arg_parser() parses keyword arguments from the command line
def arg_parser():
# Define parser
parser = argparse.ArgumentParser(description="Neural Network Settings")
# Add architecture selection to parser
parser.add_argument('--arch',
type=str,
help='Choose architecture from torchvision.models as str')
# Add checkpoint directory to parser
parser.add_argument('--save_dir',
type=str,
help='Define save directory for checkpoints as str. If not specified then model will be lost.')
# Add hyperparameter tuning to parser
parser.add_argument('--learning_rate',
type=float,
help='Define gradient descent learning rate as float')
parser.add_argument('--hidden_units',
type=int,
help='Hidden units for DNN classifier as int')
parser.add_argument('--epochs',
type=int,
help='Number of epochs for training as int')
# Add GPU Option to parser
parser.add_argument('--gpu',
action="store_true",
help='Use GPU + Cuda for calculations')
# Parse args
args = parser.parse_args()
return args
# Function train_transformer(train_dir) performs training transformations on a dataset
def train_transformer(train_dir):
# Define transformation
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# Load the Data
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
return train_data
# Function test_transformer(test_dir) performs test/validation transformations on a dataset
def test_transformer(test_dir):
# Define transformation
test_transforms = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# Load the Data
test_data = datasets.ImageFolder(test_dir, transform=test_transforms)
return test_data
# Function data_loader(data, train=True) creates a dataloader from dataset imported
def data_loader(data, train=True):
if train:
loader = torch.utils.data.DataLoader(data, batch_size=50, shuffle=True)
else:
loader = torch.utils.data.DataLoader(data, batch_size=50)
return loader
# Function check_gpu(gpu_arg) make decision on using CUDA with GPU or CPU
def check_gpu(gpu_arg):
# If gpu_arg is false then simply return the cpu device
if not gpu_arg:
return torch.device("cpu")
# If gpu_arg then make sure to check for CUDA before assigning it
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Print result
if device == "cpu":
print("CUDA was not found on device, using CPU instead.")
return device
# primaryloader_model(architecture="vgg16") downloads model (primary) from torchvision
def primaryloader_model(architecture="vgg16"):
# Load Defaults if none specified
if type(architecture) == type(None):
model = models.vgg16(pretrained=True)
model.name = "vgg16"
print("Network architecture specified as vgg16.")
else:
exec("model = models.{}(pretrained=True)".format(architecture))
model.name = architecture
# Freeze parameters so we don't backprop through them
for param in model.parameters():
param.requires_grad = False
return model
# Function initial_classifier(model, hidden_units) creates a classifier with the corect number of input layers
def initial_classifier(model, hidden_units):
# Check that hidden layers has been input
if type(hidden_units) == type(None):
hidden_units = 4096 #hyperparamters
print("Number of Hidden Layers specificed as 4096.")
# Find Input Layers
input_features = model.classifier[0].in_features
# Define Classifier
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_features, hidden_units, bias=True)),
('relu1', nn.ReLU()),
('dropout1', nn.Dropout(p=0.5)),
('fc2', nn.Linear(hidden_units, 102, bias=True)),
('output', nn.LogSoftmax(dim=1))
]))
return classifier
# Function validation(model, testloader, criterion, device) validates training against testloader to return loss and accuracy
def validation(model, testloader, criterion, device):
test_loss = 0
accuracy = 0
for ii, (inputs, labels) in enumerate(testloader):
inputs, labels = inputs.to(device), labels.to(device)
output = model.forward(inputs)
test_loss += criterion(output, labels).item()
ps = torch.exp(output)
equality = (labels.data == ps.max(dim=1)[1])
accuracy += equality.type(torch.FloatTensor).mean()
return test_loss, accuracy
# Function network_trainer represents the training of the network model
def network_trainer(Model, Trainloader, Testloader, Device,
Criterion, Optimizer, Epochs, Print_every, Steps):
# Check Model Kwarg
if type(Epochs) == type(None):
Epochs = 5
print("Number of Epochs specificed as 5.")
print("Training process initializing .....\n")
# Train Model
for e in range(Epochs):
running_loss = 0
Model.train() # Technically not necessary, setting this for good measure
for ii, (inputs, labels) in enumerate(Trainloader):
Steps += 1
inputs, labels = inputs.to(Device), labels.to(Device)
Optimizer.zero_grad()
# Forward and backward passes
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
model.eval()
with torch.no_grad():
valid_loss, accuracy = validation(model, validloader, criterion)
print("Epoch: {}/{} | ".format(e+1, epochs),
"Training Loss: {:.4f} | ".format(running_loss/print_every),
"Validation Loss: {:.4f} | ".format(valid_loss/len(testloader)),
"Validation Accuracy: {:.4f}".format(accuracy/len(testloader)))
running_loss = 0
model.train()
return Model
#Function validate_model(Model, Testloader, Device) validate the above model on test data images
def validate_model(Model, Testloader, Device):
# Do validation on the test set
correct = 0
total = 0
with torch.no_grad():
Model.eval()
for data in Testloader:
images, labels = data
images, labels = images.to(Device), labels.to(Device)
outputs = Model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy achieved by the network on test images is: %d%%' % (100 * correct / total))
# Function initial_checkpoint(Model, Save_Dir, Train_data) saves the model at a defined checkpoint
def initial_checkpoint(Model, Save_Dir, Train_data):
# Save model at checkpoint
if type(Save_Dir) == type(None):
print("Model checkpoint directory not specified, model will not be saved.")
else:
if isdir(Save_Dir):
# Create `class_to_idx` attribute in model
Model.class_to_idx = Train_data.class_to_idx
# Create checkpoint dictionary
checkpoint = {'architecture': Model.name,
'classifier': Model.classifier,
'class_to_idx': Model.class_to_idx,
'state_dict': Model.state_dict()}
# Save checkpoint
torch.save(checkpoint, 'my_checkpoint.pth')
else:
print("Directory not found, model will not be saved.")
# =============================================================================
# Main Function
# =============================================================================
# Function main() is where all the above functions are called and executed
def main():
# Get Keyword Args for Training
args = arg_parser()
# Set directory for training
data_dir = 'flowers'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
# Pass transforms in, then create trainloader
train_data = test_transformer(train_dir)
valid_data = train_transformer(valid_dir)
test_data = train_transformer(test_dir)
trainloader = data_loader(train_data)
validloader = data_loader(valid_data, train=False)
testloader = data_loader(test_data, train=False)
# Load Model
model = primaryloader_model(architecture=args.arch)
# Build Classifier
model.classifier = initial_classifier(model,
hidden_units=args.hidden_units)
# Check for GPU
device = check_gpu(gpu_arg=args.gpu);
# Send model to device
model.to(device);
# Check for learnrate args
if type(args.learning_rate) == type(None):
learning_rate = 0.001
print("Learning rate specificed as 0.001")
else: learning_rate = args.learning_rate
# Define loss and optimizer
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
# Define deep learning method
print_every = 30
steps = 0
# Train the classifier layers using backpropogation
trained_model = network_trainer(model, trainloader, validloader,
device, criterion, optimizer, args.epochs,
print_every, steps)
print("\nTraining process is now complete!!")
# Quickly Validate the model
validate_model(trained_model, testloader, device)
# Save the model
initial_checkpoint(trained_model, args.save_dir, train_data)
# =============================================================================
# Run Program
# =============================================================================
if __name__ == '__main__': main() | [
"noreply@github.com"
] | noreply@github.com |
45be337a0d8c7c093f97c89e9343659c2838d976 | f47383f90e794416e12d34d4c15b354a0cc4d271 | /cmp/lexer/lexer.py | 475ac1782e7e13d79f4c6098774bd229d36a931e | [] | no_license | adrian13579/CoolInterpreter | ecff721c7c92e0e5d9cc5f7f2bf4855abcc54d36 | 154bd734a9111a1510e5591ed9d79844c72496a5 | refs/heads/master | 2023-03-07T02:00:18.532393 | 2021-02-18T23:09:10 | 2021-02-18T23:09:10 | 262,991,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,043 | py | from cmp.automata import State
from cmp.utils import Token
from cmp.lexer.regex import Regex
class Lexer:
def __init__(self, table, eof):
self.eof = eof
self.regexs = self._build_regexs(table)
self.automaton = self._build_automaton()
@staticmethod
def _build_regexs(table):
regexs = []
for n, (token_type, regex) in enumerate(table):
NFA = Regex.build_automaton(regex)
automaton, states = State.from_nfa(NFA, get_states=True)
for state in automaton:
if state.final:
state.tag = [(n, token_type)]
regexs.append(automaton)
return regexs
def _build_automaton(self):
start = State('start')
for automaton in self.regexs:
start.add_epsilon_transition(automaton)
return start.to_deterministic()
def _walk(self, string):
state = self.automaton
final_lex = lex = ''
token_type = None
priority = 500000
for symbol in string:
try:
state = state.get(symbol)
lex += symbol
if state.final:
if state.tag is not None:
for tag in state.tag:
if lex == str(tag[1]) and str(tag[1]) != 'id':
priority, token_type = tag
final_lex = lex
break
elif tag[0] <= priority:
priority, token_type = tag
final_lex = lex
except KeyError:
break
return token_type, final_lex
def _tokenize(self, text):
while len(text) > 0:
token_type, lex = self._walk(text)
yield lex, token_type
text = text[len(lex):]
yield '$', self.eof
def __call__(self, text):
return [Token(lex, ttype) for lex, ttype in self._tokenize(text)]
| [
"adrianportales135@gmail.com"
] | adrianportales135@gmail.com |
f271b8ae35a2d87f5a6edfd3a2164f29bfca683e | 5781bda84c1af759e7b0284f0489d50e68044c89 | /app/model/network.py | 0fd902e34350e3e8251d9ad86c8abc47d54292d6 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | Stanford-PERTS/triton | 43306a582630ac6ef8d2d14c8b2a56279335a7fb | 5a4f401fc7019d59ce4c41eafa6c5bda822fae0a | refs/heads/master | 2022-10-17T11:51:10.220048 | 2020-06-14T17:37:54 | 2020-06-14T17:37:54 | 272,251,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,984 | py | """
Network
===========
Network, one-to-one with its team, with all classroom's students participating,
comprised of growth conditions.
"""
import logging
import string
from model import SqlModel, SqlField as Field
import mysql_connection
import os_random
class InvalidNetworkAssociation(Exception):
"""Provided id(s) are circular, of the wrong kind, or otherwise invalid."""
pass
class Network(SqlModel):
table = 'network'
py_table_definition = {
'table_name': table,
'fields': [
# name, type, length, unsigned, null, default, on_update
Field('uid', 'varchar', 50, None, False, None, None),
Field('short_uid', 'varchar', 50, None, False, None, None),
Field('created', 'datetime',None, None, False, SqlModel.sql_current_timestamp, None),
Field('modified', 'datetime',None, None, False, SqlModel.sql_current_timestamp, SqlModel.sql_current_timestamp),
Field('name', 'varchar', 200, None, False, None, None),
Field('program_id', 'varchar', 50, None, False, None, None),
Field('association_ids','varchar',3500, None, False, '[]', None),
Field('code', 'varchar', 50, None, False, None, None),
],
'primary_key': ['uid'],
'indices': [
{
'unique': True,
'name': 'code',
'fields': ['code'],
},
],
'engine': 'InnoDB',
'charset': 'utf8mb4',
'collate': 'utf8mb4_unicode_ci',
}
json_props = ['association_ids']
@classmethod
def create(klass, **kwargs):
if 'code' not in kwargs:
kwargs['code'] = klass.generate_unique_code()
# else the code is specified, and if it's a duplicate, MySQL will raise
# an exception b/c there's a unique index on that field.
return super(klass, klass).create(**kwargs)
@classmethod
def generate_unique_code(klass):
chars = string.ascii_uppercase + string.digits
for x in range(5):
code = ''.join(os_random.choice(chars) for x in range(6))
matches = klass.get(code=code)
if len(matches) == 0:
break
if len(matches) > 0:
raise Exception("After five tries, could not generate a unique"
"network invitation code.")
return code
@classmethod
def query_by_user(klass, user, program_id=None):
if len(user.owned_networks) == 0:
return []
query = '''
SELECT *
FROM `{table}`
WHERE `uid` IN ({ids}) {program_clause}
ORDER BY `name`
'''.format(
table=klass.table,
ids=','.join('%s' for uid in user.owned_networks),
program_clause='AND `program_id` = %s' if program_id else ''
)
params = tuple(user.owned_networks +
([program_id] if program_id else []))
with mysql_connection.connect() as sql:
row_dicts = sql.select_query(query, params)
return [klass.row_dict_to_obj(d) for d in row_dicts]
def before_put(self, init_kwargs, *args, **kwargs):
# Allow this to raise an exception to prevent bad associations from
# being saved.
self.associated_organization_ids(pending_network=self)
if self.uid in self.association_ids:
raise InvalidNetworkAssociation(
"Networks can't reference themselves: {}".format(self.uid)
)
def associated_organization_ids(self, depth=0, pending_network=None):
"""Traverse all network-to-network relationships to associated orgs.
Returns a flat and unique list of org ids.
"""
# While we support network-to-network, this recursive function could
# generate many inefficient db calls if we get carried away.
if depth >= 4:
raise InvalidNetworkAssociation(
"Too much depth in network associations: {}"
.format(self.uid)
)
org_ids = set()
for assc_id in self.association_ids:
kind = SqlModel.get_kind(assc_id)
if kind == 'Network':
# Note! This function is often run as a before_put check that
# the associations are valid. This means we have to consider
# the as-of-yet-unsaved "root" network (the `pending_network`)
# and not any version of it we might fetch from the db in order
# to catch the introduction of circular references.
if pending_network and assc_id == pending_network.uid:
child_network = pending_network
else:
child_network = Network.get_by_id(assc_id)
if child_network:
child_org_ids = child_network.associated_organization_ids(
depth=depth + 1,
pending_network=pending_network,
)
org_ids.update(child_org_ids)
else:
# No exception here because we don't want Networks to
# become unusable if an associated thing gets deleted.
# @todo: consider having this actually remove the
# association ids from the list.
logging.warning(
"Bad reference in {}: association {} doesn't exist."
.format(self.uid, assc_id)
)
elif kind == 'Organization':
org_ids.add(assc_id)
else:
raise InvalidNetworkAssociation(
"Invalid association kind: {}".format(kind))
return org_ids
| [
"chris@perts.net"
] | chris@perts.net |
0b79ac5396347490fd626f07f421f6e523df339e | 8edeea75ebd28e4829e6640b51b191c7f96db2d8 | /software_design/assignment5/assignment_5_files-mod/impl2.py~ | bc23b1d02af64e18210f31e4312dc43804a24ffc | [] | no_license | neppramod/etsu-projects | e2c5e93f714bb4a5c9033d860106561118328dd8 | 7c3790d7d8e56374f984fc587d08f7d22dbbcc7f | refs/heads/master | 2021-01-20T20:36:06.304770 | 2016-06-14T22:16:39 | 2016-06-14T22:16:39 | 61,149,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,291 | class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances: # instantiate shared class instance on first object instantiation
Singleton._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
else: # reinvoke __init__ for the lone instance on subsequent instantiations. remove clause if re-init not desired
Singleton._instances[cls].__init__(*args, **kwargs)
return Singleton._instances[cls]
class FileCabinet(metaclass=Singleton):
_me = "?? FileCabinet.{}: "
_fileListCollection = []
def __init__(self, *args, *fileList):
rept = []
for filename, position in fileList:
if filename in FileCabinet._fileListCollection:
rept.append(filename)
else:
FileCabinet._fileListCollection.append(filename)
if len(rept) > 0:
self.assert_defined("init", rept)
def assert_defined(self, routine, fileError):
assert 'value' in dir(self), (FileCabinet._me + "duplicate filename in filespec. {}").format(routine, fileError)
def print_files(self):
for file in FileCabinet._fileListCollection:
print("{0}".format(file))
fc = FileCabinet(['a', 'b'])
| [
"nepalp@goldmail.etsu.edu"
] | nepalp@goldmail.etsu.edu | |
e2d092698c224e507ea31bfb207d4ece530bab92 | 9b3c4a6035f137015b0f3a4836ac1ed5a83e6047 | /test/_bsd.py | 03915b1ba004cc8bf192b23e5deda6cf39a9c99d | [
"BSD-3-Clause"
] | permissive | goodtiding5/psutil | 8b1b5e9bc439aad7f730290e8ff697006fd9bfd9 | 6892e9e0a841cba62f64e55aff6b4f8c807e314f | refs/heads/master | 2021-01-18T06:55:51.823760 | 2016-02-04T21:04:58 | 2016-02-04T21:04:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,758 | py | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO: (FreeBSD) add test for comparing connections with 'sockstat' cmd.
"""Tests specific to all BSD platforms. These are implicitly run by
test_psutil.
py."""
import datetime
import os
import subprocess
import sys
import time
import psutil
from psutil._common import BSD
from psutil._common import FREEBSD
from psutil._common import NETBSD
from psutil._common import OPENBSD
from psutil._compat import PY3
from test_psutil import get_test_subprocess
from test_psutil import MEMORY_TOLERANCE
from test_psutil import reap_children
from test_psutil import retry_before_failing
from test_psutil import sh
from test_psutil import unittest
from test_psutil import which
PAGESIZE = os.sysconf("SC_PAGE_SIZE")
if os.getuid() == 0: # muse requires root privileges
MUSE_AVAILABLE = which('muse')
else:
MUSE_AVAILABLE = False
def sysctl(cmdline):
"""Expects a sysctl command with an argument and parse the result
returning only the value of interest.
"""
result = sh("sysctl " + cmdline)
if FREEBSD:
result = result[result.find(": ") + 2:]
elif OPENBSD or NETBSD:
result = result[result.find("=") + 1:]
try:
return int(result)
except ValueError:
return result
def muse(field):
"""Thin wrapper around 'muse' cmdline utility."""
out = sh('muse')
for line in out.split('\n'):
if line.startswith(field):
break
else:
raise ValueError("line not found")
return int(line.split()[1])
# =====================================================================
# --- All BSD*
# =====================================================================
@unittest.skipUnless(BSD, "not a BSD system")
class BSDSpecificTestCase(unittest.TestCase):
"""Generic tests common to all BSD variants."""
@classmethod
def setUpClass(cls):
cls.pid = get_test_subprocess().pid
@classmethod
def tearDownClass(cls):
reap_children()
def test_process_create_time(self):
cmdline = "ps -o lstart -p %s" % self.pid
p = subprocess.Popen(cmdline, shell=1, stdout=subprocess.PIPE)
output = p.communicate()[0]
if PY3:
output = str(output, sys.stdout.encoding)
start_ps = output.replace('STARTED', '').strip()
start_psutil = psutil.Process(self.pid).create_time()
start_psutil = time.strftime("%a %b %e %H:%M:%S %Y",
time.localtime(start_psutil))
self.assertEqual(start_ps, start_psutil)
def test_disks(self):
# test psutil.disk_usage() and psutil.disk_partitions()
# against "df -a"
def df(path):
out = sh('df -k "%s"' % path).strip()
lines = out.split('\n')
lines.pop(0)
line = lines.pop(0)
dev, total, used, free = line.split()[:4]
if dev == 'none':
dev = ''
total = int(total) * 1024
used = int(used) * 1024
free = int(free) * 1024
return dev, total, used, free
for part in psutil.disk_partitions(all=False):
usage = psutil.disk_usage(part.mountpoint)
dev, total, used, free = df(part.mountpoint)
self.assertEqual(part.device, dev)
self.assertEqual(usage.total, total)
# 10 MB tollerance
if abs(usage.free - free) > 10 * 1024 * 1024:
self.fail("psutil=%s, df=%s" % (usage.free, free))
if abs(usage.used - used) > 10 * 1024 * 1024:
self.fail("psutil=%s, df=%s" % (usage.used, used))
def test_cpu_count_logical(self):
syst = sysctl("hw.ncpu")
self.assertEqual(psutil.cpu_count(logical=True), syst)
def test_virtual_memory_total(self):
num = sysctl('hw.physmem')
self.assertEqual(num, psutil.virtual_memory().total)
# =====================================================================
# --- FreeBSD
# =====================================================================
@unittest.skipUnless(FREEBSD, "not a FreeBSD system")
class FreeBSDSpecificTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pid = get_test_subprocess().pid
@classmethod
def tearDownClass(cls):
reap_children()
def test_boot_time(self):
s = sysctl('sysctl kern.boottime')
s = s[s.find(" sec = ") + 7:]
s = s[:s.find(',')]
btime = int(s)
self.assertEqual(btime, psutil.boot_time())
@retry_before_failing()
def test_memory_maps(self):
out = sh('procstat -v %s' % self.pid)
maps = psutil.Process(self.pid).memory_maps(grouped=False)
lines = out.split('\n')[1:]
while lines:
line = lines.pop()
fields = line.split()
_, start, stop, perms, res = fields[:5]
map = maps.pop()
self.assertEqual("%s-%s" % (start, stop), map.addr)
self.assertEqual(int(res), map.rss)
if not map.path.startswith('['):
self.assertEqual(fields[10], map.path)
def test_exe(self):
out = sh('procstat -b %s' % self.pid)
self.assertEqual(psutil.Process(self.pid).exe(),
out.split('\n')[1].split()[-1])
def test_cmdline(self):
out = sh('procstat -c %s' % self.pid)
self.assertEqual(' '.join(psutil.Process(self.pid).cmdline()),
' '.join(out.split('\n')[1].split()[2:]))
def test_uids_gids(self):
out = sh('procstat -s %s' % self.pid)
euid, ruid, suid, egid, rgid, sgid = out.split('\n')[1].split()[2:8]
p = psutil.Process(self.pid)
uids = p.uids()
gids = p.gids()
self.assertEqual(uids.real, int(ruid))
self.assertEqual(uids.effective, int(euid))
self.assertEqual(uids.saved, int(suid))
self.assertEqual(gids.real, int(rgid))
self.assertEqual(gids.effective, int(egid))
self.assertEqual(gids.saved, int(sgid))
# --- virtual_memory(); tests against sysctl
@retry_before_failing()
def test_vmem_active(self):
syst = sysctl("vm.stats.vm.v_active_count") * PAGESIZE
self.assertAlmostEqual(psutil.virtual_memory().active, syst,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_inactive(self):
syst = sysctl("vm.stats.vm.v_inactive_count") * PAGESIZE
self.assertAlmostEqual(psutil.virtual_memory().inactive, syst,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_wired(self):
syst = sysctl("vm.stats.vm.v_wire_count") * PAGESIZE
self.assertAlmostEqual(psutil.virtual_memory().wired, syst,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_cached(self):
syst = sysctl("vm.stats.vm.v_cache_count") * PAGESIZE
self.assertAlmostEqual(psutil.virtual_memory().cached, syst,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_free(self):
syst = sysctl("vm.stats.vm.v_free_count") * PAGESIZE
self.assertAlmostEqual(psutil.virtual_memory().free, syst,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_buffers(self):
syst = sysctl("vfs.bufspace")
self.assertAlmostEqual(psutil.virtual_memory().buffers, syst,
delta=MEMORY_TOLERANCE)
# --- virtual_memory(); tests against muse
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
def test_muse_vmem_total(self):
num = muse('Total')
self.assertEqual(psutil.virtual_memory().total, num)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_muse_vmem_active(self):
num = muse('Active')
self.assertAlmostEqual(psutil.virtual_memory().active, num,
delta=MEMORY_TOLERANCE)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_muse_vmem_inactive(self):
num = muse('Inactive')
self.assertAlmostEqual(psutil.virtual_memory().inactive, num,
delta=MEMORY_TOLERANCE)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_muse_vmem_wired(self):
num = muse('Wired')
self.assertAlmostEqual(psutil.virtual_memory().wired, num,
delta=MEMORY_TOLERANCE)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_muse_vmem_cached(self):
num = muse('Cache')
self.assertAlmostEqual(psutil.virtual_memory().cached, num,
delta=MEMORY_TOLERANCE)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_muse_vmem_free(self):
num = muse('Free')
self.assertAlmostEqual(psutil.virtual_memory().free, num,
delta=MEMORY_TOLERANCE)
@unittest.skipUnless(MUSE_AVAILABLE, "muse cmdline tool is not available")
@retry_before_failing()
def test_muse_vmem_buffers(self):
num = muse('Buffer')
self.assertAlmostEqual(psutil.virtual_memory().buffers, num,
delta=MEMORY_TOLERANCE)
# =====================================================================
# --- OpenBSD
# =====================================================================
@unittest.skipUnless(OPENBSD, "not an OpenBSD system")
class OpenBSDSpecificTestCase(unittest.TestCase):
def test_boot_time(self):
s = sysctl('kern.boottime')
sys_bt = datetime.datetime.strptime(s, "%a %b %d %H:%M:%S %Y")
psutil_bt = datetime.datetime.fromtimestamp(psutil.boot_time())
self.assertEqual(sys_bt, psutil_bt)
def main():
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(BSDSpecificTestCase))
if FREEBSD:
test_suite.addTest(unittest.makeSuite(FreeBSDSpecificTestCase))
elif OPENBSD:
test_suite.addTest(unittest.makeSuite(OpenBSDSpecificTestCase))
result = unittest.TextTestRunner(verbosity=2).run(test_suite)
return result.wasSuccessful()
if __name__ == '__main__':
if not main():
sys.exit(1)
| [
"g.rodola@gmail.com"
] | g.rodola@gmail.com |
87d83a8b5ef1b1e9ae917f9d3e33ac00a36a5d63 | a6bb343f829b89656ee945b211b9d3cea3fffe8a | /machine_learning_nanodegree/modulo-03/aula-03/programa-3.21-age_net_worths.py | 2447bb1a6c702ffe770480c6b5f5853f0324d452 | [
"MIT"
] | permissive | abrantesasf/udacity | e6c56e42b507d9f7e4efd65f5f74570900b22ca8 | 5e6a15ad6c9346ebe4645767cc0a29606f5fba43 | refs/heads/master | 2022-04-03T14:25:48.606630 | 2020-02-12T20:32:53 | 2020-02-12T20:32:53 | 110,899,278 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 19 00:27:43 2018
@author: abrantesasf
"""
import numpy
import random
def ageNetWorthData():
random.seed(42)
numpy.random.seed(42)
ages = []
for ii in range(100):
ages.append( random.randint(20,65) )
net_worths = [ii * 6.25 + numpy.random.normal(scale=40.) for ii in ages]
### need massage list into a 2d numpy array to get it to work in LinearRegression
ages = numpy.reshape( numpy.array(ages), (len(ages), 1))
net_worths = numpy.reshape( numpy.array(net_worths), (len(net_worths), 1))
from sklearn.cross_validation import train_test_split
ages_train, ages_test, net_worths_train, net_worths_test = train_test_split(ages, net_worths)
return ages_train, ages_test, net_worths_train, net_worths_test | [
"abrantesasf@gmail.com"
] | abrantesasf@gmail.com |
104f7e2cb1907ba7cfaca6f2694fe5c312dd8623 | a5d905e8558150be5ece8bf146c1a8ff79890a93 | /counting.py | df4e167d921ff7de64eb89b05c0c842c64980a44 | [] | no_license | sane03/Data-Intensive-Computing-in-Data-Science | 729338f54b6a32460aaeb0b90dc85b1c099f7585 | 79b3380fdc8ce326594d1b9aeac2d914c6877c2c | refs/heads/master | 2020-04-25T00:07:31.630129 | 2019-05-17T21:25:24 | 2019-05-17T21:25:24 | 172,367,936 | 0 | 0 | null | 2019-04-16T08:57:55 | 2019-02-24T17:31:06 | C++ | UTF-8 | Python | false | false | 638 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 11 15:03:16 2019
@author:
"""
from mrjob.job import MRJob
import time
import re
WORD_RE = re.compile(r"[\w']+")
class MRWordFreqCount(MRJob):
def mapper(self, _, line):
for word in WORD_RE.findall(line):
yield word.lower(), 1
def combiner(self, word, counts):
yield word, sum(counts)
def reducer(self, word, counts):
yield word, sum(counts)
if __name__ == '__main__':
starttime = time.time()
MRWordFreqCount.run()
endtime = time.time()
duration = endtime-starttime
print ("Time: ", duration)
| [
"noreply@github.com"
] | noreply@github.com |
2a47e879d137e708d030f6ac083e3e1fd82f3bed | 975b47ec9df9a0e2ca0f4485d5025fa0ba9d73ee | /tcutils/threading.py | af5c2e86c3079fbe5ffc59aca9c71b462fb83a7e | [] | no_license | thomascury/TCUtils | 1dd5129a41e135d4c635f79783731d90b1363c96 | aab4c52855f79efd907341752323437c111f3dbd | refs/heads/master | 2022-04-26T05:26:46.274712 | 2022-04-01T07:57:22 | 2022-04-01T07:57:22 | 48,697,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,830 | py | import datetime
import traceback
from concurrent import futures
from time import sleep
from functools import total_ordering
from tabulate import tabulate
from queue import Queue, PriorityQueue
from threading import Event
import warnings
def warning_on_one_line(message, category, filename, lineno, file=None, line=None):
return ' %s:%s: %s:%s' % (filename, lineno, category.__name__, message)
warnings.formatwarning = warning_on_one_line
@total_ordering
class Sorted:
def __init__(self, priority):
self.priority = priority
def __eq__(self, other):
return self.priority == other.priority
def __lt__(self, other):
return self.priority < other.priority # Less is top
class Result(Sorted):
def __init__(self, ces, headers, result):
super(Result, self).__init__(ces)
self.headers = headers
self.result = result
def __repr__(self):
return tabulate(self.result, self.result)
class Results:
def __init__(self):
self.list = []
self.headers = None
self.results = None
def add(self, result):
self.list.append(result)
if self.headers is None:
self.headers = result.headers
elif self.headers != result.headers:
raise ValueError("Headers does not match")
if self.results is None:
self.results = result.result
else:
self.results.append(result.result)
def sort(self):
self.results = list(map(lambda r: r.result, sorted(self.list)))
def __repr__(self):
if self.results is None:
return None
if self.headers is None:
return tabulate(self.results)
return tabulate(self.results, self.headers)
def worker_fn(worker_id, task_queue, result_queue, error_queue, shutdown_event):
print("Worker #{} started at {}.".format(worker_id, datetime.datetime.now()))
try:
while True:
if task_queue.empty() or shutdown_event.isSet():
break
task = task_queue.get_nowait()
try:
result = 10/task
sleep(1)
except (KeyboardInterrupt, SystemExit) as exc:
print("Worker #{}: Caught Interruption {}: {}".format(worker_id, type(exc).__name__, exc))
raise
except Exception as exc:
print("Worker #{}: Caught exception {}: {}".format(worker_id, type(exc).__name__, exc))
error_queue.put_nowait(exc)
else:
print("Worker #{}: found {}".format(worker_id, result))
result_queue.put_nowait(result)
finally:
task_queue.task_done()
finally:
print("Worker #{} stopped at {}.".format(worker_id, datetime.datetime.now()))
def main():
with futures.ThreadPoolExecutor(max_workers=3) as pool:
task_queue = Queue()
result_queue = PriorityQueue()
error_queue = Queue()
for x in range(20, -1, -1):
task_queue.put_nowait(x)
task_queue.put_nowait(0)
shutdown_event = Event()
try:
workers = [pool.submit(worker_fn, worker_id, task_queue, result_queue, error_queue, shutdown_event)
for worker_id in range(1, pool._max_workers+1)]
print("Done spawning workers.")
while task_queue.unfinished_tasks > 0 and False in [f.done() for f in workers]:
futures.wait(workers, 1)
except Exception as exc:
print("Caught exception in main thread, cancelling threads")
shutdown_event.set()
for index, future in enumerate(workers):
future.cancel_thread()
"Worker #{} : cancelling sent.".format(index)
while False in [f.done() for f in workers]:
futures.wait(workers, .5)
sleep(1)
raise exc
# Parse results
results = []
while result_queue.unfinished_tasks > 0:
result = result_queue.get_nowait()
results.append(result)
result_queue.task_done()
nb_match = len(results)
# Parse and log errors
with open("error.log", 'w') as error_log_d:
errors = []
while error_queue.unfinished_tasks > 0:
exception = error_queue.get_nowait()
try:
raise exception
except:
traceback.print_exc(file=error_log_d)
error_log_d.write("\n")
errors.append(exception)
error_queue.task_done()
if len(errors) > 0:
warnings.warn("Errors occured, check the error log.", RuntimeWarning)
print(results)
if __name__ == '__main__':
main()
| [
"thomas@cury.fr"
] | thomas@cury.fr |
062bdeafbafce09bfb0e9e3584190c6255194188 | 10c689b8fc5ff99ea69197553a14124437288343 | /docs/samples/sample1_validation.py | 31c29ff20574078cee7bf126db0c197701c26a09 | [] | no_license | napix/NapixServer | 4e4ec3dbdbc7b1fc885c55b2fd3f093cecc3da1a | e08b09594091e1d86d4de0dacd1c5801f5255253 | refs/heads/master | 2021-05-28T14:29:31.952649 | 2015-04-01T16:02:06 | 2015-04-01T16:02:06 | 15,139,808 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,242 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from napixd.exceptions import ValidationError
from napixd.managers.default import DictManager
class HostManager(DictManager):
"""
Napix Web service to manage the content of the hosts file
"""
FILE_PATH = '/etc/hosts'
resource_fields = {
'hostnames':{
'description':'List of hostname resolving to that IP',
'example':['localhost','localhost.localdomain']
},
'ip':{
'description':'IP of the host',
'example':'127.0.0.1'
}
}
name = 'hosts'
def load( self, parent ):
#open the hosts file and keep a copy
try:
handle = open( self.FILE_PATH, 'rb')
except IOError:
#There is no file, consider it as empty
return {}
self.lines = handle.readlines()
handle.close()
resources = {}
for lineno, line_content in enumerate( map( str.strip, self.lines ), 1):
#filter empty and commented lines
if not line_content or line_content[0] == '#':
continue
line = filter( bool, line_content.replace( '\t', ' ').split(' '))
# Store ID as integers
resources[ lineno ] = {
#first token on the host file is the ip
'ip' : line[0],
#remaining is the list of hostnames
'hostnames' : line[1:]
}
return resources
def generate_new_id( self, resource_dict):
#In order to avoid concurrency issue we add a blank line
# in the lines attribute so that another call
# may not give the same line number
#force load to be run
self.resources
#Add an empty line
self.lines.append('')
#return the position of that line
return len( self.lines)
def save( self, parent, resources):
new_file_content = [ '\n' ] * len( self.lines)
for lineno, original in enumerate( self.lines):
stripped = original.strip()
#Keep the comments in the file
if stripped and stripped[0] == '#':
new_file_content[lineno] = original
continue
res_id = lineno + 1
if stripped and res_id not in resources:
#the resource existed and has been removed
new_file_content[lineno] = '#;deleted: ' + original
continue
if res_id in resources:
#the resource exists and may have been modified
new_file_content[lineno] = ( '%s %s\n' % (
resources[ res_id ][ 'ip' ],
' '.join(resources[ res_id ][ 'hostnames' ]),
))
handle = open( self.FILE_PATH , 'wb')
handle.write( ''.join( new_file_content))
def validate_resource_hostnames( self, hostnames):
if ( not isinstance( hostnames, list) or
not all([ isinstance(x, str) for x in hostnames ])):
raise ValidationError, 'Hostnames have to be an array of strings'
return hostnames
def validate_resource_ip( self, ip):
if not isinstance( ip, str):
raise ValidationError, 'ip have to be a string'
ip_components = ip.split('.')
if len(ip_components) != 4:
# 123.45.67 is not an ip
raise ValidationError, 'Not an ip 1'
try:
ip_components = map(int,ip_components)
except ValueError:
#123.45.lol.99 is not an ip
raise ValidationError, 'Not an ip 2'
if not all([0 <= x <= 255 for x in ip_components]):
#123.45.67.890 is not an ip
raise ValidationError, 'Not an ip 3'
#filter the useless 0 out of 123.045.012.001
return '.'.join(map(str,ip_components))
def validate_id( self, id_):
try:
return int( id_)
except (ValueError, TypeError):
raise ValidationError, 'id have to be integer values'
def configure( self, conf):
self.FILE_PATH = conf.get( 'file_path', self.FILE_PATH )
| [
"gr@enix.fr"
] | gr@enix.fr |
c8f061405c20ff5526666798a3c0f0370a2a5169 | abfab0c0ced6a728396bc50fd5bb751e6432e930 | /signup/models.py | 946134fbed00f0fba3af4c58abe41a1718b3a8e0 | [] | no_license | vikas0694/Django_Registration_page | 69166a1f6c87c7157cf29db020253fd2e934fed9 | 1823cd10cce422fb104d05d970273a035abfbe21 | refs/heads/master | 2020-04-16T06:22:29.349728 | 2019-01-12T04:27:59 | 2019-01-12T04:27:59 | 165,343,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | from django.db import models
class Student(models.Model):
name = models.CharField(max_length=50)
emailid = models.CharField(max_length=50)
def __str__(self):
return self.username
# Create your models here.
| [
"vikas.0694@gmail.com"
] | vikas.0694@gmail.com |
8546e71065ef606ab7db63ccc285ab6cfc62e842 | 6b80bb99c2ba860af56baa4d7fc24000044b1343 | /python3/gini.py | 6ef298214a224e2e9740b52875e24cee4cb5223b | [] | no_license | zhang-yan-talendbj/think-stats | 3b1a3320ff7f56018d88211c4e9df9e20ee8fe3d | 6353ed92310c19354adee662d86db545941e511e | refs/heads/master | 2023-03-18T07:40:43.271511 | 2018-05-03T12:46:03 | 2018-05-03T12:46:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,603 | py | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import math
import sys
import irs
import Pmf
import Cdf
def PmfMean(pmf):
total = 0.0
for val, p in pmf.Items():
total += p * val
return total
def PmfMoment(pmf, mean=None, exponent=2):
if mean is None:
mean = PmfMean(pmf)
total = 0.0
for val, p in pmf.Items():
total += p * (val - mean)**exponent
return total
def RelativeMeanDifference(pmf, mean=None):
if mean is None:
mean = PmfMean(pmf)
diff = Pmf.Pmf()
for v1, p1 in pmf.Items():
for v2, p2 in pmf.Items():
diff.Incr(abs(v1-v2), p1*p2)
print(PmfMean(diff), mean)
return PmfMean(diff) / mean
def SummarizeData(pmf, cdf):
mean = PmfMean(pmf)
print('mean:', mean)
median = cdf.Percentile(50)
print('median:', median)
fraction_below_mean = cdf.Prob(mean)
print('fraction below mean:', fraction_below_mean)
m2 = PmfMoment(pmf, mean, 2)
m3 = PmfMoment(pmf, mean, 3)
sigma = math.sqrt(m2)
print('sigma:', sigma)
g1 = m3 / m2**(3/2)
print('skewness:', g1)
gp = 3 * (mean - median) / sigma
print('Pearsons skewness:', gp)
gini = RelativeMeanDifference(pmf) / 2
print('gini', gini)
def main(script, *args):
data = irs.ReadIncomeFile()
hist, pmf, cdf = irs.MakeIncomeDist(data)
SummarizeData(pmf, cdf)
if __name__ == "__main__":
main(*sys.argv)
| [
"zhanghao163mail@163.com"
] | zhanghao163mail@163.com |
1efc1b5e7b7a0929c03d1ac7b1ef153a2826144d | 174b611e47ed605b02cbefb11f7dc1e16d39e042 | /magazine/migrations/0001_initial.py | 1a2078b9e6099d64423ce4580977000a4f44422b | [] | no_license | yigor/i-help-u | 084af38727231e93afef4cad8d57e949478fbcc5 | b0495da0e054b537be348da89466e258c80e3c67 | refs/heads/master | 2016-09-06T07:15:09.973469 | 2015-05-24T15:05:06 | 2015-05-24T15:05:06 | 11,278,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,933 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Article'
db.create_table(u'magazine_article', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=128)),
('summary', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
('body', self.gf('django.db.models.fields.TextField')()),
('cover_photo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
('home_photo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
('cover_background', self.gf('django.db.models.fields.CharField')(default='#FFFFFF', max_length=8, blank=True)),
('is_main', self.gf('django.db.models.fields.BooleanField')(default=False)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='articles', null=True, to=orm['account.User'])),
('date_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal(u'magazine', ['Article'])
# Adding M2M table for field topics on 'Article'
db.create_table(u'magazine_article_topics', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('article', models.ForeignKey(orm[u'magazine.article'], null=False)),
('topic', models.ForeignKey(orm[u'ihelpu.topic'], null=False))
))
db.create_unique(u'magazine_article_topics', ['article_id', 'topic_id'])
# Adding M2M table for field recommended_vacancies on 'Article'
db.create_table(u'magazine_article_recommended_vacancies', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('article', models.ForeignKey(orm[u'magazine.article'], null=False)),
('vacancy', models.ForeignKey(orm[u'vacancy.vacancy'], null=False))
))
db.create_unique(u'magazine_article_recommended_vacancies', ['article_id', 'vacancy_id'])
# Adding model 'Comment'
db.create_table(u'magazine_comment', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('article', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', to=orm['magazine.Article'])),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='children', null=True, to=orm['magazine.Comment'])),
('body', self.gf('django.db.models.fields.TextField')()),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['account.User'], null=True, blank=True)),
('date_time', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'magazine', ['Comment'])
def backwards(self, orm):
# Deleting model 'Article'
db.delete_table(u'magazine_article')
# Removing M2M table for field topics on 'Article'
db.delete_table('magazine_article_topics')
# Removing M2M table for field recommended_vacancies on 'Article'
db.delete_table('magazine_article_recommended_vacancies')
# Deleting model 'Comment'
db.delete_table(u'magazine_comment')
models = {
u'account.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'activation_key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'hide_contacts': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'i_can': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'i_want': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interests': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ihelpu.Topic']", 'symmetrical': 'False', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'phone_number': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '18', 'blank': 'True'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'web_site': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ihelpu.topic': {
'Meta': {'ordering': "('id',)", 'object_name': 'Topic'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'magazine.article': {
'Meta': {'ordering': "('date_time',)", 'object_name': 'Article'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'to': u"orm['account.User']"}),
'body': ('django.db.models.fields.TextField', [], {}),
'cover_background': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '8', 'blank': 'True'}),
'cover_photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'home_photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_main': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'recommended_vacancies': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['vacancy.Vacancy']", 'symmetrical': 'False', 'blank': 'True'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ihelpu.Topic']", 'symmetrical': 'False', 'blank': 'True'})
},
u'magazine.comment': {
'Meta': {'object_name': 'Comment'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': u"orm['magazine.Article']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['account.User']", 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {}),
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['magazine.Comment']"})
},
u'vacancy.organization': {
'Meta': {'object_name': 'Organization'},
'address_line': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'contact_person': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'cover_background': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '8', 'blank': 'True'}),
'cover_photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}),
'slogan': ('django.db.models.fields.CharField', [], {'max_length': '384', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ihelpu.Topic']", 'symmetrical': 'False', 'blank': 'True'}),
'web_site': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'})
},
u'vacancy.vacancy': {
'Meta': {'object_name': 'Vacancy'},
'cover_background': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '8', 'blank': 'True'}),
'cover_photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_continuous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vacancies'", 'to': u"orm['vacancy.Organization']"}),
'recommended_vacancies': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'recommended_vacancies_rel_+'", 'blank': 'True', 'to': u"orm['vacancy.Vacancy']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ihelpu.Topic']", 'symmetrical': 'False'}),
'volunteers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'vacancies'", 'symmetrical': 'False', 'to': u"orm['account.User']"})
}
}
complete_apps = ['magazine'] | [
"i.sobolev@netrika.ru"
] | i.sobolev@netrika.ru |
cb4cf646eb5e3467f839456e516cb4902c657fe1 | 3bd94c4ef9d73bb6c40e1a5c96dbe94a59ae79c7 | /Python语言程序设计基础/JSON/CSV2JSON.py | 47d17d99ef379544c78449af75e2fd2f25ac5d8d | [] | no_license | pizisuan/Learn-Python | 1131f1fd3dd02f74dd07d2c3973c2adcca1db6ec | 76684d7fbcf8c6d354fbccf5ec378c33cdb14d45 | refs/heads/master | 2021-06-02T15:05:48.449539 | 2019-10-22T03:05:34 | 2019-10-22T03:05:34 | 96,091,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | import json
fr = open("price2016.csv","r")
ls = []
for line in fr:
line = line.replace("\n","")
ls.append(line.split(','))
fr.close()
fw = open("price2016.json","w")
for i in range(1,len(ls)):
ls[i] = dict(zip(ls[0],ls[i]))
json.dump(ls[1:],fw,sort_keys = True,indent = 4,ensure_ascii = False)
fw.close()
| [
"noreply@github.com"
] | noreply@github.com |
46134b5c30ca0b532262f67addad92fdbd03a9eb | 1a1b7f607c5e0783fd1c98c8bcff6460e933f09a | /core/lib/password_lib.py | 6cb7556dd99b92dd6678be4ca31f740a93006b5b | [] | no_license | smrmohammadi/freeIBS | 14fb736fcadfaea24f0acdafeafd2425de893a2d | 7f612a559141622d5042614a62a2580a72a9479b | refs/heads/master | 2021-01-17T21:05:19.200916 | 2014-03-17T03:07:15 | 2014-03-17T03:07:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,452 | py | import crypt
import random
import re
import types
import random
def getPasswords(_count,_type,_len):
"""
generate _count password of _type, and return a list of Password instances
_type(integer): password contains 1: alphabets only, 2: digits only, 3:alphabets + digits
"""
if _type==1:
chars="abcdefghijklmnopqrstuvwxyz"
elif _type==2:
chars="1234567890"
else:
chars="abcdefghijkmnpqrstuvwxyz23456789" #don't include 1&l , 0&o they are hard to distinguish
return map(lambda x:Password(generateRandomPassword(chars,_len)),range(_count))
def generateRandomPassword(chars,_len):
"""
generate a random password from characters in "chars" and length of "_len"
"""
return "".join(map(lambda x:chars[random.randint(0,len(chars)-1)],range(_len)))
class Password:
pass_chars_match=re.compile("[^A-Za-z0-9_\-]")
def __init__(self,password):
self.password=password
def __eq__(self,password_obj):
if type(password_obj)==types.StringType:
password_obj=Password(password_obj)
if self.isMd5Hash():
enc_pass=self.getMd5Crypt()
return enc_pass==password_obj.getMd5Crypt(enc_pass)
elif password_obj.isMd5Hash():
enc_pass=password_obj.getMd5Crypt()
return enc_pass==self.getMd5Crypt(enc_pass)
else:
return self.getPassword()==password_obj.getPassword()
def checkPasswordChars(self):
"""
Check Password characters
return "1" if it's OK and "0" if it's not
"""
if not len(self.password):
return 0
if self.pass_chars_match.search(self.password) != None:
return 0
return 1
def getMd5Crypt(self,salt=None):
"""
md5crypt "self.password" with "salt",
If "salt" is None,a new salt will be randomly generated and used
If "text" is already md5crypted, return it, else return crypted pass
"""
if self.isMd5Hash():
return self.password
else:
return self.__md5Crypt(salt)
def getPassword(self):
return self.password
def __md5Crypt(self,salt):
if salt==None:
salt=self.__generateRandomSalt()
return crypt.crypt(self.password,salt)
def __generateRandomSalt(self):
salt='$1$'
for i in range(8):
rand=random.randint(0,61)
if rand<10:
salt+=str(rand)
elif rand<36:
salt+=chr(rand-10+65)
else:
salt+=chr(rand-36+97)
salt += '$'
return salt
def isMd5Hash(self):
if self.password[0:3]=='$1$':
return 1
return 0 | [
"farshad_kh"
] | farshad_kh |
7292c8b2f5ac0b7e96916f04b5a65237836d49e9 | 766ca0a00ad1df5163306d2d5a6f722bc67002d3 | /mailviews/tests/manage.py | 1549d37fb3ba441106c14033ab25cfa33112d0f1 | [
"Apache-2.0"
] | permissive | agroptima/django-mailviews | 8999746eff926661635160eee7b743331737f0bc | b75fabadad66a697592abb98a417f6efec55a88d | refs/heads/master | 2021-01-24T12:03:52.787509 | 2019-11-13T13:49:15 | 2019-11-13T13:49:15 | 123,114,820 | 1 | 0 | Apache-2.0 | 2019-11-13T13:49:17 | 2018-02-27T10:43:48 | Python | UTF-8 | Python | false | false | 396 | py | #!/usr/bin/env python
import logging
import sys
from mailviews.tests import settings
logging.basicConfig(level=logging.DEBUG)
if __name__ == "__main__":
try:
from django.core.management import execute_manager
execute_manager(settings)
except ImportError:
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"ted@kaemming.com"
] | ted@kaemming.com |
cc0fb46ff5db8ade6543ea0532d8fbe428fb8aff | 3d4da4a5dfff22eb272f68947f7ae54a1b9c87e7 | /polls/migrations/0015_order_ref_code.py | 418055ffe12f5e2953fbb963e350b28fa0b29545 | [] | no_license | trantuananhvn93/django_first_app | e1b0eb2d30fb4b04425a366a14501998a6807c32 | 43676143cc435958ded0d8c75fbc2b6e072f49a9 | refs/heads/master | 2022-12-14T12:06:21.776412 | 2020-05-06T14:59:46 | 2020-05-06T14:59:46 | 226,743,564 | 0 | 0 | null | 2022-12-08T03:17:04 | 2019-12-08T22:51:32 | JavaScript | UTF-8 | Python | false | false | 397 | py | # Generated by Django 3.0 on 2019-12-09 09:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0014_userprofile'),
]
operations = [
migrations.AddField(
model_name='order',
name='ref_code',
field=models.CharField(blank=True, max_length=20, null=True),
),
]
| [
"TRAN.TuanAnh.vn93@gmail.com"
] | TRAN.TuanAnh.vn93@gmail.com |
17c8fd8389e918362c50a26cc24b9369815a1a80 | 2dd26e031162e75f37ecb1f7dd7f675eeb634c63 | /examples/asr/asr_hybrid_transducer_ctc/speech_to_text_hybrid_rnnt_ctc_bpe.py | 2de150c7132853121bcc899167c134fc7ffb54d0 | [
"Apache-2.0"
] | permissive | NVIDIA/NeMo | 1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1 | c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7 | refs/heads/main | 2023-08-21T15:28:04.447838 | 2023-08-21T00:49:36 | 2023-08-21T00:49:36 | 200,722,670 | 7,957 | 1,986 | Apache-2.0 | 2023-09-14T18:49:54 | 2019-08-05T20:16:42 | Python | UTF-8 | Python | false | false | 3,432 | py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Preparing the Tokenizer for the dataset
Use the `process_asr_text_tokenizer.py` script under <NEMO_ROOT>/scripts/tokenizers/ in order to prepare the tokenizer.
```sh
python <NEMO_ROOT>/scripts/tokenizers/process_asr_text_tokenizer.py \
--manifest=<path to train manifest files, seperated by commas>
OR
--data_file=<path to text data, seperated by commas> \
--data_root="<output directory>" \
--vocab_size=<number of tokens in vocabulary> \
--tokenizer=<"spe" or "wpe"> \
--no_lower_case \
--spe_type=<"unigram", "bpe", "char" or "word"> \
--spe_character_coverage=1.0 \
--log
```
# Training the model
```sh
python speech_to_text_hybrid_rnnt_ctc_bpe.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath=<path to train manifest> \
model.validation_ds.manifest_filepath=<path to val/test manifest> \
model.tokenizer.dir=<path to directory of tokenizer (not full path to the vocab file!)> \
model.tokenizer.type=<either bpe or wpe> \
model.aux_ctc.ctc_loss_weight=0.3 \
trainer.devices=-1 \
trainer.max_epochs=100 \
model.optim.name="adamw" \
model.optim.lr=0.001 \
model.optim.betas=[0.9,0.999] \
model.optim.weight_decay=0.0001 \
model.optim.sched.warmup_steps=2000
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="<Name of experiment>" \
exp_manager.wandb_logger_kwargs.project="<Name of project>"
```
# Fine-tune a model
For documentation on fine-tuning this model, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/configs.html#fine-tuning-configurations
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf
from nemo.collections.asr.models import EncDecHybridRNNTCTCBPEModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(
config_path="../conf/conformer/hybrid_transducer_ctc/", config_name="conformer_hybrid_transducer_ctc_bpe"
)
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
asr_model = EncDecHybridRNNTCTCBPEModel(cfg=cfg.model, trainer=trainer)
# Initialize the weights of the model from another model, if provided via config
asr_model.maybe_init_from_pretrained_checkpoint(cfg)
trainer.fit(asr_model)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if asr_model.prepare_test(trainer):
trainer.test(asr_model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| [
"noreply@github.com"
] | noreply@github.com |
d369343fe06ed20b429fd4ebd62c24898d09f1c2 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/3/hm-.py | 053741f131237b8d1e07af0c35648012df4dd241 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'hM-':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
fde38ef34f063ea59de6329541a2ace1217cc5f5 | 39db5e60376cc255f06f54d37dd04114486e33f8 | /bomber/controllers/collections.py | 930a11495da949fb7e01dbdfc0f7a65fe1f1583c | [] | no_license | zhoujialefanjiayuan/bomber | 559730a0710408fb8e1d793efe6f33f9d222d649 | 15cd7239bb85aeaf7d2632995ca42c28ff6fbdfd | refs/heads/master | 2022-06-15T04:46:52.542297 | 2020-04-04T03:44:26 | 2020-04-04T03:44:26 | 252,060,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,742 | py | from datetime import datetime
from bottle import get, post, request, abort
from peewee import JOIN
from bomber.auth import check_api_user
from bomber.constant_mapping import (
CallActionType,
AutoListStatus,
SpecialBomber,
Cycle
)
from bomber.plugins import ip_whitelist_plugin
from bomber.db import db
from bomber.models import (
ApplicationStatus,
BombingHistory,
AutoCallList,
Application,
CallActions,
Bomber,
)
from bomber.serializers import (
bombing_history_serializer,
call_history_serializer,
)
from bomber.validator import collection_validator, cs_ptp_validator
from bomber.controllers.asserts import set_ptp_for_special_bomber
from bomber.utils import get_cycle_by_overdue_days
@get('/api/v1/applications/<app_id:int>/collection_history')
def get_collection_history(bomber, application):
result = (CallActions.select(CallActions, Bomber)
.join(Bomber, JOIN.INNER, on=(CallActions.bomber_id == Bomber.id)
.alias('operator'))
.where(CallActions.application == application.id)
.order_by(-CallActions.created_at))
return call_history_serializer.dump(result, many=True).data
@get('/api/v1/applications/<app_id:int>/user_collection_history')
def get_user_collection_history(bomber, application):
bombing_history = BombingHistory.filter(
BombingHistory.application != application.id,
BombingHistory.user_id == application.user_id,
).order_by(-BombingHistory.created_at)
return bombing_history_serializer.dump(bombing_history, many=True).data
@post('/api/v1/applications/<app_id:int>/collection_history')
def add_collection_history(bomber, application):
form = collection_validator(request.json)
with db.atomic():
promised_date = (form['promised_date']
if 'promised_date' in form else None)
remark = form['remark'] if 'remark' in form else None
promised_amount = (form['promised_amount']
if 'promised_amount' in form
else None)
if promised_date:
real_cycle = get_cycle_by_overdue_days(application.overdue_days)
if real_cycle > application.cycle:
abort(400, 'Can not extend PTP')
bombing_history = BombingHistory.create(
application=application.id,
user_id=application.user_id,
ektp=application.id_ektp,
cycle=application.cycle,
bomber=bomber.id,
promised_amount=promised_amount,
promised_date=promised_date,
follow_up_date=form['follow_up_date'],
result=form['result'],
remark=remark,
)
CallActions.create(
cycle=application.cycle,
bomber_id=bomber.id,
call_record_id=bombing_history.id,
application=application.id,
note=remark,
promised_amount=promised_amount,
promised_date=promised_date,
follow_up_date=form['follow_up_date']
)
if 'promised_amount' in form:
application.promised_amount = form['promised_amount']
if 'promised_date' in form:
if bomber.id == SpecialBomber.OLD_APP_BOMBER.value:
set_ptp_for_special_bomber(application.id, promised_date)
if application.cycle == Cycle.C1A.value:
# 1a不允许员工续p
if (application.promised_date and
application.promised_date.date() >= datetime.now().date()):
abort(400, "Can not extend PTP")
# 1a p过期件给新下p的人
if (application.promised_date and
application.promised_date.date() < datetime.now().date()):
application.latest_bomber = bomber.id
# 下p时没有latest_bomber,件分给下p的人
if not application.latest_bomber:
application.latest_bomber = bomber.id
application.promised_date = form['promised_date']
application.latest_call = bomber.id
application.ptp_bomber = bomber.id
application.status = ApplicationStatus.PROCESSING.value
if form['promised_date'] >= datetime.today().date():
update_auto_call_list = (
AutoCallList
.update(status=AutoListStatus.REMOVED.value,
description='set ptp')
.where(AutoCallList.application == application.id)
)
update_auto_call_list.execute()
application.follow_up_date = form['follow_up_date']
# 更新自动呼出队列
update_auto_call_list = (
AutoCallList
.update(follow_up_date=form['follow_up_date'],
description='set followed up date')
.where(AutoCallList.application == application.id)
)
update_auto_call_list.execute()
application.latest_bombing_time = datetime.now()
application.save()
return bombing_history_serializer.dump(bombing_history).data
@post('/api/v1/applications/<app_id:int>/cs-ptp', skip=[ip_whitelist_plugin])
def add_collection_history(app_id):
check_api_user()
form = cs_ptp_validator(request.json)
application = (Application
.filter(Application.external_id == app_id,
Application.status != ApplicationStatus.REPAID.value)
.first())
if not application:
# 如果这个件 在 cs ptp 的时候还没有进入催收 则 直接添加记录
bombing_history = BombingHistory.create(
application=app_id,
bomber=72,
promised_date=form['promised_date'],
follow_up_date=form['promised_date'],
)
return bombing_history_serializer.dump(bombing_history).data
with db.atomic():
bombing_history = BombingHistory.create(
application=application.id,
user_id=application.user_id,
ektp=application.id_ektp,
cycle=application.cycle,
bomber=72,
promised_date=form['promised_date'],
follow_up_date=form['promised_date'],
)
CallActions.create(
cycle=application.cycle,
bomber_id=72,
call_record_id=bombing_history.id,
application=application.id,
promised_date=form['promised_date'],
follow_up_date=form['promised_date'],
)
application.promised_date = form['promised_date']
if form['promised_date'] >= datetime.today().date():
update_auto_call_list = (
AutoCallList
.update(status=AutoListStatus.REMOVED.value,
description='set ptp')
.where(AutoCallList.application == application.id)
)
update_auto_call_list.execute()
application.follow_up_date = form['promised_date']
# 更新自动呼出队列
update_auto_call_list = (
AutoCallList
.update(follow_up_date=form['promised_date'],
description='set followed up date')
.where(AutoCallList.application == application.id)
)
update_auto_call_list.execute()
if not application.latest_bomber:
application.latest_bomber = 72
if application.status == ApplicationStatus.UNCLAIMED.value:
application.status = ApplicationStatus.PROCESSING.value
application.save()
return bombing_history_serializer.dump(bombing_history).data
| [
"lengyueji@ikidana.com"
] | lengyueji@ikidana.com |
70cd2745d7a4e78a9fe4b7beb1e88aec528dfd19 | be320ac66cc4da3c8cc921006a2c0f98ad0b139c | /lessons/app.py | d4cd4a746bf3a67b464b3cfae6c05022af997ef8 | [] | no_license | annaS000/surfs_up | e64d8193d0f29a55fa1eeb9c7b32f5a189f7bbad | a64ef40c543f6188ea03ac87dd358133025ebc05 | refs/heads/main | 2023-08-30T16:25:11.702794 | 2021-10-10T04:51:02 | 2021-10-10T04:51:02 | 410,977,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,320 | py | from flask import Flask, jsonify
import datetime as dt
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
#access sqlite database
engine = create_engine("sqlite:///hawaii.sqlite")
#reflect databases into classes and tables
Base = automap_base()
Base.prepare(engine, reflect=True)
Measurement = Base.classes.measurement
Station = Base.classes.station
#create a session link from python to database
session = Session(engine)
#create app
app = Flask(__name__)
#define welcome route
@app.route('/')
def welcome():
return(
'''
Welcome to the Climate Analysis API!
Available Routes:
/api/v1.0/precipitation
/api/v1.0/stations
/api/v1.0/tobs
/api/v1.0/temp/start/end
''')
#precipitation route
@app.route("/api/v1.0/precipitation")
def precipitation():
prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
precipitation = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date >= prev_year).all()
precip = {date: prcp for date, prcp in precipitation}
return jsonify(precip)
#stations route
@app.route("/api/v1.0/stations")
def stations():
results = session.query(Station.station).all()
stations = list(np.ravel(results))
return jsonify(stations=stations)
#monthly temp route
@app.route("/api/v1.0/tobs")
def temp_monthly():
prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
results = session.query(Measurement.tobs).\
filter(Measurement.station == 'USC00519281').\
filter(Measurement.date >= prev_year).all()
temps = list(np.ravel(results))
return jsonify(temps=temps)
#summary stats
@app.route("/api/v1.0/temp/<start>")
@app.route("/api/v1.0/temp/<start>/<end>")
def stats(start=None, end=None):
sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
if not end:
results = session.query(*sel).\
filter(Measurement.date >= start).all()
temps = list(np.ravel(results))
return jsonify(temps)
results = session.query(*sel).\
filter(Measurement.date >= start).\
filter(Measurement.date <= end).all()
temps = list(np.ravel(results))
return jsonify(temps)
| [
"sand7anna@outlook.com"
] | sand7anna@outlook.com |
3380f58dacc0e664efb5a2d63b0cd49b37e28241 | d0ae1156cd8a9273418a8ae383b4c165c5a5b54f | /Python Basics Loops, Lists, Logic, Quiz etc/Playing Sounds on Windows/Play_windows_7_system_sound_effects.py | a5a633e9339a2c52552f7aeba4d7b8b8ddf4c25c | [] | no_license | STJRush/handycode | e63aead20daaad6c52cba7199fcdfe8c90abb0c4 | 56adc99d6c2201397bb38e50885d1de6266fdeac | refs/heads/master | 2022-12-10T14:37:16.690632 | 2022-12-07T01:48:50 | 2022-12-07T01:48:50 | 91,385,270 | 7 | 18 | null | 2021-09-21T15:03:31 | 2017-05-15T21:11:45 | Python | UTF-8 | Python | false | false | 310 | py | import winsound
winsound.PlaySound('sound.wav', winsound.SND_FILENAME)
# Lets you use windows sound.
# look up winsound for other sounds
"""
Here's some more sounds
'SystemAsterisk' Asterisk
'SystemExclamation' Exclamation
'SystemExit' Exit Windows
'SystemHand' Critical Stop
'SystemQuestion' Question
"""
| [
"noreply@github.com"
] | noreply@github.com |
e000bcf1bfe5e0f03b0cc8a584f325a2051a6376 | b7b2f80ab5e1ee0ea028576e3014b62b8d3a8d7e | /neural/neural-005/neunet.py | 14c865c2367af10d1782c0e97d545ba6a6697690 | [] | no_license | pglen/pgpygtk | 4d1405478a714f003984cf3e3db04ff1f767470b | 33f58010e304f1a312f2356de453ecedb7aa21ef | refs/heads/master | 2021-01-22T01:18:52.238415 | 2019-01-01T01:37:24 | 2019-01-01T01:37:24 | 102,215,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,780 | py | #!/usr/bin/env python
# ------------------------------------------------------------------------
# Neural network
import sys
from neulev import *
# ------------------------------------------------------------------------
# Globals
verbose = 0
pgdebug = 0
def pn(num):
return "%+0.3f" % num
# ------------------------------------------------------------------------
# The whole net:
# __ __
# --| | /------| |
# | |---x | |-----
# --|__| \ /----|__|
# __ / __
# --| | / \----| |
# | |---x | |-----
# --|__| \------|__|
#
class neunet():
# --------------------------------------------------------------------
# neumap = Spec of the network to create. Layer description in
# tuple in the form of inputs, neurons, outputs
# Generally the number of outputs and neurons match as a neuron is
# defined as a neuron with one output
def __init__(self, neumap):
# Undo related
self.last_neuron = None
self.last_bias = self.last_bias2 = None
self.last_weight = None
self.last_post = None
# Store a copy of the parameters
self.neumap = neumap[:]
self.curr = 0 # Current Neuron in creation progress
# Create neurons
self.levarr = []
for ins, neus, outs in neumap:
if verbose:
print "creating level", self.curr
lev = neulev(self, ins, neus, outs)
self.levarr.append(lev)
self.curr += 1
# Diagnostic dump
def dump(self):
#print self
for bb in self.levarr:
print "Level ", self.curr
for cc in bb.membarr:
print " Neu:", self.curr, cc.num
for dd in cc.tentarr:
print " Tent:",
print " [ in:", pn(dd.input), "w:", pn(dd.weight), "m:", pn(dd.multi), \
"b:", pn(dd.bias), "b2:", pn(dd.bias2), "p:", pn(dd.post), "]"
print
print " ",
print "%+0.3f " % cc.output,
print
# Reverse the last poke
def undo(self):
if self.last_neuron != None:
self.last_neuron.bias = self.last_bias
self.last_neuron.parent.bias = self.last_bias2
self.last_neuron.weight = self.last_weight
self.last_neuron.post = self.last_post
self.last_neuron.multi = self.last_multi
self.last_neuron = None
else:
print "duplicate undo"
# Recalculate whole net
def fire(self):
xlen = len(self.levarr)
for bb in range(xlen-1, -1, -1):
if verbose:
print "firing level", bb
self.levarr[bb].fire()
if bb > 0:
self._transfer(self.levarr[bb], self.levarr[bb - 1])
#print
# Propagate down the net
def _transfer(self, src, targ):
if verbose:
print "transfer src", src.curr, "targ", targ.curr
nlen = len(src.membarr); tlen = len(targ.membarr[0].tentarr)
for aa in range(tlen): # tenticle loop
for bb in range(nlen): # neuron loop
if pgdebug > 3:
print " transfer ", "tent", aa, "neu", bb, "src", bb, src.membarr[bb].output
try:
targ.membarr[bb].tentarr[aa].input = src.membarr[aa].output
except:
print sys.exc_info()
def showin(self):
#print "NeuNet input:",
arr = self.levarr[len(self.levarr) - 1]
for aa in arr.membarr:
for bb in aa.tentarr:
print "%+0.3f" % bb.input,
print
def showout(self):
#print "NeuNet output:",
arr = self.levarr[0]
for aa in arr.membarr:
print "%+0.3f" % aa.output,
print
def getout(self):
ret = []; arr = self.levarr[0]
for aa in arr.membarr:
ret.append(aa.output)
return ret
def sum(self):
xsum = 0.
arr = self.levarr[len(self.levarr) - 1]
for aa in arr.membarr:
xsum += aa.output
return xsum
def randtip(self):
randmemb(self.levarr).randtip()
# --------------------------------------------------------------------
# Set input value on the basis of the data coming in
def setinputbits(self, val):
#print "setinput", val, type(val)
inparr = self.levarr[len(self.levarr)-1];
xlen = len(inparr.membarr);
xshift = 1; xx = 0.
#print "xlen", xlen
for aa in range(xlen):
if val & xshift != 0: xx = 1.
else: xx = 0.
print "bit", aa, ":", xx, " xshift ", xshift
for bb in range(xlen):
inparr.membarr[aa].tentarr[bb].input = xx
xshift <<= 1
print
def setinput(self, val, ignore = True):
#print "setinput", val, type(val)
inparr = self.levarr[len(self.levarr)-1];
xlen = len(inparr.membarr)
ylen = len(inparr.membarr[0].tentarr)
#print xlen, ylen, len(val)
if not ignore:
if xlen * ylen != len(val):
msg = "Input size must match network size of %d " % (xlen * ylen)
raise ValueError(msg)
cnt = 0
for aa in range(xlen):
for bb in range(ylen):
inparr.membarr[aa].tentarr[bb].input = val[cnt]
cnt += 1
# Compare outputs with expected data
def cmp(self, val):
diff = 0; outarr = self.levarr[0].membarr
xlen = len(outarr)
for aa in range(xlen):
diff += abs(val[aa] - outarr[aa].output)
return diff / xlen
# Train this particular input to expected output
def trainone(self, val, passes = 1000):
#print "origin:", ; neu.showout()
cnt = 0; cnt2 = 0
diff = 0.; old_sum = -100.
for aa in range(passes):
self.randtip()
self.fire()
diff = self.cmp(val)
if abs(diff) >= abs(old_sum):
#print sum
self.undo()
#self.fire()
#print "undone:",
else:
print " ", "%+0.3f " % diff,
cnt += 1
#neu.showout()
old_sum = diff
#if diff < 0.01:
# break
cnt2 += 1
print
return cnt
| [
"peterglen99@gmail.com"
] | peterglen99@gmail.com |
cd741b861f90381a5d3ec6e0639544cc40cff50d | e233c5857a5e5bca9bd6a04b6104716d299c4913 | /back/api/serializers.py | cf10a037852dea84ce2c97b2e4ac60991dbd249e | [
"BSD-3-Clause"
] | permissive | maltaesousa/geoshop2 | c31a535657af529cfd9df8f316f82f5569fc2711 | 5fcf7f5de48ddbf5bafe103664a8903244734244 | refs/heads/master | 2023-08-14T05:03:18.695661 | 2023-03-02T10:10:22 | 2023-03-02T10:10:22 | 237,020,362 | 0 | 0 | BSD-3-Clause | 2020-01-29T15:48:02 | 2020-01-29T15:48:01 | null | UTF-8 | Python | false | false | 21,943 | py | import json
import copy
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import PasswordResetForm, SetPasswordForm
from django.contrib.auth.tokens import default_token_generator
from django.contrib.gis.gdal import GDALException
from django.contrib.gis.geos import Polygon, GEOSException, GEOSGeometry, WKTWriter
from django.utils.translation import gettext_lazy as _
from django.utils.encoding import force_str
from django.utils.http import urlsafe_base64_decode
from djmoney.contrib.django_rest_framework import MoneyField
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from allauth.account.adapter import get_adapter
from .helpers import send_geoshop_email, zip_all_orderitems
from .models import (
Copyright, Contact, Document, DataFormat, Identity,
Metadata, MetadataCategoryEch, MetadataContact, Order, OrderItem, OrderType,
Pricing, Product, ProductFormat, UserChange)
# Get the UserModel
UserModel = get_user_model()
class WKTPolygonField(serializers.Field):
"""
Polygons are serialized to POLYGON((Long, Lat)) notation
"""
def to_representation(self, value):
if isinstance(value, dict) or value is None:
return value
new_value = copy.copy(value)
wkt_w = WKTWriter()
# Use buffer and Douglas-Peucker to simplify geom (one vertex 0.2m) for large polygons
# The smallest Cadastre has 156 vertices
if new_value.num_coords > 156:
new_value = new_value.buffer(0.5)
new_value = new_value.simplify(0.2, preserve_topology=False)
wkt_w.precision = 6
new_value.transform(4326)
# number of decimals
if new_value.area > 0:
return wkt_w.write(new_value).decode()
return 'POLYGON EMPTY'
def to_internal_value(self, value):
if value == '' or value is None:
return value
if isinstance(value, GEOSGeometry):
# value already has the correct representation
return value
if isinstance(value, dict):
value = json.dumps(value)
try:
return GEOSGeometry(value)
except (GEOSException):
raise ValidationError(
_(
'Invalid format: string or unicode input unrecognized as GeoJSON, WKT EWKT or HEXEWKB.'
)
)
except (ValueError, TypeError, GDALException) as error:
raise ValidationError(
_('Unable to convert to python object: {}'.format(str(error)))
)
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = UserModel
fields = [
'username', 'id', 'identity'
]
class IdentitySerializer(serializers.ModelSerializer):
class Meta:
model = Identity
exclude = ['sap_id', 'contract_accepted', 'is_public', 'user']
class CopyrightSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Copyright
fields = '__all__'
class ContactSerializer(serializers.HyperlinkedModelSerializer):
belongs_to = serializers.HiddenField(
default=serializers.CurrentUserDefault(),
)
class Meta:
model = Contact
fields = '__all__'
class DocumentSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Document
fields = '__all__'
class DataFormatSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = DataFormat
fields = '__all__'
class OrderTypeSerializer(serializers.ModelSerializer):
class Meta:
model = OrderType
fields = '__all__'
class UserIdentitySerializer(UserSerializer):
"""
Flattens User and Identity.
"""
identity = IdentitySerializer(many=False)
def to_representation(self, instance):
"""Move fields from user to identity representation."""
representation = super().to_representation(instance)
identity_representation = representation.pop('identity')
for identity_key in identity_representation:
new_key = identity_key
if new_key in representation:
new_key = 'identity_' + identity_key
representation[new_key] = identity_representation[identity_key]
return representation
class MetadataIdentitySerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Identity
fields = [
'url',
'first_name', 'last_name', 'email',
'phone', 'street', 'street2',
'company_name',
'postcode', 'city', 'country']
class PublicUserIdentitySerializer(UserIdentitySerializer):
"""
User serializer that is safe to use on token protected routes.
"""
identity = MetadataIdentitySerializer(many=False)
class MetadataContactSerializer(serializers.HyperlinkedModelSerializer):
contact_person = MetadataIdentitySerializer(read_only=True)
class Meta:
model = MetadataContact
fields = [
'contact_person',
'metadata_role']
class MetadataDigestSerializer(serializers.ModelSerializer):
class Meta:
model = Metadata
fields = ['geoportal_link']
class MetadataSerializer(serializers.HyperlinkedModelSerializer):
contact_persons = serializers.SerializerMethodField()
modified_user = serializers.StringRelatedField(read_only=True)
documents = DocumentSerializer(many=True)
copyright = CopyrightSerializer(many=False)
legend_tag = serializers.StringRelatedField()
image_tag = serializers.StringRelatedField()
legend_link = serializers.SerializerMethodField()
ech_category = serializers.SlugRelatedField(
required=False,
queryset=MetadataCategoryEch.objects.all(),
slug_field='description_fr'
)
class Meta:
model = Metadata
exclude = ['datasource']
lookup_field = 'id_name'
extra_kwargs = {
'url': {'lookup_field': 'id_name'}
}
def get_contact_persons(self, obj):
"""obj is a Metadata instance. Returns list of dicts"""
qset = MetadataContact.objects.filter(metadata=obj)
return [
MetadataContactSerializer(m, context={
'request': self.context['request']
}).data for m in qset]
def get_legend_link(self, obj):
return obj.get_legend_link()
class OrderDigestSerializer(serializers.HyperlinkedModelSerializer):
"""
Serializer showing a summary of an Order.
Always exclude geom here as it is used in lists of
orders and performance can be impacted.
"""
order_type = serializers.StringRelatedField()
class Meta:
model = Order
exclude = [
'geom', 'date_downloaded', 'client',
'processing_fee_currency', 'processing_fee',
'part_vat_currency', 'part_vat', 'extract_result',
'invoice_contact']
class OrderItemSerializer(serializers.ModelSerializer):
"""
A Basic serializer for order items
"""
price = MoneyField(max_digits=14, decimal_places=2,
required=False, allow_null=True, read_only=True)
data_format = serializers.SlugRelatedField(
required=False,
queryset=DataFormat.objects.all(),
slug_field='name'
)
product = serializers.SlugRelatedField(
queryset=Product.objects.all(),
slug_field='label')
product_id = serializers.PrimaryKeyRelatedField(read_only=True)
available_formats = serializers.ListField(read_only=True)
class Meta:
model = OrderItem
exclude = ['_price_currency', '_price', '_base_fee_currency',
'_base_fee', 'last_download', 'extract_result',
'validation_date', 'token']
read_only_fields = ['price_status', 'order']
class OrderItemValidationSerializer(OrderItemSerializer):
"""
Extends OrderItemSerializer with the order_guid.
CAUTION: order_guid allows to acces to order without authentication
"""
order_guid = serializers.SerializerMethodField()
def get_order_guid(self, obj):
return obj.order.download_guid
class OrderItemTextualSerializer(OrderItemSerializer):
"""
Same as OrderItem, without Order
"""
class Meta(OrderItemSerializer.Meta):
exclude = OrderItemSerializer.Meta.exclude + ['order']
class OrderSerializer(serializers.ModelSerializer):
"""
A complete Order serializer.
"""
order_type = serializers.SlugRelatedField(
queryset=OrderType.objects.all(),
slug_field='name',
help_text='Input the translated string value, for example "Privé"')
items = OrderItemTextualSerializer(many=True)
client = serializers.HiddenField(
default=serializers.CurrentUserDefault(),
)
class Meta:
model = Order
exclude = ['date_downloaded', 'extract_result', 'download_guid']
read_only_fields = [
'date_ordered', 'date_processed',
'processing_fee_currency', 'processing_fee',
'total_cost_currency', 'total_cost',
'part_vat_currency', 'part_vat',
'status']
def create(self, validated_data):
items_data = validated_data.pop('items', None)
geom = validated_data.pop('geom', None)
order = Order(**validated_data)
order.geom = Polygon(
[xy[0:2] for xy in list(geom.coords[0])],
srid=settings.DEFAULT_SRID
)
order.save()
if not order.geom.valid:
send_geoshop_email(
_('Geoshop - Invalid geometry'),
template_name='email_admin',
template_data={
'messages': [_('A new order has been submitted and its geometry is invalid:')],
'details': {
_('order'): order.id,
}
}
)
for item_data in items_data:
item = OrderItem.objects.create(order=order, **item_data)
item.set_price()
item.save()
if order.order_type and items_data:
order.set_price()
order.save()
return order
def update(self, instance, validated_data):
if instance.status != Order.OrderStatus.DRAFT:
raise serializers.ValidationError()
items_data = validated_data.pop('items', None)
geom = validated_data.pop('geom', None)
if geom is not None:
instance.geom = Polygon(
[xy[0:2] for xy in list(geom.coords[0])],
srid=settings.DEFAULT_SRID
)
instance.title = validated_data.get('title', instance.title)
instance.description = validated_data.get(
'description', instance.description)
instance.invoice_contact = validated_data.get(
'invoice_contact', instance.invoice_contact)
instance.invoice_reference = validated_data.get(
'invoice_reference', instance.invoice_reference)
instance.email_deliver = validated_data.get(
'email_deliver', instance.email_deliver)
instance.order_type = validated_data.get(
'order_type', instance.order_type)
instance.save()
update_products = []
if items_data is not None:
for item in items_data:
update_products.append(item.get('product').label)
# create / update / delete order_items on PUT (self.partial=False)
# update order_items on PATCH (self.partial=True)
order_items = list((instance.items).all())
if not self.partial:
for existing_item in order_items:
if existing_item.product.label not in update_products:
existing_item.delete()
if items_data:
for item_data in items_data:
oi_instance, created = OrderItem.objects.get_or_create(
order=instance,
product=item_data.get('product')
)
oi_instance.data_format = item_data.get(
'data_format', oi_instance.data_format)
oi_instance.product = item_data.get(
'product', oi_instance.product)
oi_instance.set_price()
oi_instance.save()
instance.set_price()
instance.save()
if instance.order_type:
if items_data or geom or 'order_type' in validated_data:
instance.set_price()
instance.save()
return instance
class PublicOrderSerializer(OrderSerializer):
"""
Meant to be accessed by token
"""
client = PublicUserIdentitySerializer(read_only=True)
class Meta(OrderSerializer.Meta):
exclude = [
'date_downloaded', 'extract_result', 'download_guid',
'processing_fee_currency', 'processing_fee', 'total_with_vat']
class ProductSerializer(serializers.ModelSerializer):
"""
Product serializer
"""
metadata = serializers.HyperlinkedRelatedField(
many=False,
read_only=True,
view_name='metadata-detail',
lookup_field='id_name'
)
pricing = serializers.StringRelatedField(
read_only=True)
provider = serializers.CharField(
source='provider.identity.company_name',
read_only=True)
metadata_summary = MetadataDigestSerializer(
source='metadata',
read_only=True
)
class Meta:
model = Product
read_only_fields = ['pricing', 'label', 'group', 'metadata_summary']
exclude = ['order', 'ts', 'geom']
class ProductExtractSerializer(ProductSerializer):
"""
Product serializer without geom
"""
metadata = None
class Meta:
model = Product
read_only_fields = ['pricing', 'label', 'group']
exclude = ['order', 'thumbnail_link', 'ts', 'metadata', 'geom']
class ExtractOrderItemSerializer(OrderItemSerializer):
"""
Orderitem serializer for extract. Allows to upload file of orderitem.
"""
extract_result = serializers.FileField(required=False)
product = ProductExtractSerializer(read_only=True)
data_format = serializers.StringRelatedField(read_only=True)
is_rejected = serializers.BooleanField(required=False)
price = None
available_formats = None
class Meta(OrderItemSerializer.Meta):
exclude = ['_price_currency', '_base_fee_currency',
'_price', '_base_fee', 'order', 'status',
'last_download', 'price_status']
read_only_fields = [
'id', 'data_format', 'product', 'srid']
def update(self, instance, validated_data):
if instance.extract_result:
# deletes previous file in filesystem
instance.extract_result.delete()
instance.comment = validated_data.pop('comment', None)
is_rejected = validated_data.pop('is_rejected')
instance.extract_result = validated_data.pop('extract_result', '')
if is_rejected:
instance.status = OrderItem.OrderItemStatus.REJECTED
if instance.extract_result.name != '':
instance.status = OrderItem.OrderItemStatus.PROCESSED
instance.save()
status = instance.order.next_status_on_extract_input()
if status == Order.OrderStatus.PROCESSED:
zip_all_orderitems(instance.order)
instance.order.save()
return instance
class ExtractOrderSerializer(serializers.ModelSerializer):
"""
Order serializer for Extract.
"""
order_type = serializers.SlugRelatedField(
queryset=OrderType.objects.all(),
slug_field='name',
help_text='Input the translated string value, for example "Privé"')
client = UserIdentitySerializer()
invoice_contact = IdentitySerializer()
geom = WKTPolygonField()
geom_srid = serializers.IntegerField()
geom_area = serializers.FloatField()
class Meta:
model = Order
exclude = [
'date_downloaded', 'processing_fee_currency',
'total_without_vat_currency', 'part_vat_currency', 'total_with_vat_currency']
read_only_fields = [
'date_ordered', 'date_processed',
'processing_fee_currency', 'processing_fee',
'total_cost_currency', 'total_cost',
'part_vat_currency', 'part_vat',
'status', 'geom_area']
class PasswordResetSerializer(serializers.Serializer):
"""
Serializer for requesting a password reset e-mail.
"""
email = serializers.EmailField()
password_reset_form_class = PasswordResetForm
def validate_email(self, value):
# Create PasswordResetForm with the serializer
self.reset_form = self.password_reset_form_class(
data=self.initial_data)
if not self.reset_form.is_valid():
raise serializers.ValidationError(self.reset_form.errors)
return value
def save(self):
request = self.context.get('request')
# Set some values to trigger the send_email method.
opts = {
'domain_override': getattr(settings, 'FRONT_URL') + getattr(settings, 'FRONT_HREF'),
'use_https': request.is_secure(),
'from_email': getattr(settings, 'DEFAULT_FROM_EMAIL'),
'request': request,
'email_template_name': 'email_password_reset.html',
'html_email_template_name': 'email_password_reset.html'
}
self.reset_form.save(**opts)
class PasswordResetConfirmSerializer(serializers.Serializer):
"""
Serializer for setting a new user password.
"""
new_password1 = serializers.CharField(max_length=128)
new_password2 = serializers.CharField(max_length=128)
uid = serializers.CharField()
token = serializers.CharField()
set_password_form_class = SetPasswordForm
def validate(self, attrs):
self._errors = {}
# Decode the uidb64 to uid to get User object
try:
uid = force_str(urlsafe_base64_decode(attrs['uid']))
self.user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
raise ValidationError({'uid': ['Invalid value']})
# Construct SetPasswordForm instance
self.set_password_form = self.set_password_form_class(
user=self.user, data=attrs
)
if not self.set_password_form.is_valid():
raise serializers.ValidationError(self.set_password_form.errors)
if not default_token_generator.check_token(self.user, attrs['token']):
raise ValidationError({'token': ['Invalid value']})
return attrs
def save(self):
return self.set_password_form.save()
class PricingSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Pricing
fields = '__all__'
class ProductFormatSerializer(serializers.ModelSerializer):
product = serializers.SlugRelatedField(
queryset=Product.objects.all(),
slug_field='label')
data_format = serializers.SlugRelatedField(
required=False,
queryset=DataFormat.objects.all(),
slug_field='name',
label='format')
class Meta:
model = ProductFormat
fields = '__all__'
class DataFormatListSerializer(ProductFormatSerializer):
product = None
class Meta:
model = ProductFormat
exclude = ['product']
class ProductDigestSerializer(ProductSerializer):
pricing = serializers.SlugRelatedField(
required=False,
queryset=DataFormat.objects.all(),
slug_field='name'
)
provider = serializers.CharField(
source='provider.identity.company_name',
read_only=True)
class RegisterSerializer(serializers.ModelSerializer):
"""
Serializer for user registration
"""
password1 = serializers.CharField(write_only=True)
password2 = serializers.CharField(write_only=True)
def validate_username(self, username):
username = get_adapter().clean_username(username)
return username
def validate_email(self, email):
email = get_adapter().clean_email(email)
return email
def validate_password1(self, password):
return get_adapter().clean_password(password)
def validate(self, data):
if data['password1'] != data['password2']:
raise serializers.ValidationError(
_("The two password fields didn't match."))
return data
def create(self, validated_data):
password = validated_data.pop('password1')
validated_data.pop('password2')
user = UserModel(username=validated_data.pop('username'))
user.set_password(password)
identity_data = self.initial_data.copy()
for key in ['password1', 'password2', 'username']:
identity_data.pop(key)
identity_serializer = IdentitySerializer(data=identity_data)
identity_serializer.is_valid(raise_exception=True)
user.save()
identity_serializer.instance = user.identity
identity_serializer.save()
return user
class Meta:
model = UserModel
exclude = [
'password', 'last_login', 'date_joined',
'groups', 'user_permissions', 'is_staff',
'is_active', 'is_superuser']
class UserChangeSerializer(serializers.ModelSerializer):
class Meta:
model = UserChange
fields = '__all__'
class ValidationSerializer(serializers.Serializer):
is_validated = serializers.BooleanField()
class VerifyEmailSerializer(serializers.Serializer):
key = serializers.CharField()
| [
"stephane.maltaesousa@ne.ch"
] | stephane.maltaesousa@ne.ch |
bc79a71833358f39c5740b9166e50e24b73bacfe | ccefb5c0a121509963c0f5e8b21567b527eee210 | /src/djangoflix/urls.py | b6c7948bad15eb7f6d4ec89961a110ea2e9f8cd3 | [
"MIT"
] | permissive | susilthapa/DjangoFlix | 841c0a8aae21cb0eb41e7c5a19f11d86e83fc1ec | f4c544826a072c04b9a93e9929e327cfd130a360 | refs/heads/main | 2023-03-14T16:41:57.935530 | 2021-03-20T22:25:06 | 2021-03-20T22:25:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | """djangoflix URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/dev/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"hello@teamcfe.com"
] | hello@teamcfe.com |
0ad8b8a309da9c2383079c344532ca213f7dab3d | b22e0bb8bd204c71e030e5d64b79f280960c65ee | /temp_validation_10minres_anywslsite.py | fe3a613f0ec0ea73852383775613778282bc7406 | [] | no_license | Jansing-Lukas/meteo_interpolations_wsl | 54450666d7d6008a0e0219a76af474a3c6702ba6 | 890a0dc16f96d208324be41797bd1596ed204c83 | refs/heads/master | 2020-06-20T17:45:00.788385 | 2019-07-16T14:11:55 | 2019-07-16T14:11:55 | 197,195,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,727 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 18 11:16:41 2019
@author: lukas jansing
Script in order to validate the interpolated temperature data
Compare 10minres data to 10minres data of LWF
Compare the three different interpolations:
- horizontally only
- standard gradient
- empirical local gradient
"""
#---------------------------------------------------------
# Import modules
#---------------------------------------------------------
import numpy as np
import datetime
import matplotlib.pyplot as plt
from dateutil.relativedelta import *
import matplotlib
import matplotlib.dates as dt
from functions import make_datelist
from import_data import import_lwf_data
treenetstationnames = ['Saillon_Versuch','Sihlwald','Bärschwil_tief','Neunkirch_Nord'
,'Versam_Bu','Felsberg_Bu','Chamoson_Bu','Vetroz_Bu','Saillon_Bu'
,'LWF-Neunkirch1','Neunkirch_SW_Bu','Bärschwil_flach','Tamins'
,'Neunkirch_SW_Ei','Chamoson_Ei_Bu','Remigen','Bueren','Chamoson_Ei'
,'Saillon_Ei','Chippis_Ei','Vetroz_Ei','Saillon_extrem','Chippis_top'
,'Surava_8300','Surava_8306','Tarasp_9107_Fi','Ransun_Fi','Sent_9152_Fi'
,'Bhutan','Pulligen','Hohtenn','Geissberg','Sent_9152_Foe','Scuol_9107_Foe'
,'Ransun_Foe','Felsberg_Foe','Versam_Foe','Surava_8106','Alvaneu_8101'
,'Alvaneu_8115','Alvaneu_8134','LWF-Lens3','Pfynwald','Chippis_Foe'
,'Bachtel','Beatenberg','Birmensdorf','Davos','Grosswangen','Jussy'
,'Laegeren_FF','Laegeren_Hut','Lausanne','Muri_Beech','Muri_Spruce'
,'Muri_Meteo','Neunkirch_SE','Neunkirch_N','Neunkirch_SW','Novaggio'
,'Pfynwald-Illgraben_NW','Pfynwald-Illgraben_N','Riehen_Forest'
,'Riehen_Meteo','Sagno_SW','Sagno_SE','Sagno_Meteo','Saillon_1'
,'Saillon_2','Saillon_3','Salgesch','Schaenis','Schmitten'
,'Sempach','Surava_S','Surava_N','Visp','Vordemwald','Zürich']
treenetstation_id = [1,2,3,4,5,6,7,8,1,10,11,12,13,14,7,16,17,7,1,20,21,1
,20,24,25,26,27,28,29,30,31,32,28,34,27,36,37,25,39,40,41
,42,43,20,45,46,47,48,49,50,51,52,53,54,54,56,14,14,14,60
,43,43,63,63,65,65,67,1,1,1,71,72,73,74,24,24,77,78,79]
#---------------------------------------------------------
# !!!!!! EDIT !!!!!!!!!
# Choose station here
# !!!!!! EDIT !!!!!!!!!
#---------------------------------------------------------
# Available stations (so far):
# TreeNet sites of LWF: Jussy, Beatenberg, Lausanne, Lens, Neunkirch,
# Novaggio, Visp, Vordemwald, Schänis
treenetstation = 'Jussy'
# needed for selecting proper CombiPrecip timeseries
station_id = treenetstation_id[treenetstationnames.index(treenetstation)]
#---------------------------------------------------------
# Define paths
#---------------------------------------------------------
stationtemppath = 'add path to MeteoSwiss temperature data'
treenettemppath = 'add path to LWF temperature data'
figpath = 'add path to your figures folder'
#---------------------------------------------------------
# Import precipitation data of TreeNet/LWF station
#---------------------------------------------------------
temp = import_lwf_data(treenetstation=treenetstation,path=treenettemppath,\
variable='temp',process_treenet_data='yes')
#---------------------------------------------------------
# Load interpolated temperature data
#---------------------------------------------------------
print('import interpolated temperature data (version without vertical interpolation)')
interpolated_temperature_data = np.load(stationtemppath+'\interpolated_temperature_10minres_novertical.npy')
temp['date_interpolation'] = interpolated_temperature_data[0,:]
interp_ind = treenetstationnames.index(treenetstation)
temp['temp_interpolated_novertical'] = interpolated_temperature_data[interp_ind+1,:].astype(float)
print('import interpolated temperature data (version with standard gradient)')
interpolated_temperature_data = np.load(stationtemppath+'\interpolated_temperature_10minres_standardgradient.npy')
temp['temp_interpolated_standardgradient'] = interpolated_temperature_data[interp_ind+1,:].astype(float)
print('import interpolated temperature data (version with empirical gradient)')
interpolated_temperature_data = np.load(stationtemppath+'\interpolated_temperature_10minres_empiricalgradient.npy')
temp['temp_interpolated_empiricalgradient'] = interpolated_temperature_data[interp_ind+1,:].astype(float)
#---------------------------------------------------------
# Create monthly datelist
#---------------------------------------------------------
# Identify earliest and latest date
earliest_date = temp['date_interpolation'][0]
latest_date = temp['date_interpolation'][-1]
# Add up to next month
year = earliest_date.year
if earliest_date.day != 1 or earliest_date.hour != 0:
month = earliest_date.month+1
earliest_date = datetime.datetime(year,month,1,0)
# Subtract to previous month
year = latest_date.year
if (latest_date.day != 31 or 30) or latest_date.hour != 23:
latest_date = latest_date - relativedelta(months=1,day=31,hour=23)
# Create the datelist to loop over
datelist_months = []
nowdate = earliest_date
while nowdate <= latest_date:
datelist_months.append(nowdate)
nowdate = nowdate+relativedelta(months=+1)
#---------------------------------------------------------
# Loop over the whole timespan in order to create plots for each month
#---------------------------------------------------------
matplotlib.rcParams.update({'font.size': 20})
for nowdate in datelist_months:
print(nowdate.strftime('%b %Y'))
print('plotting')
fig = plt.figure()
fig.set_size_inches(30, 14)
#Get latest instant of the month
month_end = nowdate+relativedelta(day=31,hour=23)
# Find indices of the month in the timeseries
wsl_ind_0 = np.where(temp['treenetdate'] == nowdate)[0][0]
interp_ind_0 = np.where(temp['date_interpolation'] == nowdate)[0][0]
wsl_ind_1 = np.where(temp['treenetdate'] == month_end)[0][0]
interp_ind_1 = np.where(temp['date_interpolation'] == month_end)[0][0]
# Get indices of wsl nans and meteoswiss nans
wsl_nans = np.argwhere(np.isnan(temp['treenettemp'][wsl_ind_0:wsl_ind_1+1]))[:,0]
meteoswiss_nans_novertical = np.argwhere(np.isnan(temp['temp_interpolated_novertical'][interp_ind_0:interp_ind_1+1]))[:,0]
meteoswiss_nans_standard = np.argwhere(np.isnan(temp['temp_interpolated_standardgradient'][interp_ind_0:interp_ind_1+1]))[:,0]
meteoswiss_nans_empirical = np.argwhere(np.isnan(temp['temp_interpolated_empiricalgradient'][interp_ind_0:interp_ind_1+1]))[:,0]
# Plot
plt.plot(temp['treenetdate'][wsl_ind_0:wsl_ind_1+1],\
temp['treenettemp'][wsl_ind_0:wsl_ind_1+1],ls='-',color='orange',label='treenet')
plt.plot(temp['date_interpolation'][interp_ind_0:interp_ind_1+1],\
temp['temp_interpolated_novertical'][interp_ind_0:interp_ind_1+1],ls='-',color='blue',label='interpolation horizontal only')
plt.plot(temp['date_interpolation'][interp_ind_0:interp_ind_1+1],\
temp['temp_interpolated_standardgradient'][interp_ind_0:interp_ind_1+1],ls='-',color='green',label='interpolation vertical standard')
plt.plot(temp['date_interpolation'][interp_ind_0:interp_ind_1+1],\
temp['temp_interpolated_empiricalgradient'][interp_ind_0:interp_ind_1+1],ls='-',color='red',label='interpolation vertical empirical')
plt.ylabel('Temperature [°C]',fontsize=20)
plt.title(treenetstation+' WSL',fontsize=25,loc='left')
plt.title(nowdate.strftime('%b %Y'),fontsize=25,loc='right')
plt.legend(loc=2)
# Calculate RMSEs
RMSE_novertical = np.sqrt(np.nanmean((temp['treenettemp'][wsl_ind_0:wsl_ind_1+1] - \
temp['temp_interpolated_novertical'][interp_ind_0:interp_ind_1+1])**2))
RMSE_standardgradient = np.sqrt(np.nanmean((temp['treenettemp'][wsl_ind_0:wsl_ind_1+1] - \
temp['temp_interpolated_standardgradient'][interp_ind_0:interp_ind_1+1])**2))
RMSE_empiricalgradient = np.sqrt(np.nanmean((temp['treenettemp'][wsl_ind_0:wsl_ind_1+1] - \
temp['temp_interpolated_empiricalgradient'][interp_ind_0:interp_ind_1+1])**2))
# Add table with values below
labels = [r'$RMSE_{novertical}$',r'$RMSE_{standard}$',r'$RMSE_{empirical}$']
plt.table(cellText=[['%.2f' % RMSE_novertical+' °C','%.2f' % RMSE_standardgradient+' °C','%.2f' % RMSE_empiricalgradient+' °C']],\
bbox = [0.0,-0.12, 1.0, 0.07],cellLoc='center',rowLoc='center',colLabels=labels,fontsize=20)
# Format x-axis and grid
ax = plt.axes()
datelist = make_datelist(temp['treenetdate'][wsl_ind_0],temp['treenetdate'][wsl_ind_1],1/6)
ax.xaxis_date()
plt.xticks(datelist[0::6*24*7])
ax.xaxis.set_major_formatter(dt.DateFormatter('%d-%m-%Y %H'))
ax.xaxis.grid(True, which='major', color='0.5',alpha=0.7, linestyle='--',lw=1.5)
saveas='\\temp_comparison_'+treenetstation+'_'
plt.savefig(figpath+saveas+nowdate.strftime('%Y_%m')+'.png',bbox_inches='tight',dpi=400)
plt.close('all')
| [
"noreply@github.com"
] | noreply@github.com |
bece9a5234650c40fc71e3cf6f5df1b6f1412b8e | dcba6985b2b0d4743c2eefa44ecd0ff6dfb0e280 | /day7/note/demo_窗口滚动.py | ea934a97838f987855545fff7c0c918b1893d2a4 | [] | no_license | liyaozr/web_auto | 3c16da295ff5d6c33303f0c6176acf53f8a8cbd6 | 5a33365bfac3fc6afe07a93f9ef7935c30bc3f56 | refs/heads/master | 2021-04-13T21:49:29.677090 | 2020-04-11T07:22:17 | 2020-04-11T07:22:17 | 249,192,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
driver = webdriver.Chrome(executable_path=r"d:\chromedriver.exe")
# 添加隐士等待
driver.implicitly_wait(30)
driver.get("https://www.12306.cn/index/")
# e = driver.find_element_by_class_name('mr')
# 将元素滑动到可见区域(为了点击或者其他的进一步操作)
# e.location_once_scrolled_into_view
driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
time.sleep(2)
| [
"lyz_fordream@163.com"
] | lyz_fordream@163.com |
24e551fb33f4931cfd6ba6e3a2c1ef762fb638c3 | 8927f6b7a805bc57afacfd12b8e23ad1831d5049 | /TalkingData_Mobile_User_Demographics/TalkingData.py | a500251138cb276e9674ec8994cc5d64f78625b8 | [] | no_license | JackMeiLong/ML.Kaggle | a7cf67ea3651033216b2021600f12c4d40c565e7 | 0186c1c73ab685d408814f1f63d9f842f83377f3 | refs/heads/master | 2020-04-02T19:55:31.653832 | 2016-08-03T12:33:31 | 2016-08-03T12:33:31 | 59,990,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,245 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 02 19:30:51 2016
@author: meil
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.feature_extraction import FeatureHasher
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
# Any results you write to the current directory are saved as output.
# loading data
events = pd.read_csv("events.csv", dtype = {"device_id": np.str}, infer_datetime_format = True, parse_dates = ["timestamp"])
app_events = pd.read_csv("app_events.csv", usecols = [0, 2, 3],
dtype = {"is_active": np.float16, "is_installed": np.float16})
# get hour and drop timestamp
events["hour"] = events["timestamp"].apply(lambda x: x.hour).astype(np.int8)
events.drop("timestamp", axis = 1, inplace = True)
# merge data w/o train or test
events = events.merge(app_events, how = "left", on = "event_id")
#del app_events
events.drop("event_id", axis = 1, inplace = True)
# prep brands
phone = pd.read_csv("phone_brand_device_model.csv", dtype={"device_id": np.str})
# feature hasher
feat = FeatureHasher(n_features=12, input_type="string", dtype=np.float32)
#print(feat)
feat1 = feat.transform(phone["phone_brand"] + " " + phone["device_model"])
#print(feat1)
events = events.merge(pd.concat([phone["device_id"], pd.DataFrame(feat1.toarray())], axis = 1), how = "left", on = "device_id")
print(events.head(5))
#del phone, feat, feat1
print("pre-merging and hashing finished.")
# train steps
train = pd.read_csv("gender_age_train.csv", dtype = {"device_id": np.str},\
usecols = [0, 3])
t2 = train.copy()
train.drop("group", axis = 1, inplace = True)
train = train.merge(events, how = "left", on = "device_id")
train.fillna(-1, inplace = True)
train = train.groupby("device_id").mean().reset_index()
train = train.merge(t2, how ="left", on = "device_id")
le=LabelEncoder()
label = train["group"].copy()
clas=le.fit_transform(label)
train.drop(["group", "device_id"], axis = 1, inplace = True)
del t2
print("train data merged and prepared")
print("-----------------------------------")
print(train.info())
print(train.head(5))
print("-----------------------------------")
# load test data merge with events
test = pd.read_csv("gender_age_test.csv", dtype = {"device_id": np.str})
test = test.merge(events, how = "left", on = "device_id")
#del events
print("test loaded and merged")
test.fillna(-1, inplace = True)
test["hour"] = test["hour"].astype(np.float16)
#test = test.groupby("device_id").mean().reset_index()
#ids = test["device_id"].copy()
#test.drop("device_id", axis = 1, inplace = True)
#
#print("test prepared")
#print("-----------------------------------")
#print(test.info())
#print("-----------------------------------")
#
##Train
#
#rfc=RandomForestClassifier(n_estimators=500, criterion='gini')
#X=train.values
#y=clas
#rfc.fit(X,y)
#
#y_pred=rfc.predict_proba(test)
#
#y_pred_df=pd.DataFrame(y_pred,columns=le.classes_.tolist())
#
#sub =pd.concat([ids,y_pred_df],axis=1)
##del pred
#print("shape of submission: " + str(sub.shape))
#sub.to_csv("sample_submission.csv", index = False)
##del sub
#print("submission saving finished.")
| [
"noreply@github.com"
] | noreply@github.com |
426c7c71d941de6f532c6347173a111373cc4734 | 99052370591eadf44264dbe09022d4aa5cd9687d | /build/cwru/cwru_base/cwru_maps/catkin_generated/pkg.installspace.context.pc.py | e51fe9087a3ee4cec49b7015d1ee43de6bdeda45 | [] | no_license | brucemingxinliu/ros_ws | 11b1a3e142132925d35b3adf929f1000392c5bdc | 45f7e553ea20b79e3e93af5f77a1b14b64184875 | refs/heads/master | 2021-01-24T03:36:47.043040 | 2018-02-26T00:53:37 | 2018-02-26T00:53:37 | 122,892,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "cwru_maps"
PROJECT_SPACE_DIR = "/home/toshiki/ros_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"mxl592@case.edu"
] | mxl592@case.edu |
a6a021b6559101608bd72beda23a30cc3f39cc4c | 4a119bd12f5ced0c43ac8a79e14f935e7d64a39e | /rapidapipractice/urls.py | ab22c9a2c021a68640cf129e39159c1d071f9d81 | [] | no_license | e-elson/rapidapipractice | 00093687b5f1c955a0d2d538ad583c2612dfd6e0 | 274bdf0e17d7ae35037df6844dbd9432df8010ee | refs/heads/master | 2023-03-09T20:09:02.587729 | 2021-02-21T16:00:41 | 2021-02-21T16:00:41 | 340,924,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | """rapidapipractice URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
from rapidapipractice import api
router = routers.DefaultRouter()
router.register(r'users', api.views.UserViewSet)
router.register(r'groups', api.views.GroupViewSet)
#setup automatic URL routing
#additionally, we include login URLs for the browsable API.
urlpatterns = [
path('admin/', admin.site.urls),
path('', include(router.urls)),
path('api_auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| [
"mac@Elsons-MacBook-Pro.local"
] | mac@Elsons-MacBook-Pro.local |
35d344e65c8cabfaab0f8c3b081f28fe5aef4807 | 49ef8cb8542a0236f2faa50c5661ec938dead38e | /analyze/analyzer.py | aec496fe872b2f852bc1cb22a0a3edc3d169e26e | [] | no_license | julieoh-2013/analysis_pd | 7f12b590a972a6bc50aac1692e6fb14cf902b6b3 | a87c6d9c5a742b26b0de350d07e186915e53bf58 | refs/heads/master | 2020-03-20T00:24:24.738519 | 2018-06-21T12:26:02 | 2018-06-21T12:26:02 | 137,042,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,325 | py | import json
import pandas as pd
import scipy.stats as ss
import numpy as np
import matplotlib.pyplot as plt
import math
def analysis_correlation(resultfiles):
with open(resultfiles['tourspot_visitor'], 'r', encoding='utf-8') as infile:
json_data = json.loads(infile.read())
tourspotvisitor_table = pd.DataFrame(json_data, columns=['count_foreigner', 'date', 'tourist_spot'])
temp_tourspotvisitor_table = pd.DataFrame(tourspotvisitor_table.groupby('date')['count_foreigner'].sum())
#print(temp_tourspotvisitor_table)
results = []
for filename in resultfiles['foreign_visitor']:
with open(filename, 'r', encoding='utf-8') as infile:
json_data = json.loads(infile.read())
foreignvisitor_table = pd.DataFrame(json_data, columns=['country_name', 'date', 'visit_count'])
foreignvisitor_table = foreignvisitor_table.set_index('date')
#인덱스로 머징 (기본은 공통칼럼으로)
merge_table = pd.merge(temp_tourspotvisitor_table,
foreignvisitor_table,
left_index=True,
right_index=True
)
# date count_foreigner country_name visit_count
x = list(merge_table['visit_count'])
y = list(merge_table['count_foreigner'])
print(foreignvisitor_table['country_name'].unique())
country_name = foreignvisitor_table['country_name'].unique().item(0)#<class 'numpy.ndarray'>.item() #ndarray.item(*args)
#상관계수추출 scipy설치
r = correlation_coefficient(x,y)
#r = ss.pearsonr(x,y)[0] #피어슨 상관계수
#r = np.corrcoef(x,y)[0]
#data = {'x':x, 'y':y,'country_name':country_name, 'r':r}
results.append({'x':x, 'y':y,'country_name':country_name, 'r':r})
merge_table['visit_count'].plot(kind='bar')
plt.show()
return results
def analysis_correlation_by_tourspot(resultfiles):
with open(resultfiles['tourspot_visitor'],'r',encoding='utf-8') as infile :
json_data = json.loads(infile.read())
tourspotvisitor_table = pd.DataFrame(json_data, columns=['tourist_spot','date','count_foreigner'])
tourist_spots = tourspotvisitor_table['tourist_spot'].unique()
results = []
for spot in tourist_spots:
data = {'tourspot': spot}
s = tourspotvisitor_table['tourist_spot'] == spot
temp_table = tourspotvisitor_table[s]
temp_table = temp_table.set_index('date')
for filename in resultfiles['foreign_visitor']:
with open(filename, 'r', encoding='utf-8') as infile:
json_data = json.loads(infile.read())
foreignvisitor_table = pd.DataFrame(json_data, columns=['country_name', 'date', 'visit_count'])
foreignvisitor_table = foreignvisitor_table.set_index('date')
# 인덱스로 머징 (기본은 공통칼럼으로)
merge_table = pd.merge(temp_table,
foreignvisitor_table,
left_index=True,
right_index=True
)
country_name = foreignvisitor_table['country_name'].unique().item(0)
# 상관계수추출 scipy설치
x = list(merge_table['count_foreigner'])
y = list(merge_table['visit_count'])
r = correlation_coefficient(x, y)
data['r_' + country_name] = r
results.append(data)
graph_table = pd.DataFrame(results, columns=['tourspot', 'r_중국', 'r_일본', 'r_미국', ])
graph_table = graph_table.set_index('tourspot')
graph_table.plot(kind='bar', rot = 70)
plt.show()
'''
[{'tourspot': '창덕궁', 'r_중국': -0.05787996838309703, 'r_일본': 0.18113398877560832, 'r_미국': 0.15157690000865773},
{'tourspot': '경복궁', 'r_중국': -0.8435333683208608, 'r_일본': -0.6908586912392769, 'r_미국': -0.8041107208313881},
{'tourspot': '창경궁', 'r_중국': 0.3302835585547996, 'r_일본': 0.1897358329486392, 'r_미국': 0.4564453800391374},
#json 로딩, 머지, 상관계수
#장소별로 방문자수와 일본인 입국자수와 상관계수 구하기 장소별 상관계수3개나옴
#dataframe = 안에서 머지
# tourspot r_중국 r_일본 r_미국 중국입국자수 일본입국자수 미국입국자수 중국방문자수....
# 경복궁 0.2 0.33 0.88
# 경복궁 0.33 0.32 0.22 (딕셔너리 리스트에 넣어서)
graph_table = pd.DataFrame(result_analysis, colums=['tourspot','r_중국','r_일본','r_미국',])
graph_table= graph_table.set_index('tourspot')
graph_table.plot(kind='bar')
plt.show()
'''
def correlation_coefficient(x, y):
n = len(x)
vals = range(n)
x_sum = 0.0
y_sum = 0.0
x_sum_pow = 0.0
y_sum_pow = 0.0
mul_xy_sum = 0.0
for i in vals:
mul_xy_sum = mul_xy_sum + float(x[i]) * float(y[i])
x_sum = x_sum + float(x[i])
y_sum = y_sum + float(y[i])
x_sum_pow = x_sum_pow + pow(float(x[i]), 2)
y_sum_pow = y_sum_pow + pow(float(y[i]), 2)
try:
r = ((n * mul_xy_sum) - (x_sum * y_sum)) / math.sqrt(((n * x_sum_pow) - pow(x_sum, 2)) * ((n * y_sum_pow) - pow(y_sum, 2)))
except ZeroDivisionError:
r = 0.0
return r
| [
"julieoh2013@gmail.com"
] | julieoh2013@gmail.com |
3b973ffb45eaa591cd1b658a60bc480604c2573e | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2227/60668/288820.py | 84a8ef21d2e2f35a0dcb5b7d7fa5bc722b3f800e | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | def nums_6_CodeSec(n,k):
seen = set()
ans = []
def dfs(node):
for x in map(str, range(k)):
nei = node + x
if nei not in seen:
seen.add(nei)
dfs(nei[1:])
ans.append(x)
dfs("0" * (n - 1))
if n == 1 and k == 2:
print("01")
else:
print(n,k)
if __name__=='__main__':
n = int(input())
k = int(input())
nums_6_CodeSec(n,k) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
a0ee1146dd62520926f5b36fce4f227c2dfe5012 | f1e2118777f957ec07c2955ba9515b632dc099b4 | /python_lab.py | ba0a36afedb103b67f716eb39029fcbe68555fe6 | [] | no_license | RobertCPhillips/CourseraCodingTheMatrix | 63e8d575274b288643a660bfc13f0f43587375b8 | 36e85e0389497a55a5990eb2d5b05b07d59c05ea | refs/heads/master | 2020-05-01T06:51:32.697508 | 2015-04-02T22:04:15 | 2015-04-02T22:04:15 | 30,215,878 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,827 | py | # version code bd6127413fba+
coursera = 1
# Please fill out this stencil and submit using the provided submission script.
## 1: (Task 1) Minutes in a Week
minutes_in_week = 7*24*60
## 2: (Task 2) Remainder
remainder_without_mod = 2304811 - (2304811 // 47)*47
## 3: (Task 3) Divisibility
divisible_by_3 = ((673 + 909) % 3) == 0
## 4: (Task 4) Conditional Expression
x = -9
y = 1/2
expression_val = 2**(y+1/2) if x+10<0 else 2**(y-1/2)
## 5: (Task 5) Squares Set Comprehension
first_five_squares = { x*x for x in {1,2,3,4,5} }
## 6: (Task 6) Powers-of-2 Set Comprehension
first_five_pows_two = { 2**x for x in {0,1,2,3,4} }
## 7: (Task 7) Double comprehension evaluating to nine-element set
X1 = { 2, 3, 4 }
Y1 = { 5, 6, 7 }
## 8: (Task 8) Double comprehension evaluating to five-element set
X2 = { 1, 2, 4 }
Y2 = { 1/2, 1/4, 1/8 }
## 9: (Task 9) Set intersection as a comprehension
S = {1, 2, 3, 4}
T = {3, 4, 5, 6}
# Replace { ... } with a one-line set comprehension that evaluates to the intersection of S and T
S_intersect_T = {x for x in S if x in T}
## 10: (Task 10) Average
list_of_numbers = [20, 10, 15, 75]
# Replace ... with a one-line expression that evaluates to the average of list_of_numbers.
# Your expression should refer to the variable list_of_numbers, and should work
# for a list of any length greater than zero.
list_average = sum(list_of_numbers) / len(list_of_numbers)
## 11: (Task 11) Cartesian-product comprehension
# Replace ... with a double list comprehension over {'A','B','C'} and {1,2,3}
cartesian_product = [[x,y] for x in ['A','B','C'] for y in [1,2,3]]
## 12: (Task 12) Sum of numbers in list of list of numbers
LofL = [[.25, .75, .1], [-1, 0], [4, 4, 4, 4]]
# Replace ... with a one-line expression of the form sum([sum(...) ... ]) that
# includes a comprehension and evaluates to the sum of all numbers in all the lists.
LofL_sum = sum([sum(x) for x in LofL])
## 13: (Task 13) Three-element tuples summing to zero
S = {-4, -2, 1, 2, 5, 0}
# Replace [ ... ] with a one-line list comprehension in which S appears
zero_sum_list = [(x,y,z) for x in S for y in S for z in S if x+y+z == 0]
## 14: (Task 14) Nontrivial three-element tuples summing to zero
S = {-4, -2, 1, 2, 5, 0}
# Replace [ ... ] with a one-line list comprehension in which S appears
exclude_zero_list = [(x,y,z) for x in S for y in S for z in S if x+y+z == 0 and (x,y,z) != (0,0,0)]
## 15: (Task 15) One nontrivial three-element tuple summing to zero
S = {-4, -2, 1, 2, 5, 0}
# Replace ... with a one-line expression that uses a list comprehension in which S appears
first_of_tuples_list = [(x,y,z) for x in S for y in S for z in S if x+y+z == 0 and (x,y,z) != (0,0,0)][0]
## 16: (Task 16) List and set differ
# Assign to example_L a list such that len(example_L) != len(list(set(example_L)))
example_L = [1,1]
## 17: (Task 17) Odd numbers
# Replace {...} with a one-line set comprehension over a range of the form range(n)
odd_num_list_range = {x for x in range(1,100,2)}
## 18: (Task 18) Using range and zip
L = ['A','B','C','D','E']
# In the line below, replace ... with an expression that does not include a comprehension.
# Instead, it should use zip and range.
# Note: zip() does not return a list. It returns an 'iterator of tuples'
range_and_zip = list(zip(range(5),L))
## 19: (Task 19) Using zip to find elementwise sums
A = [10, 25, 40]
B = [1, 15, 20]
# Replace [...] with a one-line comprehension that uses zip together with the variables A and B.
# The comprehension should evaluate to a list whose ith element is the ith element of
# A plus the ith element of B.
list_sum_zip = [sum(x) for x in zip(A,B)]
## 20: (Task 20) Extracting the value corresponding to key k from each dictionary in a list
dlist = [{'James':'Sean', 'director':'Terence'}, {'James':'Roger', 'director':'Lewis'}, {'James':'Pierce', 'director':'Roger'}]
k = 'James'
# Replace [...] with a one-line comprehension that uses dlist and k
# and that evaluates to ['Sean','Roger','Pierce']
value_list = [x[k] for x in dlist]
## 21: (Task 21) Extracting the value corresponding to k when it exists
dlist = [{'Bilbo':'Ian','Frodo':'Elijah'},{'Bilbo':'Martin','Thorin':'Richard'}]
k = 'Bilbo'
#Replace [...] with a one-line comprehension
value_list_modified_1 = [x[k] if k in x else 'NOT PRESENT' for x in dlist] # <-- Use the same expression here
k = 'Frodo'
value_list_modified_2 = [x[k] if k in x else 'NOT PRESENT' for x in dlist] # <-- as you do here
## 22: (Task 22) A dictionary mapping integers to their squares
# Replace {...} with a one-line dictionary comprehension
square_dict = { k:k*k for k in range(100) }
## 23: (Task 23) Making the identity function
D = {'red','white','blue'}
# Replace {...} with a one-line dictionary comprehension
identity_dict = {x:x for x in D}
## 24: (Task 24) Mapping integers to their representation over a given base
base = 10
digits = set(range(base))
# Replace { ... } with a one-line dictionary comprehension
# Your comprehension should use the variables 'base' and 'digits' so it will work correctly if these
# are assigned different values (e.g. base = 2 and digits = {0,1})
representation_dict = {x*base**2+y*base+z:(x,y,z) for x in digits for y in digits for z in digits}
## 25: (Task 25) A dictionary mapping names to salaries
id2salary = {0:1000.0, 1:1200.50, 2:990}
names = ['Larry', 'Curly', 'Moe']
# Replace { ... } with a one-line dictionary comprehension that uses id2salary and names.
listdict2dict = {names[x]:id2salary[x] for x in range(len(names)) if x in id2salary}
## 26: (Task 26) Procedure nextInts
# Complete the procedure definition by replacing [ ... ] with a one-line list comprehension
def nextInts(L): return [x+1 for x in L]
## 27: (Task 27) Procedure cubes
# Complete the procedure definition by replacing [ ... ] with a one-line list comprehension
def cubes(L): return [x**3 for x in L]
## 28: (Task 28) Procedure dict2list
# Input: a dictionary dct and a list keylist consisting of the keys of dct
# Output: the list L such that L[i] is the value associated in dct with keylist[i]
# Example: dict2list({'a':'A', 'b':'B', 'c':'C'},['b','c','a']) should equal ['B','C','A']
# Complete the procedure definition by replacing [ ... ] with a one-line list comprehension
def dict2list(dct, keylist): return [dct[keylist[i]] for i in range(len(keylist))]
## 29: (Task 29) Procedure list2dict
# Input: a list L and a list keylist of the same length
# Output: the dictionary that maps keylist[i] to L[i] for i=0,1,...len(L)-1
# Example: list2dict(['A','B','C'],['a','b','c']) should equal {'a':'A', 'b':'B', 'c':'C'}
# Complete the procedure definition by replacing { ... } with a one-line dictionary comprehension
def list2dict(L, keylist): return {keylist[i]:L[i] for i in range(len(keylist))}
| [
"robert.c.phillips@live.com"
] | robert.c.phillips@live.com |
a2f8c5c8659f14cbc8e532f235de3d428c33e977 | d24639b7f843539ba181f0b091af7f03c410af31 | /--- Data-Science ---/Section 39 - Artificial Neural Networks (ANN)/ANN.py | e88ba64c0f8d70b2b7f25279452afc045f57c811 | [] | no_license | roilait/Machine-Learning | 8856eff09e2ed88d2aa6c120f45612df95fab49d | 95b9993601123e562e4e8232bf9c2356e961c0fd | refs/heads/master | 2021-01-11T22:23:03.938541 | 2017-06-12T04:48:30 | 2017-06-12T04:48:30 | 78,955,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,365 | py |
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #FFFFFF
# ---------------------------
# Part I - Data Preprocessing
# ---------------------------
DATESET_DIR = 'dataSet/Churn_Modelling.csv'
DROPCOLS = [0, 1, 2, 13] # Remove these columns in the dataSet, Independent Variable matrix
KEEPCOLS = (13,) # Keep these columns in the dataSet, Dependent Variables matrix
CATEGORICALDATACOLS = [1, 2] # Transforme the categorical columns to Label (i.e, 1,2,....)
DAMMYVARIABLES = [1] # Transforme the dammy variable to onehote variables
# Saving dirs
SAVING_FOLDER = 'dataSet/model.hdf5'
FILE_SAVED = 'model.h5'
# Model parameters
batch_size, nb_epochs = 10, 20
nb_classes = 1
# -----------------------------------------------------------------------
# I-1 - Check the number of variables, also check the quality of the import
# -----------------------------------------------------------------------
# %%
# Get the number of variables in the dataset
dataset = pd.read_csv(DATESET_DIR)
print dataset.info()
# %%
''' Besides the number of variables, also check the quality of the import:
# are the data types correct?
# Did all the rows come through?
# Are there any null values that you should take into account when you're cleaning up the data?
# You might also want to check out your data with more than just info()'''
# %%
# First rows of Dataset
print '-----------------------------'
print '=== First rows of Dataset ==='
print '-----------------------------'
dataset.head()
# %%
# Last rows of Dataset
print '----------------------------'
print '=== Last rows of Dataset ==='
print '----------------------------'
dataset.tail()
# %%
# Take a sample of 5 rows of Dataset
print '------------------------------------------'
print '=== Take a sample of 5 rows of Dataset ==='
print '------------------------------------------'
dataset.sample(5)
# %%
# Describe Dataset
print '------------------------'
print '=== Describe Dataset ==='
print '------------------------'
dataset.describe()
# %%
# Double check for null values in dataset
print '-----------------------------------------------'
print '=== Double check for null values in dataset ==='
print '-----------------------------------------------'
pd.isnull(dataset)
# %%
# Correlation Matrix gives insights more quickly about which variables correlate:
print dataset.corr()
print dataset.var()
# %%
# -------------------------
# I-2 Importing the dataset
# -------------------------
class DataProcessing(object):
@staticmethod
def SplitData(DataSet, dropcols, keepcols, DATESET_DIR):
# Choose the Variables that have impact in the output " decision "
# Independent Variable (IV) or Inputs or " Matrix of features "
X = DataSet.drop(dataset.columns[dropcols], axis = 1)
X = X.iloc[:,:].values
# X_data = pd.DataFrame(X)
# Dependent Variables (DV) or outputs
if (len(keepcols)>1):
y = pd.read_csv(DATESET_DIR, usecols= keepcols)
else:
y = dataset.iloc[:, keepcols[0]].values
# Return these values
return X, y
X, y = DataProcessing.SplitData(dataset, DROPCOLS, KEEPCOLS,DATESET_DIR)
# -----------------------------
# I-3 Encoding categorical data
# -----------------------------
class EncodingCategoricalData(object):
@staticmethod
def LabelEncoder(X, index):
from sklearn.preprocessing import LabelEncoder
# index is the feature index
labelEncoder_IV = LabelEncoder()
X[:, index] = labelEncoder_IV.fit_transform(X[:, index])
# Return these values
return X
@staticmethod
def OneHotEncoder(X, index):
from sklearn.preprocessing import OneHotEncoder
# index is the feature index
onehotencoder = OneHotEncoder(categorical_features = [index])
X = onehotencoder.fit_transform(X).toarray()
# Return the value
return X
# --------------------------------------------------------------
# I-4 Tranform the categorical data to the number, 0, 2, 3, ....
# --------------------------------------------------------------
import random
for index in CATEGORICALDATACOLS:
var = random.choice([i for i in xrange(len(X[:,index]))])
if (type(X[:,index][var])!=int):
X = EncodingCategoricalData.LabelEncoder(X, index)
else:
print ('Oops!, ONE OR MORE OF THE INDEX IS NOT CATEGORICAL DATA')
# ----------------------------------------------------------------------------
# I-5 Dummy Encoding, the LabelEncoded Data are changed as a vector of 0 and 1
# ----------------------------------------------------------------------------
for index in DAMMYVARIABLES:
var = random.choice([i for i in xrange(len(X[:,index]))])
if (type(X[:,index][var])==int):
X = EncodingCategoricalData.OneHotEncoder(X, index)
else:
print ('Oops!, ONE OR MORE OF THE INDEX IS NOT DAMMY VARIABLE(S)')
# -----------------------------------
# I-6 Avoid one of the Dummy Variable
# -----------------------------------
rows, cols = X.shape
DEMMYVARIABLESAVOIDED = [0]
X = X[:, [i for i in xrange(cols) if i not in DEMMYVARIABLESAVOIDED]]
# ---------------------------------------------------------------
# I-7 Splitting the dataset into the Training set and Testing set
# ---------------------------------------------------------------
from sklearn.model_selection import train_test_split
seed = 2
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=seed)
X_valid, X_test, y_valid, y_test = train_test_split(X, y, test_size=0.5, random_state=seed)
# ---------------------------------------------------
# I-8 Features scaling, Standardization/Normalization
# ---------------------------------------------------
from sklearn.preprocessing import StandardScaler
# Define the scaler
scaler = StandardScaler().fit(X_train)
# Scale the train set
X_train = scaler.transform(X_train)
# Scale the test set
X_test = scaler.transform(X_test)
# ---------------------------------------
# Part II - Let now to make the ANN model !
# -----------------------------------------
# II-1 Importing the keras libraries and pack
import keras
from keras.optimizers import SGD, RMSprop, adam
# II-1-1 Importing the Sequential that allow to initilaze the ANN
from keras.models import Sequential
# II-1-2 Importing the Dense that allow to build layers
from keras.layers import Dense, Dropout
class Model(object):
def create_layer(self, classifier, units, activation, input_dim = False):
if (input_dim!=False):
# II-2-1 Adding the input layer and the first hidden layer
classifier.add(Dense(units,input_dim=input_dim, kernel_initializer='uniform', activation=activation))
else:
# II-2-2 Adding the second hidden layer
classifier.add(Dense(units, kernel_initializer='uniform', activation=activation))
# Return the model
return classifier
def plot_models(self, model):
model.summary()
# Save the model as pgn file
from keras.utils import plot_model
plot_model(model, to_file = 'model.png', show_shapes = True)
def evaluate_model(self, model,X_test, y_test):
score = model.evaluate(X_test, y_test, verbose = 1)
loss, accurancy = score[0], score[1]
print 'Test loss :', loss, '- Test accurancy :', accurancy
def save_models(self, method=None):
# model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'
# del model # deletes the existing model
# Create the folder, if it does not exist
if not os.path.exists(SAVING_FOLDER):
os.makedirs(SAVING_FOLDER)
# Saved file dir
FILE_DIR = '/'.join([SAVING_FOLDER, FILE_SAVED])
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
if (method=='early'):
# Early Stoping, Regularization
earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0, mode='auto')
used_method = earlystop
elif (method=='redu'):
# reduce Lr, Regularization
reducelr = ReduceLROnPlateau(monitor = 'val_loss', factor=0.1, patience=10, verbose=0, mode='auto',
epsilon=0.0001, cooldown=0, min_lr=0)
used_method = reducelr
else:
# Checkpoint, Regularization
checkpoint = ModelCheckpoint(FILE_DIR, monitor='val_loss', verbose=0, save_best_only=False,
save_weights_only=False, mode='auto', period=1)
used_method = checkpoint
return [used_method]
#def save_mode_weights(self,):
def load_models(self):
File_DIR = '/'.join([SAVING_FOLDER, FILE_SAVED])
# Return the saved model
return load_model(FILE_DIR)
#class EvaluateModel(object):
#def model_evaluation(self):
''' if we deal wilh multiclasses, the 1 will be remplaced by the nber of
classes and sigmod will be changed by softmax '''
# II-2 Initializing of the ANN model
model = Sequential()
# II-2-1 Build the model
rows, cols = X.shape
Model = Model()
# II-2-2 First hidden layer
input_dim, hidden_layer, activation, pdrop = cols, 120, 'relu', 0.2
model = Model.create_layer(model, hidden_layer, activation, input_dim)
model.add(Dropout(pdrop))
# II-2-3 Second hidden layer
hidden_layer, activation, pdrop = 120, 'relu', 0.5
model = Model.create_layer(model, hidden_layer, activation)
model.add(Dropout(pdrop))
# II-2-4 Output layer
''' if we deal with multiclasses, the 1 will be remplaced by the nber of classes and sigmod will be changed by softmax '''
''' choice the right activation function '''
if (nb_classes>1):
# Activation function for multiclasses
output_layer, activation = nb_classes, 'softmax'
else:
# Activation function for single output
output_layer, activation = nb_classes, 'sigmoid'
model = Model.create_layer(model, output_layer, activation)
# II-3 Compiling the ANN model
# II-3-1 let s define the optimizer of the model using SGD + momentum (how original).
lrate = 0.01
decay = lrate/nb_epochs
sgd = SGD(lr = lrate, momentum = 0.9, decay = decay, nesterov = False) # 'adam'
# II-3-2 Choice the right compile according the output classes numbers
if (nb_classes>1):
# For a multi-class classification problem
model.compile(optimizer = sgd, loss = 'categorical_crossentropy', metrics = ['accuracy'])
else:
# For a binary classification problem
model.compile(optimizer = sgd, loss = 'binary_crossentropy', metrics = ['accuracy'])
# II-4 Fitting the ANN to the Training set
model.fit(X_train, y_train, batch_size=batch_size, epochs=nb_epochs, callbacks = Model.save_models(),verbose=1)
# II-5 Evaluate the model on the Testing set
''' it's always a good idea to try out different evaluation metrics '''
Model.evaluate_model(model, X_test, y_test)
#%% ------------------------------------------------------
# Part III - Making the prediction and evaluating the model
# ---------------------------------------------------------
# III-1 Predicting the Test set results
''' To valide the model, the Test accuracy should be close to the Train accurancy '''
y_pred = model.predict(X_valid)
# Import the modules from `sklearn.metrics`
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, cohen_kappa_score
# Confusion matrix
confusion_matrix(y_valid, y_pred)
# Precision
precision_score(y_valid, y_pred)
# Recall
recall_score(y_valid, y_pred)
# F1 score
f1_score(y_valid, y_pred)
# Cohen's kappa
cohen_kappa_score(y_valid, y_pred)
y_pred = (y_pred > 0.5) # y_pred = False if y_pred <=5
# IV - Making the Confusion Matrix, this is a function
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred) # Evaluate The Model Performance
| [
"noreply@github.com"
] | noreply@github.com |
5ba9dd8e415b18ab6d4f9b3a05f2a2cfdb708008 | 6c4553622779bdf33dcaa9f2c909a69a08c69fb7 | /class_exam200_016_059/exam22.py | 7b1cd2bb1228100d65d9a6e7b296ddbd41eac6c1 | [] | no_license | choijeonggyu/talented_kaist_class_1st | dc0082ed08f2d88f7daa8fa3ed1e73b073d6b30a | 025f944864cb5a29c5a81cbe29e7b27279f886d7 | refs/heads/master | 2020-03-30T01:15:20.076166 | 2018-10-30T23:55:04 | 2018-10-30T23:55:04 | 150,567,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | a = True
b = False
print(a == 1) # True 가 출력됨
print(b != 0) # False가 출력됨
| [
"choi.jeonggyu@gmail.com"
] | choi.jeonggyu@gmail.com |
0b0793ee6ad33cc94c08ba64cebda6e147641e4e | 3a5db7fce2a15fea9b2f87ef09d54a7aaac3efd6 | /exercises/exc_02_05.py | c57f7d8d1267b49a13cc61a76d4b1bcfac9fe35f | [
"MIT"
] | permissive | Rhinrei/course-starter-python | 31362a6b4fd42def9130020d5e79e709f96b5cfd | 69978653b64e201d52cc6cdf831062137c8f7d81 | refs/heads/master | 2023-01-20T11:25:08.695250 | 2019-06-23T10:40:49 | 2019-06-23T10:40:49 | 183,874,669 | 0 | 0 | MIT | 2023-01-11T20:29:33 | 2019-04-28T07:45:37 | Jupyter Notebook | UTF-8 | Python | false | false | 262 | py | import pandas as pd
from sklearn.feature_extraction.text import ____
pd.set_option('display.expand_frame_repr', False)
tweets_csv = pd.read_csv("exercises/tweets.csv")
tweets_df = pd.DataFrame(tweets_csv)
vectorizer = ____
X = ____(____.values)
Y = ____.values | [
"rhinrei@gmail.com"
] | rhinrei@gmail.com |
dc598447b6086cc907684e1f2c7547d0a9d2d1ad | 6c9136be6e491ed8b0aff458c14ec32d4898d328 | /app.py | a791d1e28db4ea7079946dd3ff31a08097c3bcd2 | [
"MIT"
] | permissive | qube13/deepdream-seminar | 3e198fc9a63b6ec7ec6d9711724a7a0e256b420a | 2255d900e926a2f4ff916050ef544a3d7fb4a80b | refs/heads/master | 2023-08-08T22:19:30.831402 | 2019-11-06T20:09:51 | 2019-11-06T20:09:51 | 189,007,674 | 0 | 0 | MIT | 2023-07-22T06:54:49 | 2019-05-28T10:34:41 | Python | UTF-8 | Python | false | false | 1,912 | py | # -*- coding: utf-8 -*-
import os
from flask import Flask, render_template, request
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileRequired, FileAllowed
from wtforms import SubmitField
from helpers.neural_style import stylize
from helpers.utils import base64_img, init_images
PORT=5050
work_dir = os.getcwd()
# define app and config upload form
app = Flask(__name__)
app.config['SECRET_KEY'] = 'I have a dream'
upload_dir = work_dir+"/uploads/"
app.config['UPLOADED_PHOTOS_DEST'] = upload_dir
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
patch_request_class(app)
class UploadForm(FlaskForm):
photo = FileField(u'Choose file',validators=[FileAllowed(photos, u'Image only!'), FileRequired(u'File was empty!')])
submit = SubmitField(u'Style')
# define what happens
@app.route('/', methods=['GET', 'POST'])
def upload_file():
static_path = 'static/images'
included_extensions = ['jpg','jpeg','png']
img_paths = [fn for fn in os.listdir(static_path) if any(fn.endswith(ext) for ext in included_extensions)]
captions = [os.path.splitext(path)[0] for path in img_paths]
form = UploadForm()
if form.validate_on_submit():
img_name = photos.save(form.photo.data)
model_name = request.values.get("model")
content_image = upload_dir + img_name
model = work_dir+"/models/"+model_name+".model"
output_img = stylize(content_image, model, 0)
os.remove(content_image)
file_url = base64_img(output_img)
return render_template('finished.html',file_url=file_url)
return render_template('index.html', form=form, img_urls=img_paths,
idxs = range(len(img_paths)),captions=captions)
if __name__ == '__main__':
init_images()
app.run(port=PORT) | [
"christoph.retter@icloud.com"
] | christoph.retter@icloud.com |
aefcedb2d306ed5b5a5ccf37b1b4f31d064f7fcb | da0a3820236a671b149959f40a93d6dce328a09c | /sampling.py | 912ec1f00476bc28f0759521712f81c2591c725a | [] | no_license | phesami/bpr | e5ac5bf714a9c20ecb0ae262ab8183a0930e591c | a65c545a5c6d85a9fa77e039df381a4613d1e682 | refs/heads/master | 2021-01-01T06:48:49.138957 | 2017-03-26T02:30:46 | 2017-03-26T02:30:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,344 | py | import random
# sampling strategies
class Sampler(object):
def __init__(self,sample_negative_items_empirically):
self.sample_negative_items_empirically = sample_negative_items_empirically
def init(self,data,max_samples=None):
self.data = data
self.num_users,self.num_items = data.shape #expects a user/item matrix
self.max_samples = max_samples
def sample_user(self):
u = self.uniform_user()
num_items = self.data[u].getnnz()
assert(num_items > 0 and num_items != self.num_items)
return u
def sample_negative_item(self,user_items):
j = self.random_item()
while j in user_items:
j = self.random_item()
return j
def uniform_user(self):
return random.randint(0,self.num_users-1)
def random_item(self):
"""sample an item uniformly or from the empirical distribution
observed in the training data
"""
if self.sample_negative_items_empirically:
# just pick something someone rated!
u = self.uniform_user()
i = random.choice(self.data[u].indices)
else:
i = random.randint(0,self.num_items-1)
return i
def num_samples(self,n):
if self.max_samples is None:
return n
return min(n,self.max_samples)
class UniformUserUniformItem(Sampler):
def generate_samples(self,data,max_samples=None):
self.init(data,max_samples)
#nnz is the count of non-zero entries
for _ in xrange(self.num_samples(self.data.nnz)):
u = self.uniform_user()
# sample positive item
i = random.choice(self.data[u].indices)
j = self.sample_negative_item(self.data[u].indices)
yield u,i,j
class UniformUserUniformItemWithoutReplacement(Sampler):
def generate_samples(self,data,max_samples=None):
self.init(data,max_samples)
# make a local copy of data as we're going to "forget" some entries
self.local_data = self.data.copy()
for _ in xrange(self.num_samples(self.data.nnz)):
u = self.uniform_user()
# sample positive item without replacement if we can
user_items = self.local_data[u].nonzero()[1]
if len(user_items) == 0:
# reset user data if it's all been sampled
for ix in self.local_data[u].indices:
self.local_data[u,ix] = self.data[u,ix]
user_items = self.local_data[u].nonzero()[1]
i = random.choice(user_items)
# forget this item so we don't sample it again for the same user
self.local_data[u,i] = 0
j = self.sample_negative_item(user_items)
yield u,i,j
class UniformPair(Sampler):
def generate_samples(self,data,max_samples=None):
self.init(data,max_samples)
for _ in xrange(self.num_samples(self.data.nnz)):
idx = random.randint(0,self.data.nnz-1)
u = self.users[self.idx]
i = self.items[self.idx]
j = self.sample_negative_item(self.data[u])
yield u,i,j
class UniformPairWithoutReplacement(Sampler):
def generate_samples(self,data,max_samples=None):
self.init(data,max_samples)
idxs = range(self.data.nnz)
random.shuffle(idxs)
self.users, self.items = self.data.nonzero()
self.users = self.users[idxs]
self.items = self.items[idxs]
self.idx = 0
for _ in xrange(self.num_samples(self.data.nnz)):
u = self.users[self.idx]
i = self.items[self.idx]
j = self.sample_negative_item(self.data[u].todense())
self.idx += 1
yield u,i,j
class ExternalSchedule(Sampler):
def __init__(self,filepath,index_offset=0):
self.filepath = filepath
self.index_offset = index_offset
def generate_samples(self,data,max_samples=None):
self.init(data,max_samples)
f = open(self.filepath)
samples = [map(int,line.strip().split()) for line in f]
random.shuffle(samples) # important!
num_samples = self.num_samples(len(samples))
for u,i,j in samples[:num_samples]:
yield u-self.index_offset,i-self.index_offset,j-self.index_offset
| [
"eggie5@gmail.com"
] | eggie5@gmail.com |
944ba56ff7aca83e2eb127f4da13c740715ee035 | f57e34d0a708ea1139f80f8e5b968c55f6fd2621 | /dassl/utils/logger.py | 9b37774ef48a52e330761d229098b3e3627aa44b | [
"MIT"
] | permissive | MohammadJavadD/Dassl.pytorch | bfdac8f28781af5f198eb7a1318043e04dc544d3 | 5e83fdce6fb51d8d4fbe0441a016eade2ebda423 | refs/heads/master | 2022-07-06T06:33:53.655489 | 2020-05-11T20:55:24 | 2020-05-11T20:55:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,723 | py | import os
import sys
import time
import os.path as osp
from .tools import mkdir_if_missing
__all__ = ['Logger', 'setup_logger']
class Logger(object):
"""Write console output to external text file.
Imported from `<https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py>`_
Args:
fpath (str): directory to save logging file.
Examples::
>>> import sys
>>> import os.path as osp
>>> save_dir = 'output/experiment-1'
>>> log_name = 'train.log'
>>> sys.stdout = Logger(osp.join(save_dir, log_name))
"""
def __init__(self, fpath=None):
self.console = sys.stdout
self.file = None
if fpath is not None:
mkdir_if_missing(osp.dirname(fpath))
self.file = open(fpath, 'w')
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, *args):
self.close()
def write(self, msg):
self.console.write(msg)
if self.file is not None:
self.file.write(msg)
def flush(self):
self.console.flush()
if self.file is not None:
self.file.flush()
os.fsync(self.file.fileno())
def close(self):
self.console.close()
if self.file is not None:
self.file.close()
def setup_logger(output=None):
if output is None:
return
if output.endswith('.txt') or output.endswith('.log'):
fpath = output
else:
fpath = osp.join(output, 'log.txt')
if osp.exists(fpath):
# make sure the existing log file is not over-written
fpath += time.strftime('-%Y-%m-%d-%H-%M-%S')
sys.stdout = Logger(fpath)
| [
"k.zhou@surrey.ac.uk"
] | k.zhou@surrey.ac.uk |
b37a8243749b1cbb1fb274960fb8cc5a20a84f1b | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r10p1/Gen/DecFiles/options/14195637.py | 19a1f73398d726879f251757b9c3658f6d49a240 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/14195637.py generated: Wed, 25 Jan 2017 15:25:33
#
# Event Type: 14195637
#
# ASCII decay Descriptor: [B_c+ => (D*_s+ => (D_s+ -> K- K+ pi+) gamma) (D*(2007)~0 => (D~0 -> K+ pi-) pi0) ]CC
#
from Configurables import Generation
Generation().EventType = 14195637
Generation().SampleGenerationTool = "Special"
from Configurables import Special
Generation().addTool( Special )
Generation().Special.ProductionTool = "BcVegPyProduction"
Generation().PileUpTool = "FixedLuminosityForRareProcess"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bc_DsstDst0,Dsgamma,KKpi,D0pi0,Kpi=BcVegPy,DecProdCut,HELAMP010.dec"
Generation().Special.CutTool = "BcDaughtersInLHCb"
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
b006d9d305260c635ceea0dbeff8ee13497e9ff3 | 1e8465b285acc46ab99187ee726c1c860720ee8d | /raft3d/blocks/gru.py | dfd9919e28e1accbc4eda882f4553f5c5d244595 | [
"BSD-3-Clause"
] | permissive | cv-stuttgart/RAFT-3D | bde1c9a654619748e07ee90d8e3971eca88f6cf8 | 285da377ea4c08dc1bfee8cfa78b5ca168df5d3d | refs/heads/master | 2023-08-20T14:47:01.597414 | 2021-10-29T06:36:32 | 2021-10-29T06:36:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
class ConvGRU(nn.Module):
def __init__(self, hidden_dim=128, input_dim=192+128, dilation=4):
super(ConvGRU, self).__init__()
self.hidden_dim = hidden_dim
self.convz1 = nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1)
self.convz2 = nn.Conv2d(hidden_dim, hidden_dim, 3, dilation=dilation, padding=dilation)
self.convr1 = nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1)
self.convr2 = nn.Conv2d(hidden_dim, hidden_dim, 3, dilation=dilation, padding=dilation)
self.convq1 = nn.Conv2d(hidden_dim, hidden_dim, 3, padding=1)
self.convq2 = nn.Conv2d(hidden_dim, hidden_dim, 3, dilation=dilation, padding=dilation)
def forward(self, h, *inputs):
iz, ir, iq = 0, 0, 0
for inp in inputs:
inp = inp.split([self.hidden_dim]*3, dim=1)
iz = iz + inp[0]
ir = ir + inp[1]
iq = iq + inp[2]
z = torch.sigmoid(self.convz1(h) + self.convz2(h) + iz)
r = torch.sigmoid(self.convr1(h) + self.convr2(h) + ir)
q = torch.tanh(self.convq1(r*h) + self.convq2(r*h) + iq)
h = (1-z) * h + z * q
return h
| [
"zachteed@gmail.com"
] | zachteed@gmail.com |
003afde634b2dbdf9963104880cecb922fe56bfa | c6818c06aacb1eca1fffa8bbc51b6f3aac25c177 | /acre/settings.py | 039fa2a786d7f1bc584f1052a125472bea4cb0ef | [] | no_license | Acon94/ACRE | 2d0769780c9f81eba05085ffd8b0af225666d6de | 73622a6dc4ba0f30e8d3e90b02d23c8efd14a5e1 | refs/heads/master | 2022-08-02T02:07:53.004308 | 2020-05-29T15:25:50 | 2020-05-29T15:25:50 | 267,840,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,898 | py | """
Django settings for acre project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
from django.contrib.messages import constants as messages
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '14tmc3zctdr=(@n&nwwoq#ms9v#)x-3*!#!5pl&%gi=v!0uh-k'
GOOGLE_MAPS_API_KEY = 'AIzaSyCXKJ3T-HIJwFLuS4aBq15Lg6tsiPcAXJ0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'pages.apps.PagesConfig',
'listings.apps.ListingsConfig',
'realtors.apps.RealtorsConfig',
'accounts.apps.AccountsConfig',
'contacts.apps.ContactsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'multiselectfield',
'django_google_maps',
'django.contrib.humanize',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'acre.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'acre.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'acredb',
'USER':'postgres',
'password':'Oldhead@12',
'HOST':'localhost'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT= os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS =[
os.path.join(BASE_DIR, 'acre/static')
]
# media Folder settings
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
#messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
try:
from .local_settings import *
except ImportError:
pass
| [
"andrew@Andrews-MacBook-Pro.local"
] | andrew@Andrews-MacBook-Pro.local |
625e3de4d65d7963f766548a6552be5ceb7e07ad | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/types/mobile_device_constant_service.py | 3cc926700a80a75c7dc7e1493076bc54422a080b | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,303 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.services',
marshal='google.ads.googleads.v8',
manifest={
'GetMobileDeviceConstantRequest',
},
)
class GetMobileDeviceConstantRequest(proto.Message):
r"""Request message for
[MobileDeviceConstantService.GetMobileDeviceConstant][google.ads.googleads.v8.services.MobileDeviceConstantService.GetMobileDeviceConstant].
Attributes:
resource_name (str):
Required. Resource name of the mobile device
to fetch.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
707c92b00f26da27a2d1ecca127c641241976de6 | fa451e1bdd074d62fb6f1c089954d1e4b0257d5b | /43/solution.py | efd8604e79fe9eaa14d52534eb972ac8d93acafc | [
"Apache-2.0"
] | permissive | live-wire/leetcode-solutions | e985b4c61814322372f8a953b14c54544eb50f5e | c38ed2f127d04a6fb9a34c4d0d93e168812ef646 | refs/heads/main | 2023-08-29T09:11:19.911922 | 2021-10-21T10:58:15 | 2021-10-21T10:58:15 | 305,688,177 | 3 | 4 | Apache-2.0 | 2020-10-25T19:43:25 | 2020-10-20T11:37:49 | Python | UTF-8 | Python | false | false | 233 | py | class Solution:
def multiply(self, num1: str, num2: str) -> str:
if not num1 or not num2:
return None
num1 = int(num1)
num2 = int(num2)
return str(num1*num2)
| [
"anandkanav92@gmail.com"
] | anandkanav92@gmail.com |
457ba8dbc83eec19f55e79ee17cc5696a7c1040c | db4a08028f7d1d65752c82abed17a5532f4637a4 | /bicycle.py | b2488e9dad3730039d1c88e0e8b15b06df122be5 | [] | no_license | mukuld/python | a80efaaa6b7c361c96cb9e6b756d96ee01c66ffa | 3acfcf8a4244ab8a6890d24bdb71d2ac8b60e76b | refs/heads/master | 2023-08-18T04:34:31.715644 | 2023-01-06T07:19:03 | 2023-01-06T07:19:03 | 119,087,903 | 0 | 1 | null | 2023-07-25T21:01:33 | 2018-01-26T18:26:57 | Python | UTF-8 | Python | false | false | 2,532 | py | # Python Programme Number 93
# Class Generation: Inheritance
# Programmer: Mukul Dharwadkar
# Date: 30 July 2010
import cPickle, shelve
class Bicycle(object):
"""A bicycle creating class"""
total = 0
def __init__(self, name, cadence=0, speed=0, gear=1):
print "A new bicycle is manufactured."
self.name = name
print "Its name is:", self.name
self.cadence = cadence
print "Its cadence is:", self.cadence
self.speed = speed
print "Its speed is:", self.speed
self.gear = gear
print "Its gear is:", self.gear
Bicycle.total += 1
def chg_cadence(self, cadence):
self.cadence = cadence
def inc_speed(self, speedup):
self.speed = speed + speedup
def dec_speed(self, brake):
self.speed = speed - brake
def chg_gear(self, gear):
self.gear = gear
def total_bikes():
print "You currently have", Bicycle.total, "bicycle(s)"
total_bikes = staticmethod(total_bikes)
def print_state(self):
print "The current state of your bike is given below"
print "The name of your bike is:", self.name
print "The cadence is:", self.cadence
print "The speed is:", self.speed
print "The gear is:", self.gear
def bike_factory():
name = raw_input("What would like to name your bike? ")
cadence = input("What should be the cadence? ")
speed = input("What should be the speed ")
gear = input("What gear should be it in? ")
bike = Bicycle(name, cadence, speed, gear)
dump_data(bike)
return bike
def dump_data(self):
file = open("bikes.dat", "w")
cPickle.dump(bike(), file)
file.close()
def main():
# Bicycle.total_bikes()
# bike1 = Bicycle(50, 18, 4)
choice = None
while choice != 0:
print \
"""
Bike Factory
0 - Exit
1 - Make a bike
2 - unDump Data
3 - Change speed
4 - Change gear
"""
choice = input("What would you like to do? ")
if choice == 0:
print "Good-bye"
elif choice == 1:
bike_factory()
elif choice == 2:
dump_data()
# bike_factory()
Bicycle.total_bikes()
# bike1.chg_cadence(60)
# bike1.print_state()
# Bicycle.total_bikes()
main() | [
"mukul.dharwadkar@gmail.com"
] | mukul.dharwadkar@gmail.com |
3040177c86a7ba3cd484843a0188b3dff3126388 | 60c6b8efe7ac85ac99fb727a44ec51ca9e9069df | /python/lswrc.py | bcf4cbfaea718a29c144b4e146b9c64b9b332673 | [
"Apache-2.0"
] | permissive | misslibra/algorithms | f3681130fc9af2f7cc9b4fb91d6cd81b2c14f595 | 31648ee7a25710ff5340595222525721116f7e84 | refs/heads/master | 2020-04-17T22:12:15.209907 | 2019-01-14T11:17:01 | 2019-01-14T11:17:01 | 166,984,498 | 1 | 0 | Apache-2.0 | 2019-01-22T11:45:29 | 2019-01-22T11:45:29 | null | UTF-8 | Python | false | false | 1,496 | py | #!/usr/bin/env python
# encoding: utf-8
# author: cappyclearl
class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
if len(s) is 0:
return len(s)
# 字符之前出现的位置
char_position = dict()
# s[i-1]为末尾的最长无重复子串长度
pre_arr = []
for i, _ in enumerate(s):
last_post_of_char = char_position.get(s[i])
# 第一次出现当前字符
if last_post_of_char is None:
pre_arr.append(1 if i == 0 else pre_arr[i - 1] + 1)
char_position[s[i]] = i
else:
# 不是第一次出现,统计前一个字符的lswrc长度
a_pos = last_post_of_char + 1
un_repeat_len = pre_arr[i - 1]
b_pos = i - un_repeat_len
if a_pos >= b_pos:
# 当前位置的lswrc
pre_arr.append(i - a_pos + 1)
else:
pre_arr.append(i - b_pos + 1)
# 更新当前字符位置,之前的不再记录
char_position[s[i]] = i
return max(pre_arr)
if __name__ == '__main__':
s = Solution()
assert s.lengthOfLongestSubstring('bbbbbb') == 1
assert s.lengthOfLongestSubstring('') == 0
assert s.lengthOfLongestSubstring('abcabcbb') == 3
assert s.lengthOfLongestSubstring('pwwkew') == 3
| [
"cappyclear@gmail.com"
] | cappyclear@gmail.com |
7cc61005f3bbaf56b3658836405ef32f6a45fc42 | bd648de16cef62807071164304b860bec2fbaef9 | /historical_data/admin.py | 6a225e1ef6cbe11b4049ebeffafe541c83cdfe99 | [] | no_license | webclinic017/rest_api_finance_yahoo | c1b8fbdcde46ad25aa52e6f69dee14619e1c5a23 | 3d6002c82a3c6b2ce63c4d1034700390c54e05fd | refs/heads/main | 2023-08-05T10:08:58.586354 | 2021-09-24T18:39:23 | 2021-09-24T18:39:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | from django.contrib import admin
from .models import FinanceData
class PanelAdmin(admin.ModelAdmin):
list_display = ['id', 'symbol', 'date']
list_display_links = ['id', 'symbol', 'date']
search_fields = ['id', 'symbol', 'date']
admin.site.register(FinanceData, PanelAdmin)
| [
"kalmykovalexander28@gmail.com"
] | kalmykovalexander28@gmail.com |
14583aca318c99b89df9bdf4a06f82d336f413bd | e3e5a0618b91fe58318763f2186422b95e6edd10 | /baidupcs_py/baidupcs/api.py | 68e6ac5e0ad2875ab752460896b24e449357f92e | [
"MIT"
] | permissive | hfh1999/BaiduPCS-Py | ddd66ff4d33d0e609021280a1edc040d51654940 | 4cf77bba7afbc8c82e0bc6ecd4ffc4a66aab1c71 | refs/heads/master | 2023-02-20T06:02:08.897248 | 2021-01-26T08:56:37 | 2021-01-26T08:56:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,950 | py | from typing import Optional, Dict, List, Tuple, Callable
from io import BytesIO
import datetime
from baidupcs_py.common.io import RangeRequestIO
from baidupcs_py.baidupcs.pcs import BaiduPCS, BaiduPCSError, M3u8Type
from baidupcs_py.baidupcs.inner import (
PcsFile,
PcsMagnetFile,
PcsSharedLink,
PcsSharedPath,
FromTo,
PcsAuth,
PcsUser,
PcsQuota,
CloudTask,
)
from requests_toolbelt import MultipartEncoderMonitor
from PIL import Image
from rich import print
from rich.prompt import Prompt
class BaiduPCSApi:
def __init__(
self,
bduss: Optional[str] = None,
stoken: Optional[str] = None,
ptoken: Optional[str] = None,
cookies: Dict[str, Optional[str]] = {},
user_id: Optional[int] = None,
):
self._baidupcs = BaiduPCS(
bduss, stoken=stoken, ptoken=ptoken, cookies=cookies, user_id=user_id
)
@property
def cookies(self) -> Dict[str, Optional[str]]:
return self._baidupcs.cookies
def bdstoken(self) -> Optional[str]:
return self._baidupcs.bdstoken()
def quota(self) -> PcsQuota:
info = self._baidupcs.quota()
return PcsQuota(quota=info["quota"], used=info["used"])
def meta(self, *remotepaths: str) -> List[PcsFile]:
info = self._baidupcs.meta(*remotepaths)
return [PcsFile.from_(v) for v in info.get("list", [])]
def exists(self, remotepath: str) -> bool:
return self._baidupcs.exists(remotepath)
def is_file(self, remotepath: str) -> bool:
return self._baidupcs.is_file(remotepath)
def is_dir(self, remotepath: str) -> bool:
return self._baidupcs.is_dir(remotepath)
def list(
self,
remotepath: str,
desc: bool = False,
name: bool = False,
time: bool = False,
size: bool = False,
) -> List[PcsFile]:
info = self._baidupcs.list(
remotepath, desc=desc, name=name, time=time, size=size
)
return [PcsFile.from_(v) for v in info.get("list", [])]
def upload_file(
self,
localpath: str,
remotepath: str,
ondup="overwrite",
callback: Callable[[MultipartEncoderMonitor], None] = None,
) -> PcsFile:
info = self._baidupcs.upload_file(
localpath, remotepath, ondup=ondup, callback=callback
)
return PcsFile.from_(info)
def rapid_upload_file(
self, localpath: str, remotepath: str, ondup="overwrite"
) -> PcsFile:
info = self._baidupcs.rapid_upload_file(localpath, remotepath, ondup=ondup)
return PcsFile.from_(info)
def upload_slice(
self, buf: bytes, callback: Callable[[MultipartEncoderMonitor], None] = None
) -> str:
info = self._baidupcs.upload_slice(buf, callback=callback)
return info["md5"]
def combine_slices(
self, slice_md5s: List[str], remotepath: str, ondup="overwrite"
) -> PcsFile:
info = self._baidupcs.combine_slices(slice_md5s, remotepath, ondup=ondup)
return PcsFile.from_(info)
def search(
self, keyword: str, remotepath: str, recursive: bool = False
) -> List[PcsFile]:
info = self._baidupcs.search(keyword, remotepath, recursive=recursive)
pcs_files = []
for file_info in info["list"]:
pcs_files.append(PcsFile.from_(file_info))
return pcs_files
def makedir(self, directory: str) -> PcsFile:
info = self._baidupcs.makedir(directory)
return PcsFile.from_(info)
def move(self, *remotepaths: str) -> List[FromTo]:
info = self._baidupcs.move(*remotepaths)
r = info["extra"].get("list")
if not r:
raise BaiduPCSError("File operator [move] fails")
return [FromTo(from_=v["from"], to_=v["to"]) for v in r]
def rename(self, source: str, dest: str) -> FromTo:
info = self._baidupcs.rename(source, dest)
r = info["extra"].get("list")
if not r:
raise BaiduPCSError("File operator [rename] fails")
v = r[0]
return FromTo(from_=v["from"], to_=v["to"])
def copy(self, *remotepaths: str):
info = self._baidupcs.copy(*remotepaths)
r = info["extra"].get("list")
if not r:
raise BaiduPCSError("File operator [copy] fails")
return [FromTo(from_=v["from"], to_=v["to"]) for v in r]
def remove(self, *remotepaths: str):
self._baidupcs.remove(*remotepaths)
def magnet_info(self, magnet: str) -> List[PcsMagnetFile]:
info = self._baidupcs.magnet_info(magnet)
return [PcsMagnetFile.from_(v) for v in info["magnet_info"]]
def torrent_info(self, remote_torrent: str):
self._baidupcs.torrent_info(remote_torrent)
def add_task(self, task_url: str, remotedir: str) -> str:
info = self._baidupcs.add_task(task_url, remotedir)
return str(info["task_id"])
def tasks(self, *task_ids: str) -> List[CloudTask]:
info = self._baidupcs.tasks(*task_ids)
tasks = []
for task_id, v in info["task_info"].items():
v["task_id"] = task_id
tasks.append(CloudTask.from_(v))
return tasks
def list_tasks(self) -> List[CloudTask]:
info = self._baidupcs.list_tasks()
return [CloudTask.from_(v) for v in info["task_info"]]
def clear_tasks(self) -> int:
info = self._baidupcs.clear_tasks()
return info["total"]
def cancel_task(self, task_id: str):
self._baidupcs.cancel_task(task_id)
def share(self, *remotepaths: str, password: Optional[str] = None) -> PcsSharedLink:
info = self._baidupcs.share(*remotepaths, password=password)
link = PcsSharedLink.from_(info)._replace(
paths=list(remotepaths), password=password
)
return link
def list_shared(self, page: int = 1) -> List[PcsSharedLink]:
info = self._baidupcs.list_shared(page)
return [PcsSharedLink.from_(v) for v in info["list"]]
def shared_password(self, share_id: int) -> Optional[str]:
info = self._baidupcs.shared_password(share_id)
p = info["pwd"]
if p == "0":
return None
return p
def cancel_shared(self, *share_ids: int):
self._baidupcs.cancel_shared(*share_ids)
def access_shared(
self,
shared_url: str,
password: str,
vcode_str: str = "",
vcode: str = "",
show_vcode: bool = True,
):
while True:
try:
self._baidupcs.access_shared(shared_url, password, vcode_str, vcode)
return
except BaiduPCSError as err:
if err.error_code not in (-9, -62):
raise err
if show_vcode:
if err.error_code == -62: # -62: '可能需要输入验证码'
print("[yellow]Need vcode![/yellow]")
if err.error_code == -9:
print("[red]vcode is incorrect![/red]")
vcode_str, vcode_img_url = self.getcaptcha(shared_url)
img_cn = self.get_vcode_img(vcode_img_url, shared_url)
img_buf = BytesIO(img_cn)
img_buf.seek(0, 0)
img = Image.open(img_buf)
img.show()
vcode = Prompt.ask("input vcode")
else:
raise err
def getcaptcha(self, shared_url: str) -> Tuple[str, str]:
"""Return `vcode_str`, `vcode_img_url`"""
info = self._baidupcs.getcaptcha(shared_url)
return info["vcode_str"], info["vcode_img"]
def get_vcode_img(self, vcode_img_url: str, shared_url: str) -> bytes:
return self._baidupcs.get_vcode_img(vcode_img_url, shared_url)
def shared_paths(self, shared_url: str) -> List[PcsSharedPath]:
info = self._baidupcs.shared_paths(shared_url)
uk = info["uk"]
share_id = info["shareid"]
bdstoken = info["bdstoken"]
if not info.get("file_list"):
return []
return [
PcsSharedPath.from_(v)._replace(uk=uk, share_id=share_id, bdstoken=bdstoken)
for v in info["file_list"]["list"]
]
def list_shared_paths(
self, sharedpath: str, uk: int, share_id: int, bdstoken: str
) -> List[PcsSharedPath]:
info = self._baidupcs.list_shared_paths(sharedpath, uk, share_id)
return [
PcsSharedPath.from_(v)._replace(uk=uk, share_id=share_id, bdstoken=bdstoken)
for v in info["list"]
]
def transfer_shared_paths(
self,
remotedir: str,
fs_ids: List[int],
uk: int,
share_id: int,
bdstoken: str,
shared_url: str,
):
self._baidupcs.transfer_shared_paths(
remotedir, fs_ids, uk, share_id, bdstoken, shared_url
)
def user_info(self) -> PcsUser:
info = self._baidupcs.user_info()
user_id = int(info["user"]["id"])
user_name = info["user"]["name"]
info = self._baidupcs.tieba_user_info(user_id)
age = float(info["user"]["tb_age"])
sex = info["user"]["sex"]
if sex == 1:
sex = "♂"
elif sex == 2:
sex = "♀"
else:
sex = "unknown"
auth = PcsAuth(
bduss=self._baidupcs._bduss,
cookies=self.cookies,
stoken=self._baidupcs._stoken,
ptoken=self._baidupcs._ptoken,
)
quota = self.quota()
products = self.user_products()
return PcsUser(
user_id=user_id,
user_name=user_name,
auth=auth,
age=age,
sex=sex,
quota=quota,
products=products,
)
def user_products(self) -> Dict[str, str]:
info = self._baidupcs.user_products()
pds = {}
for p in info["product_infos"]:
name = p["product_name"]
t = p["end_time"] - p["start_time"]
avail = str(datetime.timedelta(seconds=t))
pds[name] = f"Left {avail}"
pds["level"] = info["level_info"]["current_level"]
return pds
def download_link(self, remotepath: str) -> str:
info = self._baidupcs.download_link(remotepath)
return info["urls"][0]["url"]
def file_stream(
self,
remotepath: str,
callback: Callable[..., None] = None,
) -> RangeRequestIO:
return self._baidupcs.file_stream(remotepath, callback=callback)
def m3u8_stream(self, remotepath: str, type: M3u8Type = "M3U8_AUTO_720") -> str:
info = self._baidupcs.m3u8_stream(remotepath, type)
if info.get("m3u8_content"):
return info["m3u8_content"]
else:
# Here should be a error
return ""
| [
"dfhayst@gmail.com"
] | dfhayst@gmail.com |
8794ff1566270fab11ca69b0d82320c27b9d6c1a | cf03921f6fe8f079785a2a4fe750ee29b898c1ca | /recognize_distribute.py | a1749f14fa924c93e734cb6c2744a1459e7c5264 | [] | no_license | GUANJIAN1997/Floorlearningandrecognition | 3c99f6bfad210d540d383fe00b1898fa84c4b5e3 | e84d118033b25bb2c20b1ef69fd4bd71ce726081 | refs/heads/master | 2022-06-09T13:23:47.488540 | 2020-05-05T12:19:41 | 2020-05-05T12:19:41 | 261,464,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,021 | py | import json
import numpy as np
filepath = "/Users/jianguan/Desktop/毕业/楼层判定实验数据/test1234567.json"
from recognize_test import find_floor
import scipy.stats
def json2List(filepath):
rowdata = []
f = open(filepath,"r")
for line in f:
decodes = json.loads(line)
rowdata.append(decodes)
f.close()
return rowdata
def get_barometric():
barometric_data = []
barometric_data_HZ = []
transfer_list = []
a = json2List(filepath)
for i in a:
if "hpa" in i:
transfer_list.append(i["hpa"])
if "nodeId" in i:
barometric_data.append(transfer_list)
transfer_list = []
barometric_data.append(transfer_list)
# print(barometric_data)
for i in barometric_data:
barometric_data_HZ.append(round(np.median(i), 4))
# array = np.array(barometric_data_HZ)
# np.savetxt("/Users/jianguan/Desktop/result3.txt", array)
return barometric_data_HZ #得到1.5s的一个气压
# s = get_barometric()
# print(s)
def upordown(gb):
num = 0
test = []
upordown_num = []
# upordown_barometric = []
for num in range(len(gb)-3):
if (abs(gb[num] - gb[num + 1]) >=0.027 and abs(gb[num + 1] - gb[num + 2]) >= 0.027):
upordown_num.append(num)
elif abs(gb[num] - gb[num + 1]) < 0.045 and abs(gb[num + 1] - gb[num + 2]) < 0.045 and abs(gb[num + 2] - gb[num + 3]) < 0.045 :
test.append(gb[num])
else:
upordown_num.append(num)
# print(upordown_num)
return test,upordown_num #得到删除上下楼之后气压值还有上下楼时候的气压编号
# a = upordown(get_barometric())
# print(a)
def get_wifi_bro():
barometric_data_HZ = get_barometric()
rowdata = json2List(filepath)
wifi_data = []
wifi_data_dic_list = []
transfer_list = []
transfer_list2 = []
num_wifi = 0
num_wifi_list = []
for i in rowdata:
if "nodeId" in i:
transfer_list = []
for j in i["scanResults"]:
# print(j)
if j["essid"] == "Rits-Webauth" or j["essid"] == "eduroam":
transfer_list.append(j["bssid"])
transfer_list2.append(j["rssi"])
transfer_list.append(transfer_list2)
transfer_list2 = []
# print(transfer_list)
wifi_data.append(transfer_list)
num_wifi = num_wifi + 1
if not transfer_list:
num_wifi_list.append(num_wifi)
for i in wifi_data:
wifi_data_dic = dict(zip(i[::2],i[1::2]))
wifi_data_dic_list.append(wifi_data_dic)
num = 0
for i in wifi_data_dic_list:
# print(i)
for j in i:
# print(j)
i[j].append(barometric_data_HZ[num])
num = num + 1
return wifi_data_dic_list #得到MAC地址字典值是列表形式的rssi和对应的气压值(每1.5秒一个字典)
# print(get_wifi_bro())
def getmaxbarometric():
learned_floor = find_floor()
learned_floor_dic = []
for i in learned_floor:
learned_floor_dic.append(dict(zip(i[0::2],i[1::2])))
wifi_data_dic_list = get_wifi_bro()
# deleted_upordown_number = set(list(range(len(get_barometric())-1)))-set(upordown(get_barometric())[1]) #删除上下楼的数据的标号之后的标号
upordown_number = upordown(get_barometric())[1]
deleted_upordown_number2 = list(range(len(get_barometric()) - 1))
# print(upordown_number)
for i in upordown_number:
deleted_upordown_number2[i] = "階層移動" #把上下楼的标号标注階層移動*******代替deleted_upordown_number
wifi_floor_list = []
floor_barometric = []
floor_barometric_difference = []
boolean = True
one_floor = []
fingerprint = []
similationWiFi = []
jiao = []
distance = []
distancefinal = []
print(wifi_data_dic_list[0])
for i in range(len(deleted_upordown_number2)): #1.5一个,编号,0到总个数,其中上下楼的编号已经被标注成"階層移動"
if deleted_upordown_number2[i] != "階層移動":
for key in wifi_data_dic_list[i]: #1.5秒的wifi数据
if key not in one_floor:
if wifi_data_dic_list[i][key][0] > -100:
one_floor.append(key)
one_floor.append(wifi_data_dic_list[i][key]) #添加bssid和对应的rssi和气压(新的bssid)
elif wifi_data_dic_list[i][key][0] > -100:
one_floor[one_floor.index(key)+1].insert(len(one_floor[one_floor.index(key)+1])-1,wifi_data_dic_list[i][key][0])
similation_7 = []
jiao_7 = []
for i in learned_floor:
if len(list(set(i[0::2]) & set(one_floor[0::2]))):
similation_7.append(len(list(set(i[0::2]) | set(one_floor[0::2]))) / len(list(set(i[0::2]) & set(one_floor[0::2]))))
jiao_7.append(list(set(i[0::2]) & set(one_floor[0::2])))
else:
similation_7.append(0)
jiao_7.append([])
similationWiFi.append(similation_7) #wifi的类似度
jiao.append(jiao_7) #现在目前累计得到的bssid和学习到的每层的bssid的交集
#目前累计收到的信号计算概率分布
one_floor_dic = dict(zip(one_floor[0::2], one_floor[1::2]))
floor7_pro = []
for i in range(len(jiao_7)):
transfer1 = []
if jiao_7[i]:
for k in jiao_7[i]:
transfer1.append(k)
transfer2 = []
count1 = 0
count2 = 0
count3 = 0
count4 = 0
count5 = 0
count6 = 0
count7 = 0
count8 = 0
count9 = 0
count10 = 0
count11 = 0
count12 = 0
count13 = 0
count14 = 0
count15 = 0
count16 = 0
count = []
for j in one_floor_dic[k]:
if -100 <= j < -95:
count1 += 1
if -95 <= j < -90:
count2 += 1
if -90 <= j < -85:
count3 += 1
if -85 <= j < -80:
count4 += 1
if -80 <= j < -75:
count5 += 1
if -75 <= j < -70:
count6 += 1
if -70 <= j < -65:
count7 += 1
if -65 <= j < -60:
count8 += 1
if -60 <= j < -55:
count9 += 1
if -55 <= j < -50:
count10 += 1
if -50 <= j < -45:
count11 += 1
if -45 <= j < -40:
count12 += 1
if -40 <= j < -35:
count13 += 1
if -35 <= j < -30:
count14 += 1
if -30 <= j < -25:
count15 += 1
if -25 <= j < -20:
count16 += 1
count.append(count1)
count.append(count2)
count.append(count3)
count.append(count4)
count.append(count5)
count.append(count6)
count.append(count7)
count.append(count8)
count.append(count9)
count.append(count10)
count.append(count11)
count.append(count12)
count.append(count13)
count.append(count14)
count.append(count15)
count.append(count16)
sum_count = sum(count)
if sum_count:
for q in count:
if q:
transfer2.append(round((float(q) / float(sum(count))), 6))
#
else:
transfer2.append(float(0.000001))
transfer1.append(transfer2)
floor7_pro.append(transfer1)
else:
transfer3 = []
transfer3.append("N")
transfer3.append([])
floor7_pro.append(transfer3)
floor7_pro_dic = []
for i in floor7_pro:
floor7_pro_dic.append(dict(zip(i[0::2],i[1::2])))
# if len(jiao) == 78:
# print(floor7_pro_dic)
#
# if len(jiao) == 1:
# print(floor7_pro)
KL7 = []
for i in range(len(jiao_7)):
KL_sum = 0
for j in jiao_7[i]:
if j:
KL_sum += scipy.stats.entropy(learned_floor_dic[i][j],floor7_pro_dic[i][j])
if len(jiao_7[i]):
KL7.append(KL_sum)
else:
KL7.append("NO")
distance.append(KL7)
distancefinal_7 = []
for i in range(len(KL7)):
if KL7[i]!="NO":
distancefinal_7.append(KL7[i] * similation_7[i])
else:
distancefinal_7.append("NO")
distancefinal.append(distancefinal_7)
distancefinal_7_num = []
for i in distancefinal_7:
if i != "NO":
distancefinal_7_num.append(i)
if distancefinal_7_num:
floor = distancefinal_7.index(min(distancefinal_7_num)) + 1
else:
floor = 0
# if len(jiao) == 233:
# print(floor7_pro_dic)
# if len(jiao) == 234:
# print(floor7_pro_dic)
# if len(jiao) == 235:
# print(floor7_pro_dic)
print("————————————————————————————————————————認識中————————————————————————————————————————")
print(len(floor7_pro_dic[0]))
print(len(floor7_pro_dic[1]))
print(len(floor7_pro_dic[2]))
print(len(floor7_pro_dic[3]))
print(len(floor7_pro_dic[4]))
print(len(floor7_pro_dic[5]))
print(len(floor7_pro_dic[6]))
print(similation_7)
print(jiao_7)
print(KL7)
print(distancefinal_7)
print("推定した階層は"+str(floor)+"F"+"です")
print(str(len(jiao))+"秒が経ちました")
else:
print("~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~階層移動~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~")
one_floor = []
# if boolean == True: #用这个boolean的意义是因为階層移動有好多个 我只需要一个 否则就会多次操作
# baro = wifi_data_dic_list[i-1][list(wifi_data_dic_list[i-1])[0]][1] #取得最新气压
# one_floor_dic = dict(zip(one_floor[0::2], one_floor[1::2]))
# for key in one_floor_dic:
# one_floor_dic[key].pop(-1)
#
# fingerprint.append(wifi_data_dic_list[i-1]) #添加fingerprint
#
# if one_floor_dic:
# floor_barometric.append(baro)
# if len(floor_barometric) == 1:
# one_floor_dic["X0"]=0
# elif len(floor_barometric) >=2:
# floor_barometric_difference.append(floor_barometric[-1] - floor_barometric[-2])
#
# one_floor_dic[("X0"+"+"+"("+str(round(float(sum(floor_barometric_difference)),4))+")")]=round(float(sum(floor_barometric_difference)),4)
# wifi_floor_list.append(one_floor_dic)
# one_floor_dic = {}
# print("————————————————————————————————————————階層移動————————————————————————————————————————")
# one_floor = []
# boolean = False
# if one_floor:
# floor_barometric.append(one_floor[len(one_floor)-1][len(one_floor[len(one_floor)-1])-1])
# floor_barometric_difference.append(floor_barometric[-1] - floor_barometric[-2])
# one_floor_dic = dict(zip(one_floor[0::2], one_floor[1::2]))
# for key in one_floor_dic:
# one_floor_dic[key].pop(-1)
# one_floor_dic[("X0"+"+"+"("+str(round(float(sum(floor_barometric_difference)),4))+")")]=round(float(sum(floor_barometric_difference)),4)
# wifi_floor_list.append(one_floor_dic)
return wifi_floor_list
# for i in deleted_upordown_number2:
# if i != "階層移動":
# for key in wifi_data_dic_list[i]:
# if wifi_data_dic_list[i][key][0] >= -45:
# key1.append(key)
# key1.append(wifi_data_dic_list[i][key][1]) #如果用聚类的方法找到相对应的超过45的wifi时候需要
# over45_wifi_baro = dict(zip(key1[0::2], key1[1::2]))
# return over45_wifi_baro # 把rssi大于-45的wifi选出来给他们加上气压的标签
#
getmaxbarometric()
# def gps():
# A = []
# B = []
# num = 0
# a = json2List(filepath)
# for i in a:
#
# if "accuracy" in i:
#
# if i["accuracy"] < 15:
# A.append(num)
# break
# else:
# A = []
# else:num = num+1
# for i in A:
# B.append(a[i+1]["hpa"])
#
# c = np.median(B)
# print(c)
#
# test = gps()
| [
"guanjian1997@gmail.com"
] | guanjian1997@gmail.com |
d78dae8aa293992ac876084340178bc18620f645 | 9b617418cfadc6b6deb10c675723485ae49fb221 | /code/resources/users.py | 86659c752c29c1bed0415d2aab2b25db6338b7ac | [
"MIT"
] | permissive | borko81/flask_udemy | 455a555b3892da6d9fee04ba53ea2408dfe23f2b | e8f9192feda1458d1ea44b62d2485e911f16acef | refs/heads/main | 2023-08-07T14:33:47.697962 | 2021-10-08T14:01:13 | 2021-10-08T14:01:13 | 411,242,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,374 | py | import sqlite3
from flask_restful import Resource, reqparse
from models.user import UserModel
class UserRegister(Resource):
"""
Register class, usinf reqparse from flask_restful
:validation from unique username in form and db too
"""
parser = reqparse.RequestParser()
parser.add_argument("username", type=str, required=True, help="Insert username")
parser.add_argument("password", type=str, required=True, help="Insert password")
def post(self):
data = UserRegister.parser.parse_args()
# this validata username unique
if UserModel.find_by_username(data['username']):
return {"message": "This username not allowed!"}, 400
try:
connection = sqlite3.connect("data.db")
cursor = connection.cursor()
except sqlite3.Error as er:
raise ValueError(er)
else:
query = "INSERT INTO users VALUES (NULL, ?, ?)"
try:
cursor.execute(query, (data['username'], data['password']))
except sqlite3.Error as er:
raise ValueError(er)
else:
connection.commit()
finally:
connection.close()
return {"message": "User created successfully"}, 201
if __name__ == '__main__':
u = UserModel.find_by_username("borko")
print(u)
| [
"bstoilov81@gmail.com"
] | bstoilov81@gmail.com |
0094d1f5bf3946022c0877e2eee2cf4d2e9cbb9a | 8edfd2bee6eafa8c37cdebff5b5e47c88a2375bf | /processing.py | 0ecc8bb27f7e3a10d96ed0567ca4673d3b1ac396 | [] | no_license | carlahy/carbon-calculator | ca205cd2920d1d7603cc154b3a663fe0fd5afc48 | cae2a4cc0673350bde7eed8f998e02024e87d25b | refs/heads/master | 2021-01-11T14:55:22.482258 | 2017-04-28T14:38:57 | 2017-04-28T14:38:57 | 80,251,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,693 | py | '''
This python script formats the CSV output of the RADAR model
'''
#!/usr/bin/python
import sys, json
import pandas as pd
import numpy as np
from sklearn import preprocessing
# Remove beginning of radar csv output
def parseCSV(mfile):
details = pd.read_csv(mfile, error_bad_lines=False, warn_bad_lines=False)
skip = details.shape[0]
# Strip details and NaN
model = pd.read_csv(mfile,skiprows=skip).dropna().drop('ID',1)
ndecisions = int(details.iloc[skip-3,1])
nobjectives = model.shape[1] - ndecisions - 1
# Get unique decisions
decisions = []
for d in model.columns.values[0:ndecisions]:
decisions.append( {
'values': [i for i in model[d].unique()],
'name': d
})
# Scale objectives columns
ndecisions = len(decisions)
ncols = model.shape[1]
colsToScale = model.columns.values[ndecisions:ncols-1]
# Toggle to scale columns in matrix
# for c in colsToScale:
# colvals = model.values[:,model.columns.get_loc(c)].astype(float)
# col = preprocessing.scale(np.array(colvals))
# model.loc[:,c] = col.reshape(len(col),1)
model = model.round(5)
# Get unique objectives
objectives = []
for o in model.columns.values[ndecisions:ndecisions+nobjectives]:
objectives.append({
'values': [i for i in model[o].unique()],
'name': o
})
# Overwrite radar csv result
model.to_csv(mfile)
return decisions, objectives
if __name__ == '__main__':
args = sys.argv
decisions, objectives = parseCSV(args[1])
res = decisions, objectives
print json.dumps(decisions)
print json.dumps(objectives)
| [
"carla.hyenne@gmail.com"
] | carla.hyenne@gmail.com |
cb91956ee3bbf7e511b24dfb594c6244b79c7014 | 5b79e8fc342c68c0cae1866e6bf25d7c8a223fe9 | /exam3/exam3/wsgi.py | 3f65f1941975f485fb4459195a74bc1c0f4b6ce0 | [] | no_license | mnw247/Python | 357502ceeaa522795e2e0ad27d84fe7add807181 | 9da0c4178bfb41b008751ee55667f19f582def1e | refs/heads/master | 2020-04-30T20:35:22.753753 | 2019-03-22T03:43:00 | 2019-03-22T03:43:00 | 176,868,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | """
WSGI config for exam3 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "exam3.settings")
application = get_wsgi_application()
| [
"mnw247@gmail.com"
] | mnw247@gmail.com |
4c77319d7889ab024a1b16de93a2a5dd8c93f872 | 38c191b32e1d2a5e01edd81153246742fd75736b | /main.py | 243262d6c2c2ef2c57bac74d3116a687cf985767 | [
"MIT"
] | permissive | mzylowsk-legacy/sum | 06501c898e7099accfd9b2fdaa28eeebfbfa21cf | 50ddc08ea3bed33d6588039f5e5a1210795a0ff8 | refs/heads/master | 2021-07-20T10:38:18.319122 | 2019-11-04T22:45:48 | 2019-11-04T22:45:48 | 219,609,844 | 1 | 90 | MIT | 2020-01-11T10:47:08 | 2019-11-04T22:36:43 | Python | UTF-8 | Python | false | false | 181 | py | MAX = 100
sum_even = 0
sum_odd = 0
for i in range(1, MAX+1):
if i % 2 == 0: sum_even += i
else: sum_odd += i
print(f'Suma parzystych {sum_even}\nSuma nieparzystych {sum_odd}')
| [
"zylowskimichal@gmail.com"
] | zylowskimichal@gmail.com |
90e903a2c4638da2f2e712a8b1508d07d6020b50 | 37e178fc62d70c6a8604020ab7fe6c6489aabe70 | /ScratchWork/classes_constructors_attributes.py | 5829885fd735f221c36f3648d28cbadddb2081cf | [] | no_license | camadog/learn-arcade-work | a5008dff730a99187c5a2ac1e7154ec14b9ad124 | ebaff2fe902e975f6519058d5464c9da8149b00f | refs/heads/master | 2023-02-05T03:40:09.678838 | 2020-12-31T20:30:39 | 2020-12-31T20:30:39 | 299,750,993 | 0 | 0 | null | 2020-09-29T22:21:15 | 2020-09-29T22:21:14 | null | UTF-8 | Python | false | false | 1,740 | py | """
Normal way:
class Address:
def __init__(self,
name: str = "",
line1: str = "",
line2: str = "",
city: str = "",
state: str = "",
zip_code: str = ""
):
self.name: str = name
self.line1: str = line1
self.line2: str = line2
self.city: str = city
self.state: str = state
self.zip_code: str = zip_code
python3.8: @dataclass
"""
from dataclasses import dataclass
@dataclass
class Address:
name: str = ""
line1: str = ""
line2: str = ""
city: str = ""
state: str = ""
zip_code: str = ""
class Cat:
"""Singleton == static variable"""
population = 0
def __init__(self, name):
self.name = name
Cat.population += 1
class Character:
"""
Comments are important
"""
def __init__(self):
"""
constructor == dunder method == magic method
dunder = double under scored
"""
""" Data type """
self.name: str = "placeholder"
self.outfit = ""
self.max_hp = 0
self.current_hp = 0
self.armor_amount = 0
self.max_speed = 0
def main():
player = Character()
print(player.name)
player.name = "Jim"
player2 = Character()
player2.name = "Other"
cat1 = Cat("Pat")
cat2 = Cat("Pepper")
cat3 = Cat("Pouncy")
Cat.population = 4
#New field
cat2.population = 5
print("The cat population is:",Cat.population)
print("The cat population is:",cat1.population)
print("The cat population is:",cat2.population)
print("The cat population is:",cat3.population)
address = Address("name","l1","l2","city","state","zip")
print(address)
main()
| [
"deadwar@gmail.com"
] | deadwar@gmail.com |
9486528191ec0a06295e59e2bb33d70bb834e027 | 38e17443f16041025392e9090ba5553efb130a6a | /geston_materiel/apps.py | 580ee11053a4380baa5ddf023a2415cd63da12d3 | [] | no_license | z3r033/gest_mater | 9df6f67b9e20e12f14aad06af1d6f419d0f24a11 | b131f194719e7db99a12965c1d6009d6523d3742 | refs/heads/master | 2023-04-14T01:27:08.497369 | 2021-04-27T18:48:16 | 2021-04-27T18:48:16 | 362,219,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | from django.apps import AppConfig
class GestonMaterielConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'geston_materiel'
| [
"saad19551945@gmail.com"
] | saad19551945@gmail.com |
45ae1dae1c475f8954d6e7fadf494fc1317ddd86 | db0fa7d3d7cc1ebf99a84bbcbc31cc8329ffd343 | /Activity15.py | 30f0e86320d265b271304bab575c5d4dbd2dc1eb | [] | no_license | MansiSadhwani/PythonSDET | 0457d58971eafdf2062c0add0445bb169f26a493 | 41d6a2baa286b7d8947a89cd38a35b3807eb518a | refs/heads/master | 2022-12-09T07:17:17.905343 | 2020-09-12T20:37:38 | 2020-09-12T20:37:38 | 292,818,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | try:
print(x)
except NameError:
print("NameError!!!!")
finally:
print("Have a great day")
| [
"noreply@github.com"
] | noreply@github.com |
46014a52f2db2d96fc8e257136f49a55eb920f7c | 7edf51e1dd8ea64719f8c3a707a503d273476c2f | /nnCarGame.py | dd454178e1515cc6e2f21ab0ba746567fa069073 | [] | no_license | marekanto/myRace | e75e99aabde7c333b5010e2548098485c18a963c | b4b0849d0266febca0b33d9003f4f4a82d6c3a66 | refs/heads/main | 2023-02-28T16:12:03.453470 | 2021-02-07T13:53:23 | 2021-02-07T13:53:23 | 327,272,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,444 | py | import pygame
import random
import math
import numpy as np
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
pygame.init() # Initialize pygame
# Some variables initializations
img = 0 # This one is used when recording frames
size = width, height = 1600, 900 # Size to use when creating pygame window
# Colors
white = (255, 255, 255)
green = (0, 255, 0)
blue = (0, 0, 128)
black = (0, 0, 0)
gray = pygame.Color('gray12')
Color_line = (255, 0, 0)
generation = 1
mutationRate = 90
FPS = 30
selectedCars = []
selected = 0
lines = True # If true then lines of player are shown
player = True # If true then player is shown
display_info = True # If true then display info is shown
frames = 0
maxspeed = 15
number_track = 1
white_small_car = pygame.image.load('blue_small.png')
white_big_car = pygame.image.load('blue_big.png')
green_small_car = pygame.image.load('red_small.png')
green_big_car = pygame.image.load('red_big.png')
bg = pygame.image.load('track_7.png')
bg4 = pygame.image.load('track_7_1.png')
def calculateDistance(x1, y1, x2, y2): # Used to calculate distance between points
dist = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
return dist
def rotation(origin, point, angle): # Used to rotate points #rotate(origin, point, math.radians(10))
ox, oy = origin
px, py = point
qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return qx, qy
def move(point, angle, unit): # Translate a point in a given direction
x = point[0]
y = point[1]
rad = math.radians(-angle % 360)
x += unit * math.sin(rad)
y += unit * math.cos(rad)
return x, y
def sigmoid(z): # Sigmoid function, used as the neurons activation function
return 1.0 / (1.0 + np.exp(-z))
def mutateOneWeightGene(parent1, child1):
sizenn = len(child1.sizes)
# Copy parent weights into child weights
for i in range(sizenn - 1):
for j in range(child1.sizes[i + 1]):
for k in range(child1.sizes[i]):
child1.weights[i][j][k] = parent1.weights[i][j][k]
# Copy parent biases into child biases
for i in range(sizenn - 1):
for j in range(child1.sizes[i + 1]):
child1.biases[i][j] = parent1.biases[i][j]
genomeWeights = [] # This will be a list containing all weights, easier to modify this way
for i in range(sizenn - 1): # i=0,1
for j in range(child1.sizes[i] * child1.sizes[i + 1]):
genomeWeights.append(child1.weights[i].item(j))
# Modify a random gene by a random amount
r1 = random.randint(0, len(genomeWeights) - 1)
genomeWeights[r1] = genomeWeights[r1] * random.uniform(0.8, 1.2)
count = 0
for i in range(sizenn - 1):
for j in range(child1.sizes[i + 1]):
for k in range(child1.sizes[i]):
child1.weights[i][j][k] = genomeWeights[count]
count += 1
return
def mutateOneBiasesGene(parent1, child1):
sizenn = len(child1.sizes)
for i in range(sizenn - 1):
for j in range(child1.sizes[i + 1]):
for k in range(child1.sizes[i]):
child1.weights[i][j][k] = parent1.weights[i][j][k]
for i in range(sizenn - 1):
for j in range(child1.sizes[i + 1]):
child1.biases[i][j] = parent1.biases[i][j]
genomeBiases = []
for i in range(sizenn - 1):
for j in range(child1.sizes[i + 1]):
genomeBiases.append(child1.biases[i].item(j))
r1 = random.randint(0, len(genomeBiases) - 1)
genomeBiases[r1] = genomeBiases[r1] * random.uniform(0.8, 1.2)
count = 0
for i in range(sizenn - 1):
for j in range(child1.sizes[i + 1]):
child1.biases[i][j] = genomeBiases[count]
count += 1
return
def uniformCrossOverWeights(parent1, parent2, child1,
child2): # Given two parent car objects, it modifies the children car objects weights
sizenn = len(child1.sizes) # 3 si car1=Car([2, 4, 3])
# Copy parent weights into child weights
for i in range(sizenn - 1):
for j in range(child1.sizes[i + 1]):
for k in range(child1.sizes[i]):
child1.weights[i][j][k] = parent1.weights[i][j][k]
for i in range(sizenn - 1):
for j in range(child1.sizes[i + 1]):
for k in range(child1.sizes[i]):
child2.weights[i][j][k] = parent2.weights[i][j][k]
# Copy parent biases into child biases
for i in range(sizenn - 1):
for j in range(child2.sizes[i + 1]):
child1.biases[i][j] = parent1.biases[i][j]
for i in range(sizenn - 1):
for j in range(child2.sizes[i + 1]):
child2.biases[i][j] = parent2.biases[i][j]
genome1 = [] # This will be a list containing all weights of child1
genome2 = [] # This will be a list containing all weights of child2
for i in range(sizenn - 1): # i=0,1
for j in range(child1.sizes[i] * child1.sizes[i + 1]):
genome1.append(child1.weights[i].item(j))
for i in range(sizenn - 1): # i=0,1
for j in range(child2.sizes[i] * child2.sizes[i + 1]):
genome2.append(child2.weights[i].item(j))
# Crossover weights
alter = True
for i in range(len(genome1)):
if alter == True:
aux = genome1[i]
genome1[i] = genome2[i]
genome2[i] = aux
alter = False
else:
alter = True
# Go back from genome list to weights numpy array on child object
count = 0
for i in range(sizenn - 1):
for j in range(child1.sizes[i + 1]):
for k in range(child1.sizes[i]):
child1.weights[i][j][k] = genome1[count]
count += 1
count = 0
for i in range(sizenn - 1):
for j in range(child2.sizes[i + 1]):
for k in range(child2.sizes[i]):
child2.weights[i][j][k] = genome2[count]
count += 1
return
def uniformCrossOverBiases(parent1, parent2, child1,
child2): # Given two parent car objects, it modifies the children car objects biases
sizenn = len(parent1.sizes)
for i in range(sizenn - 1):
for j in range(child1.sizes[i + 1]):
for k in range(child1.sizes[i]):
child1.weights[i][j][k] = parent1.weights[i][j][k]
for i in range(sizenn - 1):
for j in range(child1.sizes[i + 1]):
for k in range(child1.sizes[i]):
child2.weights[i][j][k] = parent2.weights[i][j][k]
for i in range(sizenn - 1):
for j in range(child2.sizes[i + 1]):
child1.biases[i][j] = parent1.biases[i][j]
for i in range(sizenn - 1):
for j in range(child2.sizes[i + 1]):
child2.biases[i][j] = parent2.biases[i][j]
genome1 = []
genome2 = []
for i in range(sizenn - 1):
for j in range(child1.sizes[i + 1]):
genome1.append(child1.biases[i].item(j))
for i in range(sizenn - 1):
for j in range(child2.sizes[i + 1]):
genome2.append(child2.biases[i].item(j))
alter = True
for i in range(len(genome1)):
if alter == True:
aux = genome1[i]
genome1[i] = genome2[i]
genome2[i] = aux
alter = False
else:
alter = True
count = 0
for i in range(sizenn - 1):
for j in range(child1.sizes[i + 1]):
child1.biases[i][j] = genome1[count]
count += 1
count = 0
for i in range(sizenn - 1):
for j in range(child2.sizes[i + 1]):
child2.biases[i][j] = genome2[count]
count += 1
return
class Cell:
# A wall separates a pair of cells in the N-S or W-E directions.
wall_pairs = {'N': 'S', 'S': 'N', 'E': 'W', 'W': 'E'}
def __init__(self, x, y):
self.x, self.y = x, y
self.walls = {'N': True, 'S': True, 'E': True, 'W': True}
self.color = 0, 0, 0
self.track = ""
def has_all_walls(self):
return all(self.walls.values())
def knock_down_wall(self, other, wall):
# Knock down the wall between cells self and other
self.walls[wall] = False
other.walls[Cell.wall_pairs[wall]] = False
class Car:
def __init__(self, sizes):
self.score = 0
self.num_layers = len(sizes) # Number of nn layers
self.sizes = sizes # List with number of neurons per layer
self.biases = [np.random.randn(y, 1) for y in sizes[1:]] # Biases
self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])] # Weights
# c1, c2, c3, c4, c5 are five 2D points where the car could collided, updated in every frame
self.c1 = 0, 0
self.c2 = 0, 0
self.c3 = 0, 0
self.c4 = 0, 0
self.c5 = 0, 0
# d1, d2, d3, d4, d5 are distances from the car to those points, updated every frame too and used as the input for the NN
self.d1 = 0
self.d2 = 0
self.d3 = 0
self.d4 = 0
self.d5 = 0
self.self_collided = False
# The input and output of the NN must be in a numpy array format
self.inp = np.array([[self.d1], [self.d2], [self.d3], [self.d4], [self.d5]])
self.outp = np.array([[0], [0], [0], [0]])
# Boolean used for toggling distance lines
self.showlines = False
# Initial location of the car
self.x = 100
self.y = 200
self.center = self.x, self.y
# Height and width of the car
self.height = 35 # 45
self.width = 17 # 25
# These are the four corners of the car, using polygon instead of rectangle object, when rotating or moving the car, we rotate or move these
self.d = self.x - (self.width / 2), self.y - (self.height / 2)
self.c = self.x + self.width - (self.width / 2), self.y - (self.height / 2)
self.b = self.x + self.width - (self.width / 2), self.y + self.height - (
self.height / 2) # The rectangle is centered at (x, y)
self.a = self.x - (self.width / 2), self.y + self.height - (
self.height / 2) # (a), (b), (c), (d) are the vertices
# Velocity, acceleration and direction of the car
self.velocity = 0
self.acceleration = 0
self.angle = 180
# Boolean which goes true when car collides
self.collided = False
# Car color and image
self.color = blue
self.car_image = white_small_car
def set_accel(self, accel):
self.acceleration = accel
def rotate(self, rot):
self.angle += rot
if self.angle > 360:
self.angle = 0
if self.angle < 0:
self.angle = 360 + self.angle
def update(self): # In each frame I update the vertices (translation and rotation) and the collision points
self.score += self.velocity
if self.acceleration != 0:
self.velocity += self.acceleration
if self.velocity > maxspeed:
self.velocity = maxspeed
elif self.velocity < 0:
self.velocity = 0
else:
self.velocity *= 0.92
self.x, self.y = move((self.x, self.y), self.angle, self.velocity)
self.center = self.x, self.y
self.d = self.x - (self.width / 2), self.y - (self.height / 2)
self.c = self.x + self.width - (self.width / 2), self.y - (self.height / 2)
self.b = self.x + self.width - (self.width / 2), self.y + self.height - (
self.height / 2) # The rectangle is centered at (x, y)
self.a = self.x - (self.width / 2), self.y + self.height - (
self.height / 2) # (a), (b), (c), (d) are the vertices
self.a = rotation((self.x, self.y), self.a, math.radians(self.angle))
self.b = rotation((self.x, self.y), self.b, math.radians(self.angle))
self.c = rotation((self.x, self.y), self.c, math.radians(self.angle))
self.d = rotation((self.x, self.y), self.d, math.radians(self.angle))
self.c1 = move((self.x, self.y), self.angle, 10)
while bg4.get_at((int(self.c1[0]), int(self.c1[1]))).a != 0:
self.c1 = move((self.c1[0], self.c1[1]), self.angle, 10)
while bg4.get_at((int(self.c1[0]), int(self.c1[1]))).a == 0:
self.c1 = move((self.c1[0], self.c1[1]), self.angle, -1)
self.c2 = move((self.x, self.y), self.angle + 45, 10)
while bg4.get_at((int(self.c2[0]), int(self.c2[1]))).a != 0:
self.c2 = move((self.c2[0], self.c2[1]), self.angle + 45, 10)
while bg4.get_at((int(self.c2[0]), int(self.c2[1]))).a == 0:
self.c2 = move((self.c2[0], self.c2[1]), self.angle + 45, -1)
self.c3 = move((self.x, self.y), self.angle - 45, 10)
while bg4.get_at((int(self.c3[0]), int(self.c3[1]))).a != 0:
self.c3 = move((self.c3[0], self.c3[1]), self.angle - 45, 10)
while bg4.get_at((int(self.c3[0]), int(self.c3[1]))).a == 0:
self.c3 = move((self.c3[0], self.c3[1]), self.angle - 45, -1)
self.c4 = move((self.x, self.y), self.angle + 90, 10)
while bg4.get_at((int(self.c4[0]), int(self.c4[1]))).a != 0:
self.c4 = move((self.c4[0], self.c4[1]), self.angle + 90, 10)
while bg4.get_at((int(self.c4[0]), int(self.c4[1]))).a == 0:
self.c4 = move((self.c4[0], self.c4[1]), self.angle + 90, -1)
self.c5 = move((self.x, self.y), self.angle - 90, 10)
while bg4.get_at((int(self.c5[0]), int(self.c5[1]))).a != 0:
self.c5 = move((self.c5[0], self.c5[1]), self.angle - 90, 10)
while bg4.get_at((int(self.c5[0]), int(self.c5[1]))).a == 0:
self.c5 = move((self.c5[0], self.c5[1]), self.angle - 90, -1)
self.d1 = int(calculateDistance(self.center[0], self.center[1], self.c1[0], self.c1[1]))
self.d2 = int(calculateDistance(self.center[0], self.center[1], self.c2[0], self.c2[1]))
self.d3 = int(calculateDistance(self.center[0], self.center[1], self.c3[0], self.c3[1]))
self.d4 = int(calculateDistance(self.center[0], self.center[1], self.c4[0], self.c4[1]))
self.d5 = int(calculateDistance(self.center[0], self.center[1], self.c5[0], self.c5[1]))
def draw(self, display):
rotated_image = pygame.transform.rotate(self.car_image, -self.angle - 180)
rect_rotated_image = rotated_image.get_rect()
rect_rotated_image.center = self.x, self.y
gameDisplay.blit(rotated_image, rect_rotated_image)
center = self.x, self.y
if self.showlines:
pygame.draw.line(gameDisplay, Color_line, (self.x, self.y), self.c1, 2)
pygame.draw.line(gameDisplay, Color_line, (self.x, self.y), self.c2, 2)
pygame.draw.line(gameDisplay, Color_line, (self.x, self.y), self.c3, 2)
pygame.draw.line(gameDisplay, Color_line, (self.x, self.y), self.c4, 2)
pygame.draw.line(gameDisplay, Color_line, (self.x, self.y), self.c5, 2)
def showLines(self):
self.showlines = not self.showlines
def feedforward(self):
# Return the output of the network
self.inp = np.array([[self.d1], [self.d2], [self.d3], [self.d4], [self.d5], [self.velocity]])
for b, w in zip(self.biases, self.weights):
self.inp = sigmoid(np.dot(w, self.inp) + b)
self.outp = self.inp
return self.outp
def collision(self):
if (bg4.get_at((int(self.a[0]), int(self.a[1]))).a == 0) or (
bg4.get_at((int(self.b[0]), int(self.b[1]))).a == 0) or (
bg4.get_at((int(self.c[0]), int(self.c[1]))).a == 0) or (
bg4.get_at((int(self.d[0]), int(self.d[1]))).a == 0):
return True
else:
return False
def resetPosition(self):
self.x = 100
self.y = 200
self.angle = 180
return
def takeAction(self):
if self.outp.item(0) > 0.5: # Accelerate
self.set_accel(0.2)
else:
self.set_accel(0)
if self.outp.item(1) > 0.5: # Brake
self.set_accel(-0.2)
if self.outp.item(2) > 0.5: # Turn right
self.rotate(-5)
if self.outp.item(3) > 0.5: # Turn left
self.rotate(5)
return
nnCars = [] # List of neural network cars
num_of_nnCars = 200 # Number of neural network cars
alive = num_of_nnCars # Number of not collided (alive) cars
collidedCars = [] # List containing collided cars
# These is just the text being displayed on pygame window
infoX = 1365
infoY = 600
font = pygame.font.Font('freesansbold.ttf', 18)
text1 = font.render('0..9 - Change Mutation', True, white)
text2 = font.render('LMB - Select/Unselect', True, white)
text3 = font.render('RMB - Delete', True, white)
text4 = font.render('L - Show/Hide Lines', True, white)
text5 = font.render('R - Reset', True, white)
text6 = font.render('B - Breed', True, white)
text7 = font.render('C - Clean', True, white)
# text8 = font.render('N - Next Track', True, white)
text9 = font.render('A - Toggle Player', True, white)
text10 = font.render('D - Toggle Info', True, white)
# text11 = font.render('M - Breed and Next Track', True, white)
text1Rect = text1.get_rect().move(infoX, infoY)
text2Rect = text2.get_rect().move(infoX, infoY + text1Rect.height)
text3Rect = text3.get_rect().move(infoX, infoY + 2 * text1Rect.height)
text4Rect = text4.get_rect().move(infoX, infoY + 3 * text1Rect.height)
text5Rect = text5.get_rect().move(infoX, infoY + 4 * text1Rect.height)
text6Rect = text6.get_rect().move(infoX, infoY + 5 * text1Rect.height)
text7Rect = text7.get_rect().move(infoX, infoY + 6 * text1Rect.height)
# text8Rect = text8.get_rect().move(infoX,infoY+7*text1Rect.height)
text9Rect = text9.get_rect().move(infoX, infoY + 7 * text1Rect.height)
text10Rect = text10.get_rect().move(infoX, infoY + 8 * text1Rect.height)
# text11Rect = text11.get_rect().move(infoX,infoY+10*text1Rect.height)
def displayTexts():
infotextX = 20
infotextY = 600
infotext1 = font.render('Gen ' + str(generation), True, white)
infotext2 = font.render('Cars: ' + str(num_of_nnCars), True, white)
infotext3 = font.render('Alive: ' + str(alive), True, white)
infotext4 = font.render('Selected: ' + str(selected), True, white)
if lines == True:
infotext5 = font.render('Lines ON', True, white)
else:
infotext5 = font.render('Lines OFF', True, white)
if player == True:
infotext6 = font.render('Player ON', True, white)
else:
infotext6 = font.render('Player OFF', True, white)
# infotext7 = font.render('Mutation: '+ str(2*mutationRate), True, white)
# infotext8 = font.render('Frames: ' + str(frames), True, white)
infotext9 = font.render('FPS: 30', True, white)
infotext1Rect = infotext1.get_rect().move(infotextX, infotextY)
infotext2Rect = infotext2.get_rect().move(infotextX, infotextY + infotext1Rect.height)
infotext3Rect = infotext3.get_rect().move(infotextX, infotextY + 2 * infotext1Rect.height)
infotext4Rect = infotext4.get_rect().move(infotextX, infotextY + 3 * infotext1Rect.height)
infotext5Rect = infotext5.get_rect().move(infotextX, infotextY + 4 * infotext1Rect.height)
infotext6Rect = infotext6.get_rect().move(infotextX, infotextY + 5 * infotext1Rect.height)
# infotext7Rect = infotext7.get_rect().move(infotextX,infotextY+6*infotext1Rect.height)
# infotext8Rect = infotext8.get_rect().move(infotextX,infotextY+7*infotext1Rect.height)
infotext9Rect = infotext9.get_rect().move(infotextX, infotextY + 6 * infotext1Rect.height)
gameDisplay.blit(text1, text1Rect)
gameDisplay.blit(text2, text2Rect)
gameDisplay.blit(text3, text3Rect)
gameDisplay.blit(text4, text4Rect)
gameDisplay.blit(text5, text5Rect)
gameDisplay.blit(text6, text6Rect)
gameDisplay.blit(text7, text7Rect)
# gameDisplay.blit(text8, text8Rect)
gameDisplay.blit(text9, text9Rect)
gameDisplay.blit(text10, text10Rect)
# gameDisplay.blit(text11, text11Rect)
gameDisplay.blit(infotext1, infotext1Rect)
gameDisplay.blit(infotext2, infotext2Rect)
gameDisplay.blit(infotext3, infotext3Rect)
gameDisplay.blit(infotext4, infotext4Rect)
gameDisplay.blit(infotext5, infotext5Rect)
gameDisplay.blit(infotext6, infotext6Rect)
# gameDisplay.blit(infotext7, infotext7Rect)
# gameDisplay.blit(infotext8, infotext8Rect)
gameDisplay.blit(infotext9, infotext9Rect)
return
gameDisplay = pygame.display.set_mode(size) # creates screen
clock = pygame.time.Clock()
inputLayer = 6
hiddenLayer = 6
outputLayer = 4
car = Car([inputLayer, hiddenLayer, outputLayer])
auxcar = Car([inputLayer, hiddenLayer, outputLayer])
for i in range(num_of_nnCars):
nnCars.append(Car([inputLayer, hiddenLayer, outputLayer]))
def redrawGameWindow(): # Called on every frame
global alive
global frames
global img
frames += 1
gameD = gameDisplay.blit(bg, (0, 0))
# NN cars
for nncar in nnCars:
if not nncar.collided:
nncar.update() # Update: Every car center coord, corners, directions, collision points and collision distances
if nncar.collision(): # Check which car collided
nncar.collided = True # If collided then change collided attribute to true
if nncar.self_collided == False:
alive -= 1
nncar.self_collided = True
else: # If not collided then feedforward the input and take an action
nncar.feedforward()
nncar.takeAction()
nncar.draw(gameDisplay)
# Same but for player
if player:
car.update()
if car.collision():
car.resetPosition()
car.update()
car.draw(gameDisplay)
if display_info:
displayTexts()
pygame.display.update() # updates the screen
# Take a screenshot of every frame
# pygame.image.save(gameDisplay, "pygameVideo/screenshot" + str(img) + ".jpeg")
# img += 1
while True:
# now1 = time.time()
for event in pygame.event.get(): # Check for events
if event.type == pygame.QUIT:
pygame.quit() # quits
quit()
if event.type == pygame.KEYDOWN: # If user uses the keyboard
if event.key == ord("l"): # If that key is l
car.showLines()
lines = not lines
if event.key == ord("c"): # If that key is c
for nncar in nnCars:
if nncar.collided == True:
nnCars.remove(nncar)
if nncar.self_collided == False:
alive -= 1
if event.key == ord("a"): # If that key is a
player = not player
if event.key == ord("d"): # If that key is d
display_info = not display_info
if event.key == ord("n"): # If that key is n
number_track = 2
for nncar in nnCars:
nncar.velocity = 0
nncar.acceleration = 0
nncar.x = 140
nncar.y = 610
nncar.angle = 180
nncar.collided = False
bg = pygame.image.load('randomGeneratedTrackFront.png')
bg4 = pygame.image.load('randomGeneratedTrackBack.png')
if event.key == ord("b"):
if (len(selectedCars) == 2):
for nncar in nnCars:
nncar.score = 0
alive = num_of_nnCars
generation += 1
selected = 0
nnCars.clear()
for i in range(num_of_nnCars):
nnCars.append(Car([inputLayer, hiddenLayer, outputLayer]))
for i in range(0, num_of_nnCars - 2, 2):
uniformCrossOverWeights(selectedCars[0], selectedCars[1], nnCars[i], nnCars[i + 1])
uniformCrossOverBiases(selectedCars[0], selectedCars[1], nnCars[i], nnCars[i + 1])
nnCars[num_of_nnCars - 2] = selectedCars[0]
nnCars[num_of_nnCars - 1] = selectedCars[1]
nnCars[num_of_nnCars - 2].car_image = green_small_car
nnCars[num_of_nnCars - 1].car_image = green_small_car
nnCars[num_of_nnCars - 2].resetPosition()
nnCars[num_of_nnCars - 1].resetPosition()
nnCars[num_of_nnCars - 2].collided = False
nnCars[num_of_nnCars - 1].collided = False
for i in range(num_of_nnCars - 2):
for j in range(mutationRate):
mutateOneWeightGene(nnCars[i], auxcar)
mutateOneWeightGene(auxcar, nnCars[i])
mutateOneBiasesGene(nnCars[i], auxcar)
mutateOneBiasesGene(auxcar, nnCars[i])
if number_track != 1:
for nncar in nnCars:
nncar.x = 140
nncar.y = 610
selectedCars.clear()
if event.key == ord("r"):
generation = 1
alive = num_of_nnCars
nnCars.clear()
selectedCars.clear()
for i in range(num_of_nnCars):
nnCars.append(Car([inputLayer, hiddenLayer, outputLayer]))
for nncar in nnCars:
if number_track == 1:
nncar.x = 120
nncar.y = 480
elif number_track == 2:
nncar.x = 100
nncar.y = 300
if event.key == ord("0"):
mutationRate = 0
if event.key == ord("1"):
mutationRate = 10
if event.key == ord("2"):
mutationRate = 20
if event.key == ord("3"):
mutationRate = 30
if event.key == ord("4"):
mutationRate = 40
if event.key == ord("5"):
mutationRate = 50
if event.key == ord("6"):
mutationRate = 60
if event.key == ord("7"):
mutationRate = 70
if event.key == ord("8"):
mutationRate = 80
if event.key == ord("9"):
mutationRate = 90
if event.type == pygame.MOUSEBUTTONDOWN:
# This returns a tuple:
# (leftclick, middleclick, rightclick)
# Each one is a boolean integer representing button up/down.
mouses = pygame.mouse.get_pressed()
if mouses[0]:
pos = pygame.mouse.get_pos()
point = Point(pos[0], pos[1])
# Check the car list and see which one was there
for nncar in nnCars:
polygon = Polygon([nncar.a, nncar.b, nncar.c, nncar.d])
if (polygon.contains(point)):
if nncar in selectedCars:
selectedCars.remove(nncar)
selected -= 1
if nncar.car_image == white_big_car:
nncar.car_image = white_small_car
if nncar.car_image == green_big_car:
nncar.car_image = green_small_car
if nncar.collided:
nncar.velocity = 0
nncar.acceleration = 0
nncar.update()
else:
if len(selectedCars) < 2:
selectedCars.append(nncar)
selected += 1
if nncar.car_image == white_small_car:
nncar.car_image = white_big_car
if nncar.car_image == green_small_car:
nncar.car_image = green_big_car
if nncar.collided:
nncar.velocity = 0
nncar.acceleration = 0
nncar.update()
break
if mouses[2]:
pos = pygame.mouse.get_pos()
point = Point(pos[0], pos[1])
for nncar in nnCars:
polygon = Polygon([nncar.a, nncar.b, nncar.c, nncar.d])
if (polygon.contains(point)):
if nncar not in selectedCars:
nnCars.remove(nncar)
alive -= 1
break
keys = pygame.key.get_pressed()
if keys[pygame.K_LEFT]:
car.rotate(-5)
if keys[pygame.K_RIGHT]:
car.rotate(5)
if keys[pygame.K_UP]:
car.set_accel(0.2)
else:
car.set_accel(0)
if keys[pygame.K_DOWN]:
car.set_accel(-0.2)
redrawGameWindow()
clock.tick(FPS)
| [
"noreply@github.com"
] | noreply@github.com |
d0beb03d5c7315faf93d164b70e72d5175558f0f | 0e7cb579f15fa835fc1064d2a1cd41496968103a | /tweet-feed-YahooFinance/company_list.py | d954bac362adf47fe5bb53f25ba45228c8ac43ba | [] | no_license | tonykyo3232/Tweets_Downloader | c34faaf5aaa22b542980506d8deaf0489bd5a4ed | a5b768b40ace92dd019c53be5d7046e2bff67679 | refs/heads/main | 2023-08-30T04:06:38.875221 | 2021-11-08T18:02:52 | 2021-11-08T18:02:52 | 421,289,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,380 | py | '''
reference: https://www.dogsofthedow.com/largest-companies-by-market-cap.htm
returns dictionaries of company list with its stock maeket name
'''
# Declare a dictionary with company full name as key
companies = {
'Apple': 'AAPL',
'Microsoft': 'MSFT',
'Alphabet': 'GOOGL',
'Amazon': 'AMZN',
'Facebook': 'FB',
'Tesla': 'TSLA',
'Berkshire Hathaway': 'BRK.A',
'Taiwan Semiconductor': 'TSM',
'Alibaba': 'BABA',
'Visa': 'V',
'NVIDIA': 'NVDA',
'JPMorgan Chase': 'JPM',
'Johnson & Johnson': 'JNJ',
'Walmart': 'WMT',
'UnitedHealth': 'UNH',
'Mastercard': 'MA',
'Home Depot': 'HD',
'Bank of America': 'BAC',
'Procter & Gamble': 'PG',
'ASML': 'ASML',
'PayPal': 'PYPL',
'Disney': 'DIS',
'Adobe': 'ADBE',
'Nike': 'NKE',
'Pfizer': 'PFE',
'Comcast': 'CMCSA',
'Eli Lilly': 'LLY',
'Oracle': 'ORCL',
'Toyota': 'TM',
'Exxon Mobil': 'XOM',
'Coca-Cola': 'KO',
'Cisco': 'CSCO',
'Verizon': 'VZ',
'Netflix': 'NFLX',
'Salesforce': 'CRM',
'Danaher': 'DHR',
'Intel': 'INTC',
'Abbott Labs': 'ABT',
'Pepsi': 'PEP',
'Thermo Fisher Scientific': 'TMO',
'Novartis': 'NVS',
'Wells Fargo': 'WFC',
'Accenture': 'ACN',
'AbbVie': 'ABBV',
'AT&T': 'T',
'Broadcom': 'AVGO',
'Chevron': 'CVX',
'Costco': 'COST',
'Merck': 'MRK',
'Shopify': 'SHOP'}
# Declare a dictionary with stock market symbol as key
companies_stock_market = {
'AAPL': 'Apple',
'MSFT': 'Microsoft',
'GOOGL': 'Alphabet',
'AMZN': 'Amazon',
'FB': 'Facebook',
'TSLA': 'Tesla',
'BRK.A': 'Berkshire Hathaway',
'TSM': 'Taiwan Semiconductor',
'BABA': 'Alibaba',
'V': 'Visa',
'NVDA': 'NVIDIA',
'JPM': 'JPMorgan Chase',
'JNJ': 'Johnson & Johnson',
'WMT': 'Walmart',
'UNH': 'UnitedHealth',
'MA': 'Mastercard',
'HD': 'Home Depot',
'BAC': 'Bank of America',
'PG': 'Procter & Gamble',
'ASML': 'ASML',
'PYPL': 'PayPal',
'DIS': 'Disney',
'ADBE': 'Adobe',
'NKE': 'Nike',
'PFE': 'Pfizer',
'CMCSA': 'Comcast',
'LLY': 'Eli Lilly',
'ORCL': 'Oracle',
'TM': 'Toyota',
'XOM': 'Exxon Mobil',
'KO': 'Coca-Cola',
'CSCO': 'Cisco',
'VZ': 'Verizon',
'NFLX': 'Netflix',
'CRM': 'Salesforce',
'DHR': 'Danaher',
'INTC': 'Intel',
'ABT': 'Abbott Labs',
'PEP': 'Pepsi',
'TMO': 'Thermo Fisher Scientific',
'NVS': 'Novartis',
'WFC': 'Wells Fargo',
'ACN': 'Accenture',
'ABBV': 'AbbVie',
'T': 'AT&T',
'AVGO': 'Broadcom',
'CVX': 'Chevron',
'COST': 'Costco',
'MRK': 'Merck',
'SHOP': 'Shopify'} | [
"tonykyo3232@gmail.com"
] | tonykyo3232@gmail.com |
ff37f2f29dc53274b7defddb55f14d56de94ae3d | 1a042e236d7bcce549be8ac08b79923da532286b | /simple2_flask/instance/tools/__init__.py | 8a5e174d050b932e20127316681287abea9dc5ab | [] | no_license | aleimu/Julyfire | 3eff3bb39e5c0df14c71c00cbdbcd6710bf227da | 7c22849862316baa8cfdd563bbeafdb5c8e34864 | refs/heads/master | 2022-12-18T16:34:26.806956 | 2019-11-14T09:17:54 | 2019-11-14T09:17:54 | 139,806,191 | 0 | 1 | null | 2022-09-16T17:49:31 | 2018-07-05T06:42:46 | HTML | UTF-8 | Python | false | false | 52 | py | # -*- coding:utf-8 -*-
print("__name__", __name__)
| [
"01551462@yto.net.cn"
] | 01551462@yto.net.cn |
b0405c56be777cb4d67775820663ac7ac5c2a17e | c9b88924141bec00db32c601fe24163f49612973 | /venv/bin/pilconvert.py | 28008917b30d004194eaae23407c8f200babff39 | [] | no_license | cameronccohen/satirev | 23c70ecb9a5c6b60e3671ebde372fc0c3ca9b2b4 | 9e7d7cc9fdedc99ff274f332c86f7e82e2731f19 | refs/heads/master | 2021-03-27T16:47:33.026071 | 2019-10-24T02:44:12 | 2019-10-24T02:44:12 | 77,879,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,352 | py | #!/Users/Cameron/satirev/venv/bin/python
#
# The Python Imaging Library.
# $Id$
#
# convert image files
#
# History:
# 0.1 96-04-20 fl Created
# 0.2 96-10-04 fl Use draft mode when converting images
# 0.3 96-12-30 fl Optimize output (PNG, JPEG)
# 0.4 97-01-18 fl Made optimize an option (PNG, JPEG)
# 0.5 98-12-30 fl Fixed -f option (from Anthony Baxter)
#
from __future__ import print_function
import site
import getopt, string, sys
from PIL import Image
def usage():
print("PIL Convert 0.5/1998-12-30 -- convert image files")
print("Usage: pilconvert [option] infile outfile")
print()
print("Options:")
print()
print(" -c <format> convert to format (default is given by extension)")
print()
print(" -g convert to greyscale")
print(" -p convert to palette image (using standard palette)")
print(" -r convert to rgb")
print()
print(" -o optimize output (trade speed for size)")
print(" -q <value> set compression quality (0-100, JPEG only)")
print()
print(" -f list supported file formats")
sys.exit(1)
if len(sys.argv) == 1:
usage()
try:
opt, argv = getopt.getopt(sys.argv[1:], "c:dfgopq:r")
except getopt.error as v:
print(v)
sys.exit(1)
format = None
convert = None
options = { }
for o, a in opt:
if o == "-f":
Image.init()
id = sorted(Image.ID)
print("Supported formats (* indicates output format):")
for i in id:
if i in Image.SAVE:
print(i+"*", end=' ')
else:
print(i, end=' ')
sys.exit(1)
elif o == "-c":
format = a
if o == "-g":
convert = "L"
elif o == "-p":
convert = "P"
elif o == "-r":
convert = "RGB"
elif o == "-o":
options["optimize"] = 1
elif o == "-q":
options["quality"] = string.atoi(a)
if len(argv) != 2:
usage()
try:
im = Image.open(argv[0])
if convert and im.mode != convert:
im.draft(convert, im.size)
im = im.convert(convert)
if format:
im.save(argv[1], format, **options)
else:
im.save(argv[1], **options)
except:
print("cannot convert image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
| [
"cameron.c.cohen@gmail.com"
] | cameron.c.cohen@gmail.com |
397ddd8cad079923bf3b58de4cb4c4fdc4e6fb5d | 50976dccd8ecb8184263d4ab488f8d54bfe2ba1b | /codemakery-django/codemakery_site/blog/migrations/0002_post_picture.py | 315fadbd3b6269d34a2184626fbd2aee06ba917a | [] | no_license | dattran96/Order-Pizza-Meeo | 487aec06a489be6e5989ccddac1055f6d9444ec8 | 7e3bf1a66934596abdc16525932483fa97c1e69d | refs/heads/main | 2023-03-02T11:49:34.330840 | 2021-02-06T16:56:16 | 2021-02-06T16:56:16 | 336,534,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | # Generated by Django 3.1.4 on 2021-01-03 16:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='picture',
field=models.ImageField(blank=True, max_length=255, null=True, upload_to='pictures/'),
),
]
| [
"t.ng.t.dat@gmail.com"
] | t.ng.t.dat@gmail.com |
0fed61fbca1565d067317ad908eaa721214ec63c | 976042b68ab16fd064b09d07bcc8d5bed54bcab0 | /runbot_merge/tests/conftest.py | 75cad7a8e13be8fe713a3ed8fa828d18e29746f4 | [] | no_license | odoo/runbot | cd713240f47741bf0dff4ffd30aba2840c590ce5 | e0795ffaea9233a89005044bc2bc19fdb32f27a0 | refs/heads/16.0 | 2023-09-03T21:05:05.725854 | 2023-07-20T12:41:43 | 2023-08-07T13:07:30 | 124,407,257 | 97 | 128 | null | 2023-09-14T17:41:15 | 2018-03-08T15:01:30 | Python | UTF-8 | Python | false | false | 1,206 | py | import pytest
import requests
@pytest.fixture()
def module():
return 'runbot_merge'
@pytest.fixture
def page(port):
s = requests.Session()
def get(url):
r = s.get('http://localhost:{}{}'.format(port, url))
r.raise_for_status()
return r.content
return get
@pytest.fixture
def default_crons():
return [
# env['runbot_merge.project']._check_fetch()
'runbot_merge.fetch_prs_cron',
# env['runbot_merge.commit']._notify()
'runbot_merge.process_updated_commits',
# env['runbot_merge.project']._check_stagings()
'runbot_merge.merge_cron',
# env['runbot_merge.project']._create_stagings()
'runbot_merge.staging_cron',
# env['runbot_merge.pull_requests']._check_linked_prs_statuses()
'runbot_merge.check_linked_prs_status',
# env['runbot_merge.pull_requests.feedback']._send()
'runbot_merge.feedback_cron',
]
@pytest.fixture
def project(env, config):
return env['runbot_merge.project'].create({
'name': 'odoo',
'github_token': config['github']['token'],
'github_prefix': 'hansen',
'branch_ids': [(0, 0, {'name': 'master'})],
})
| [
"xmo@odoo.com"
] | xmo@odoo.com |
64183ac4cc465a42829ec69748f9176d1f426207 | d66818f4b951943553826a5f64413e90120e1fae | /hackerearth/Algorithms/Match makers/test.py | e518e3812e5879cbd0b0cddd6bd1e2e5c19ffda8 | [
"MIT"
] | permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 606 | py | import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('builtins.input', side_effect=[
'2',
'4',
'1 6 9 12',
'4 12 3 9',
'4',
'2 2 2 2',
'2 2 2 2',
])
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(),
'2\n' +
'4\n')
if __name__ == '__main__':
unittest.main()
| [
"hbinhct@gmail.com"
] | hbinhct@gmail.com |
869738a8fa6678dc5fc0d5c150e28ec1a2bc1392 | e980abd72cc1291ce571883ca88714b2182f991a | /instagram_crawl_hashtag/spiders/insta_spider.py | ac5ed89760bcde4b95aef34f2c770f836a3496b2 | [] | no_license | seoulblanc/instagram_crawl_hashtag | 0993bfe7a1ec50d0003ea590ea22f53fadd540f9 | 5b9e15627fdef1ddcdbd0072f328e30fcff13275 | refs/heads/master | 2020-03-21T15:10:33.380478 | 2018-06-26T04:11:06 | 2018-06-26T04:11:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,079 | py | import re
import scrapy
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
#scrapy crawl getHashtag -a search_tag=비온다그램 -a tag_count=10
from instagram_crawl_hashtag.items import InstaHashtagItem
import win_unicode_console
win_unicode_console.enable()
class HadhtagSpider(scrapy.Spider):
name = "getHashtag"
def __init__(self, search_tag=None, tag_count=5, *args, **kwargs):
print("search_tag : ", search_tag)
super(HadhtagSpider, self).__init__(*args, **kwargs)
self.search_tag = search_tag
self.tag_count = tag_count
self.browser = webdriver.Chrome('C:\PycharmProjects\insta_hashtag\insta_hashtag\spiders/chromedriver')
def start_requests(self):
url = "https://www.instagram.com/explore/tags/" + self.search_tag+"/"
self.browser.get(url)
# self.contents = ''
self.identifier = []
yield scrapy.Request(url, self.parse_insta)
def parse_insta(self, response):
print('######크롤링 시작!######')
element = self.browser.find_element_by_xpath('//*[@id="react-root"]/section/main/article/div[2]/div/div[1]/div[1]/a/div')
hover = ActionChains(self.browser).move_to_element(element).click()
hover.perform()
#게시글 페이지(?) 뜰 때 까지 기다리기
list = WebDriverWait(self.browser, 10).until(EC.visibility_of_element_located((By.XPATH, '//body/div[3]/div/div[2]/div/article/div[2]/div[1]/ul/li')))
ActionChains(self.browser).move_to_element(list).perform()
for c in range(int(self.tag_count)):
#아직 다음 포스트 안떴을때를 대비
post = []
identifier_ = [1, 1, 1]
while self.identifier == identifier_:
id1 = self.browser.find_element_by_xpath('//body/div[3]/div/div[2]/div/article').get_attribute('class')
id2 = self.browser.find_element_by_xpath('//body/div[3]/div/div[2]/div/article/header/div[2]/div[1]/div[1]/a').get_attribute('title')
post = self.browser.find_elements_by_xpath('//body/div[3]/div/div[2]/div/article/div[2]/div[1]/ul/li')
identifier_ = [id1, id2, post]
id1 = self.browser.find_element_by_xpath('//body/div[3]/div/div[2]/div/article').get_attribute('class')
print("id1 :", id1)
id2 = self.browser.find_element_by_xpath('//body/div[3]/div/div[2]/div/article/header/div[2]/div[1]/div[1]/a').get_attribute('title')
print("id2 :", id2)
post = self.browser.find_elements_by_xpath('//body/div[3]/div/div[2]/div/article/div[2]/div[1]/ul/li')
identifier_ = [id1, id2, post]
self.identifier = identifier_
#긁어온 게시글, 댓글 해시태그
item_ = {}
for con in post:
contents = con.find_element_by_xpath('span').text
tags = re.findall('#[^#^\s]+', contents)
if len(tags) != 0:
item_count = 0
for tag in tags:
item_name = 'tag'+str(item_count)
item_[item_name] = tag.replace('#', '')
item_count += 1
element = self.browser.find_element_by_xpath('//body/div[3]/div/div[1]/div/div/a[2]')
hover = ActionChains(self.browser).move_to_element(element).click()
hover.perform()
#다음 페이지 뜰 때까지 기다리기
import time
time.sleep(1)
if len(item_) != 0:
item = InstaHashtagItem()
item['hashtag'] = item_
yield item
# divs = self.browser.find_elements_by_xpath('//*[@id="react-root"]/section/main/article/div[2]/div/div')
#
# # while len(divs) < 10:
# # self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# # divs = self.driver.find_elements_by_xpath('//*[@id="react-root"]/section/main/article/div[2]/div/div')
#
# for div in divs:
# posts = div.find_elements_by_xpath('div')
#
# for post in posts:
# self.contents = post.find_element_by_xpath('a/div/div/img').get_attribute('alt')
# tags = re.findall('#[^#^\s]+', self.contents)
# if len(tags) != 0:
# count = 1
# item_ = {}
#
# for tag in tags:
# item_name = 'tag'+str(count)
# item_[item_name] = tag.replace('#', '')
# count += 1
# item = InstaHashtagItem()
# item['hashtag'] = item_
#
# yield item
# self.browser.close()
| [
"leejihee950430@gmail.com"
] | leejihee950430@gmail.com |
9acb63c62d9c40269b4f7b23cd136dd4b6813020 | 07f5e9a645b00e74b6a399a4447d683829e6d63a | /alveo/examples/tensorflow/getModels.py | f003537d31926639659c4891be7e97bfed9e8b94 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | asirasa-xilinx/Vitis-AI | 3b29dd7d99192d532a1cf81093c5c81de00ec3b5 | 2ea756d2946d66266c111b09b85f4bcf7fc60764 | refs/heads/master | 2020-09-23T12:08:31.647341 | 2019-12-03T00:19:28 | 2019-12-03T00:19:28 | 225,497,465 | 0 | 0 | NOASSERTION | 2019-12-03T00:46:26 | 2019-12-03T00:46:25 | null | UTF-8 | Python | false | false | 2,171 | py | '''
Copyright 2019 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os, subprocess
import shutil
models = [
"https://www.xilinx.com/bin/public/openDownload?filename=squeezenet.zip",
"https://www.xilinx.com/bin/public/openDownload?filename=models.container.tensorflow.inception_v1_baseline.pb_2019-07-18.zip",
"https://www.xilinx.com/bin/public/openDownload?filename=inception_v4.zip",
"https://www.xilinx.com/bin/public/openDownload?filename=models.container.tensorflow.resnet50_baseline.pb_2019-07-18.zip",
"https://www.xilinx.com/bin/public/openDownload?filename=resnet_v1_101.zip",
"https://www.xilinx.com/bin/public/openDownload?filename=resnet_v1_152.zip",
]
# Where will we work
workDir = os.path.dirname(os.path.realpath(__file__)) + "/TEMP"
# Where are we putting models
modelsDir = os.path.dirname(os.path.realpath(__file__)) + "/models"
try:
os.makedirs(modelsDir)
except OSError as e:
if e.errno != os.errno.EEXIST:
print("Error creating model directory, check permissions!")
raise
print ("Model directory already exists!")
try:
os.makedirs(workDir)
except OSError as e:
if e.errno != os.errno.EEXIST:
print("Error creating work directory, check permissions!")
raise
print ("Work directory already exists!")
os.chdir(workDir)
for model in models:
subprocess.call(["wget",model,"-O","temp.zip"])
subprocess.call(["unzip","-o","temp.zip"])
# Strip Unnecessary heirarchy
for Dir,SubDirs,Files in os.walk("models"):
if len(Files) > 0:
break
for File in Files:
subprocess.call(["mv",os.path.join(Dir,File),modelsDir])
subprocess.call(["rm","-rf","temp.zip","models"])
shutil.rmtree(workDir)
| [
"bryanloz@xilinx.com"
] | bryanloz@xilinx.com |
072e40a242d378c1a17f9f2a3f62a08178177a55 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_202/64.py | 65c7985126860c68e994a7482a5134f0be8da6ab | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,998 | py | q = int(input())
for case in range(1,q+1):
n,m = [int(x) for x in input().split()]
cols = [0]*n
rows = [0]*n
firstrow_plus = [0]*n
orgmatrix = [[0]*n for _ in range(n)]
matrix = [[0]*n for _ in range(n)]
backwards = [0]*(2*n-1)
forwards = [0]*(2*n-1)
points = 0
for _ in range(m):
c,b,a = input().split()
a = int(a)-1
b = int(b)-1
if c == 'x' or c == 'o':
cols[a] += 1
rows[b] += 1
points += 1
orgmatrix[b][a] += 1
if c == '+' or c == 'o':
c1,c2 = a+b,a-b
backwards[c2]+=1
forwards[c1]+=1
firstrow_plus[a] += 1
points += 1
orgmatrix[b][a] += 2
numbackwards = [0]*(2*n-1)
numforwards = [0]*(2*n-1)
for i in range(n):
for j in range(n):
c1,c2 = i+j,i-j
numbackwards[c2]+=1
numforwards[c1]+=1
def cover(pos):
i,j = pos
c1,c2 = i+j,i-j
return numbackwards[c2] + numforwards[c1]
poi = [(i,j) for i in range(n) for j in range(n)]
poi.sort(key = lambda x: cover(x))
for pos in poi:
i,j = pos
c1,c2 = i+j,i-j
if backwards[c2]== 0 and forwards[c1] == 0:
matrix[j][i] += 2
points += 1
backwards[c2]+=1
forwards[c1]+=1
i = 0
j = 0
while i < n and j < n:
while i < n and rows[i]>0:
i+=1
while j<n and cols[j]>0:
j+=1
if i >= n or j >= n:
continue
rows[i] += 1
cols[j] += 1
matrix[i][j] += 1
points += 1
#for j in range(n):
# if firstrow_plus[j] == 0:
# matrix[0][j] += 2
# points += 1
#for j in range(1,n-1):
# matrix[n-1][j] += 2
# points += 1
changes = 0
for i in range(n):
for j in range(n):
if matrix[i][j]>0:
changes += 1
print('Case #%d: %d %d' %(case,points,changes))
for i in range(n):
for j in range(n):
if matrix[i][j]==1:
if orgmatrix[i][j]>0:
print('o %d %d' %(i+1,j+1))
else:
print('x %d %d' %(i+1,j+1))
elif matrix[i][j]==2:
if orgmatrix[i][j]>0:
print('o %d %d' %(i+1,j+1))
else:
print('+ %d %d' %(i+1,j+1))
elif matrix[i][j]>2:
print('o %d %d' %(i+1,j+1))
#prmat = [['.']*n for _ in range(n)]
#for i in range(n):
# for j in range(n):
# dumhet = matrix[i][j] + orgmatrix[i][j]
# if dumhet == 1:
# prmat[i][j] = 'x'
# elif dumhet == 2:
# prmat[i][j] = '+'
# elif dumhet == 3:
# prmat[i][j] = 'o'
#for i in range(n):
# print(*prmat[i])
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
6c02877e13746bdf4d82c317b3033d423649db66 | c479096ea602d145f50f6b6d9f40fd23a59bace1 | /blog/sblog/forms.py | 34a4e069c6aeee73bc1a9d4034f0b0580a8c7a6e | [] | no_license | lnever/a-silly-blog | 99cb6ca9c37b432dfefab3650a7c2e2e916617da | c274f90ef3d927cdb7c6d077602afa5faab3b0bf | refs/heads/master | 2021-01-21T13:48:07.909909 | 2016-05-26T12:54:20 | 2016-05-26T12:54:20 | 55,614,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | from django import forms
class BlogForm(forms.Form):
caption = forms.CharField(label='title', max_length=100)
content = forms.CharField(widget=forms.Textarea) | [
"root@lnever.com"
] | root@lnever.com |
146bec9203e68c0fa200167c79b103029ee2fab6 | d8a4bef9926d751e6246ade415cbb27e584f8a8d | /Dev/trydjango/bin/django-admin | 738f0a00eba24161bd9b031b6ef5f9cc3f0d08a6 | [] | no_license | KannanK-creator/trydjango | ad8b31a832f9b990ba44f5e9e0dbcd6fa163ed7f | 399b601144be5d2481d0bef5328891c33809b7e1 | refs/heads/main | 2023-06-30T12:07:08.428767 | 2021-07-30T23:20:16 | 2021-07-30T23:20:16 | 391,206,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | #!/Users/kannankathiresan/Dev/trydjango/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"kkannan171280@gmail.com"
] | kkannan171280@gmail.com | |
2590c023d108e24d8b87283bf38c9ad7246bd708 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_testifying.py | 54d585fa8f0db28a02a10621604b7a87579812f2 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py |
from xai.brain.wordbase.verbs._testify import _TESTIFY
#calss header
class _TESTIFYING(_TESTIFY, ):
def __init__(self,):
_TESTIFY.__init__(self)
self.name = "TESTIFYING"
self.specie = 'verbs'
self.basic = "testify"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
14f52a34bc9255f5bb49c7dae494fd7834405746 | 1c6283303ceb883add8de4ee07c5ffcfc2e93fab | /Jinja2/lib/python3.7/site-packages/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/ancprange_5c7b16a33b225110a96fb24def386cf1.py | c5c1d643faab09f0a08e0e592081cf841b68970c | [] | no_license | pdobrinskiy/devcore | 0f5b3dfc2f3bf1e44abd716f008a01c443e14f18 | 580c7df6f5db8c118990cf01bc2b986285b9718b | refs/heads/main | 2023-07-29T20:28:49.035475 | 2021-09-14T10:02:16 | 2021-09-14T10:02:16 | 405,919,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123,256 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class AncpRange(Base):
"""ANCP Range
The AncpRange class encapsulates a list of ancpRange resources that are managed by the user.
A list of resources can be retrieved from the server using the AncpRange.find() method.
The list can be managed by using the AncpRange.add() and AncpRange.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'ancpRange'
_SDM_ATT_MAP = {
'AccessAggregationCbTlv': 'accessAggregationCbTlv',
'AtmVci': 'atmVci',
'AtmVpi': 'atmVpi',
'CircuitId': 'circuitId',
'DistributionAlgorithmPercent': 'distributionAlgorithmPercent',
'EnableAccessAggregation': 'enableAccessAggregation',
'Enabled': 'enabled',
'InnerVlanId': 'innerVlanId',
'Name': 'name',
'NasAncpServicePort': 'nasAncpServicePort',
'NasIPAddressIncr': 'nasIPAddressIncr',
'NasIpAddress': 'nasIpAddress',
'NasIpAddressIncr': 'nasIpAddressIncr',
'NasKeepAliveRetries': 'nasKeepAliveRetries',
'NasKeepAliveTimeout': 'nasKeepAliveTimeout',
'ObjectId': 'objectId',
'OuterVlanId': 'outerVlanId',
'UseDslInnerVlan': 'useDslInnerVlan',
'UseDslOuterVlan': 'useDslOuterVlan',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(AncpRange, self).__init__(parent, list_op)
@property
def AncpAtmRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancpatmrange_143d9caa5302cd12e3b869aef31fa963.AncpAtmRange): An instance of the AncpAtmRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancpatmrange_143d9caa5302cd12e3b869aef31fa963 import AncpAtmRange
if self._properties.get('AncpAtmRange', None) is not None:
return self._properties.get('AncpAtmRange')
else:
return AncpAtmRange(self)._select()
@property
def AncpIpRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancpiprange_839eec134c905181ca26fbb79aa00c6a.AncpIpRange): An instance of the AncpIpRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancpiprange_839eec134c905181ca26fbb79aa00c6a import AncpIpRange
if self._properties.get('AncpIpRange', None) is not None:
return self._properties.get('AncpIpRange')
else:
return AncpIpRange(self)._select()
@property
def AncpMacRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancpmacrange_d7adfce512ae33443d921aa75806d22c.AncpMacRange): An instance of the AncpMacRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancpmacrange_d7adfce512ae33443d921aa75806d22c import AncpMacRange
if self._properties.get('AncpMacRange', None) is not None:
return self._properties.get('AncpMacRange')
else:
return AncpMacRange(self)._select()
@property
def AncpPvcRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancppvcrange_2439e131bb1475c2b8a3037752391f88.AncpPvcRange): An instance of the AncpPvcRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancppvcrange_2439e131bb1475c2b8a3037752391f88 import AncpPvcRange
if self._properties.get('AncpPvcRange', None) is not None:
return self._properties.get('AncpPvcRange')
else:
return AncpPvcRange(self)._select()
@property
def AncpVlanRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancpvlanrange_7b49b7726b3a28e72b7a61873dd18f8b.AncpVlanRange): An instance of the AncpVlanRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.ancpvlanrange_7b49b7726b3a28e72b7a61873dd18f8b import AncpVlanRange
if self._properties.get('AncpVlanRange', None) is not None:
return self._properties.get('AncpVlanRange')
else:
return AncpVlanRange(self)._select()
@property
def DslProfileAllocationTable(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dslprofileallocationtable_0f362d03bbc7a8b4a0c98fc2d749d061.DslProfileAllocationTable): An instance of the DslProfileAllocationTable class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dslprofileallocationtable_0f362d03bbc7a8b4a0c98fc2d749d061 import DslProfileAllocationTable
if self._properties.get('DslProfileAllocationTable', None) is not None:
return self._properties.get('DslProfileAllocationTable')
else:
return DslProfileAllocationTable(self)
@property
def DslResyncProfileAllocationTable(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dslresyncprofileallocationtable_e84fe32c361c2c736b5bd2f1a6d3c33f.DslResyncProfileAllocationTable): An instance of the DslResyncProfileAllocationTable class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.dslresyncprofileallocationtable_e84fe32c361c2c736b5bd2f1a6d3c33f import DslResyncProfileAllocationTable
if self._properties.get('DslResyncProfileAllocationTable', None) is not None:
return self._properties.get('DslResyncProfileAllocationTable')
else:
return DslResyncProfileAllocationTable(self)
@property
def AccessAggregationCbTlv(self):
# type: () -> str
"""
Returns
-------
- str: Enable Access Aggregation Circuit ID Binary TLV
"""
return self._get_attribute(self._SDM_ATT_MAP['AccessAggregationCbTlv'])
@AccessAggregationCbTlv.setter
def AccessAggregationCbTlv(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['AccessAggregationCbTlv'], value)
@property
def AtmVci(self):
# type: () -> int
"""
Returns
-------
- number: Custom VPI.
"""
return self._get_attribute(self._SDM_ATT_MAP['AtmVci'])
@AtmVci.setter
def AtmVci(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['AtmVci'], value)
@property
def AtmVpi(self):
# type: () -> int
"""
Returns
-------
- number: Custom VCI.
"""
return self._get_attribute(self._SDM_ATT_MAP['AtmVpi'])
@AtmVpi.setter
def AtmVpi(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['AtmVpi'], value)
@property
def CircuitId(self):
# type: () -> str
"""
Returns
-------
- str: Circuit ID.
"""
return self._get_attribute(self._SDM_ATT_MAP['CircuitId'])
@CircuitId.setter
def CircuitId(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['CircuitId'], value)
@property
def DistributionAlgorithmPercent(self):
# type: () -> int
"""
Returns
-------
- number: DSL_Subscriber-per-AN distribution scattering model.
"""
return self._get_attribute(self._SDM_ATT_MAP['DistributionAlgorithmPercent'])
@DistributionAlgorithmPercent.setter
def DistributionAlgorithmPercent(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['DistributionAlgorithmPercent'], value)
@property
def EnableAccessAggregation(self):
# type: () -> bool
"""
Returns
-------
- bool: Enable Access Aggregation Circuit Binary.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableAccessAggregation'])
@EnableAccessAggregation.setter
def EnableAccessAggregation(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableAccessAggregation'], value)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: Disabled ranges won't be configured nor validated.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def InnerVlanId(self):
# type: () -> int
"""
Returns
-------
- number: Custom inner VLAN ID.
"""
return self._get_attribute(self._SDM_ATT_MAP['InnerVlanId'])
@InnerVlanId.setter
def InnerVlanId(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['InnerVlanId'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of range
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NasAncpServicePort(self):
# type: () -> int
"""
Returns
-------
- number: NAS Ancp Service Port.
"""
return self._get_attribute(self._SDM_ATT_MAP['NasAncpServicePort'])
@NasAncpServicePort.setter
def NasAncpServicePort(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['NasAncpServicePort'], value)
@property
def NasIPAddressIncr(self):
# type: () -> str
"""DEPRECATED
Returns
-------
- str: NAS IP Increment.
"""
return self._get_attribute(self._SDM_ATT_MAP['NasIPAddressIncr'])
@NasIPAddressIncr.setter
def NasIPAddressIncr(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['NasIPAddressIncr'], value)
@property
def NasIpAddress(self):
# type: () -> str
"""
Returns
-------
- str: NAS IP.
"""
return self._get_attribute(self._SDM_ATT_MAP['NasIpAddress'])
@NasIpAddress.setter
def NasIpAddress(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['NasIpAddress'], value)
@property
def NasIpAddressIncr(self):
# type: () -> str
"""
Returns
-------
- str: NAS IP Increment.
"""
return self._get_attribute(self._SDM_ATT_MAP['NasIpAddressIncr'])
@NasIpAddressIncr.setter
def NasIpAddressIncr(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['NasIpAddressIncr'], value)
@property
def NasKeepAliveRetries(self):
# type: () -> int
"""
Returns
-------
- number: NAS Keep Alive Retries.
"""
return self._get_attribute(self._SDM_ATT_MAP['NasKeepAliveRetries'])
@NasKeepAliveRetries.setter
def NasKeepAliveRetries(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['NasKeepAliveRetries'], value)
@property
def NasKeepAliveTimeout(self):
# type: () -> int
"""
Returns
-------
- number: NAS Keep Alive Timeout, in seconds
"""
return self._get_attribute(self._SDM_ATT_MAP['NasKeepAliveTimeout'])
@NasKeepAliveTimeout.setter
def NasKeepAliveTimeout(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['NasKeepAliveTimeout'], value)
@property
def ObjectId(self):
# type: () -> str
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
@property
def OuterVlanId(self):
# type: () -> int
"""
Returns
-------
- number: Custom outer VLAN ID.
"""
return self._get_attribute(self._SDM_ATT_MAP['OuterVlanId'])
@OuterVlanId.setter
def OuterVlanId(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['OuterVlanId'], value)
@property
def UseDslInnerVlan(self):
# type: () -> bool
"""
Returns
-------
- bool: Use DSL subscriber inner VLAN.
"""
return self._get_attribute(self._SDM_ATT_MAP['UseDslInnerVlan'])
@UseDslInnerVlan.setter
def UseDslInnerVlan(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['UseDslInnerVlan'], value)
@property
def UseDslOuterVlan(self):
# type: () -> bool
"""
Returns
-------
- bool: Use actual DSL outer VLAN.
"""
return self._get_attribute(self._SDM_ATT_MAP['UseDslOuterVlan'])
@UseDslOuterVlan.setter
def UseDslOuterVlan(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['UseDslOuterVlan'], value)
def update(self, AccessAggregationCbTlv=None, AtmVci=None, AtmVpi=None, CircuitId=None, DistributionAlgorithmPercent=None, EnableAccessAggregation=None, Enabled=None, InnerVlanId=None, Name=None, NasAncpServicePort=None, NasIPAddressIncr=None, NasIpAddress=None, NasIpAddressIncr=None, NasKeepAliveRetries=None, NasKeepAliveTimeout=None, OuterVlanId=None, UseDslInnerVlan=None, UseDslOuterVlan=None):
# type: (str, int, int, str, int, bool, bool, int, str, int, str, str, str, int, int, int, bool, bool) -> AncpRange
"""Updates ancpRange resource on the server.
Args
----
- AccessAggregationCbTlv (str): Enable Access Aggregation Circuit ID Binary TLV
- AtmVci (number): Custom VPI.
- AtmVpi (number): Custom VCI.
- CircuitId (str): Circuit ID.
- DistributionAlgorithmPercent (number): DSL_Subscriber-per-AN distribution scattering model.
- EnableAccessAggregation (bool): Enable Access Aggregation Circuit Binary.
- Enabled (bool): Disabled ranges won't be configured nor validated.
- InnerVlanId (number): Custom inner VLAN ID.
- Name (str): Name of range
- NasAncpServicePort (number): NAS Ancp Service Port.
- NasIPAddressIncr (str): NAS IP Increment.
- NasIpAddress (str): NAS IP.
- NasIpAddressIncr (str): NAS IP Increment.
- NasKeepAliveRetries (number): NAS Keep Alive Retries.
- NasKeepAliveTimeout (number): NAS Keep Alive Timeout, in seconds
- OuterVlanId (number): Custom outer VLAN ID.
- UseDslInnerVlan (bool): Use DSL subscriber inner VLAN.
- UseDslOuterVlan (bool): Use actual DSL outer VLAN.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, AccessAggregationCbTlv=None, AtmVci=None, AtmVpi=None, CircuitId=None, DistributionAlgorithmPercent=None, EnableAccessAggregation=None, Enabled=None, InnerVlanId=None, Name=None, NasAncpServicePort=None, NasIPAddressIncr=None, NasIpAddress=None, NasIpAddressIncr=None, NasKeepAliveRetries=None, NasKeepAliveTimeout=None, OuterVlanId=None, UseDslInnerVlan=None, UseDslOuterVlan=None):
# type: (str, int, int, str, int, bool, bool, int, str, int, str, str, str, int, int, int, bool, bool) -> AncpRange
"""Adds a new ancpRange resource on the server and adds it to the container.
Args
----
- AccessAggregationCbTlv (str): Enable Access Aggregation Circuit ID Binary TLV
- AtmVci (number): Custom VPI.
- AtmVpi (number): Custom VCI.
- CircuitId (str): Circuit ID.
- DistributionAlgorithmPercent (number): DSL_Subscriber-per-AN distribution scattering model.
- EnableAccessAggregation (bool): Enable Access Aggregation Circuit Binary.
- Enabled (bool): Disabled ranges won't be configured nor validated.
- InnerVlanId (number): Custom inner VLAN ID.
- Name (str): Name of range
- NasAncpServicePort (number): NAS Ancp Service Port.
- NasIPAddressIncr (str): NAS IP Increment.
- NasIpAddress (str): NAS IP.
- NasIpAddressIncr (str): NAS IP Increment.
- NasKeepAliveRetries (number): NAS Keep Alive Retries.
- NasKeepAliveTimeout (number): NAS Keep Alive Timeout, in seconds
- OuterVlanId (number): Custom outer VLAN ID.
- UseDslInnerVlan (bool): Use DSL subscriber inner VLAN.
- UseDslOuterVlan (bool): Use actual DSL outer VLAN.
Returns
-------
- self: This instance with all currently retrieved ancpRange resources using find and the newly added ancpRange resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained ancpRange resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, AccessAggregationCbTlv=None, AtmVci=None, AtmVpi=None, CircuitId=None, DistributionAlgorithmPercent=None, EnableAccessAggregation=None, Enabled=None, InnerVlanId=None, Name=None, NasAncpServicePort=None, NasIPAddressIncr=None, NasIpAddress=None, NasIpAddressIncr=None, NasKeepAliveRetries=None, NasKeepAliveTimeout=None, ObjectId=None, OuterVlanId=None, UseDslInnerVlan=None, UseDslOuterVlan=None):
# type: (str, int, int, str, int, bool, bool, int, str, int, str, str, str, int, int, str, int, bool, bool) -> AncpRange
"""Finds and retrieves ancpRange resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve ancpRange resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all ancpRange resources from the server.
Args
----
- AccessAggregationCbTlv (str): Enable Access Aggregation Circuit ID Binary TLV
- AtmVci (number): Custom VPI.
- AtmVpi (number): Custom VCI.
- CircuitId (str): Circuit ID.
- DistributionAlgorithmPercent (number): DSL_Subscriber-per-AN distribution scattering model.
- EnableAccessAggregation (bool): Enable Access Aggregation Circuit Binary.
- Enabled (bool): Disabled ranges won't be configured nor validated.
- InnerVlanId (number): Custom inner VLAN ID.
- Name (str): Name of range
- NasAncpServicePort (number): NAS Ancp Service Port.
- NasIPAddressIncr (str): NAS IP Increment.
- NasIpAddress (str): NAS IP.
- NasIpAddressIncr (str): NAS IP Increment.
- NasKeepAliveRetries (number): NAS Keep Alive Retries.
- NasKeepAliveTimeout (number): NAS Keep Alive Timeout, in seconds
- ObjectId (str): Unique identifier for this object
- OuterVlanId (number): Custom outer VLAN ID.
- UseDslInnerVlan (bool): Use DSL subscriber inner VLAN.
- UseDslOuterVlan (bool): Use actual DSL outer VLAN.
Returns
-------
- self: This instance with matching ancpRange resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of ancpRange data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the ancpRange resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def AncpBringUpDslSubscribers(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the ancpBringUpDslSubscribers operation on the server.
Bring up DSL subscribers
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
ancpBringUpDslSubscribers(async_operation=bool)
-----------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
ancpBringUpDslSubscribers(Arg2=enum, async_operation=bool)
----------------------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('ancpBringUpDslSubscribers', payload=payload, response_object=None)
def AncpStart(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the ancpStart operation on the server.
Negotiate ANCP and PPPoE sessions for selected ranges
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
ancpStart(async_operation=bool)
-------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
ancpStart(Arg2=enum, async_operation=bool)
------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('ancpStart', payload=payload, response_object=None)
def AncpStartAdjacency(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the ancpStartAdjacency operation on the server.
Start ANCP adjacency
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
ancpStartAdjacency(async_operation=bool)
----------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
ancpStartAdjacency(Arg2=enum, async_operation=bool)
---------------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('ancpStartAdjacency', payload=payload, response_object=None)
def AncpStartResync(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the ancpStartResync operation on the server.
Start resync ANCP DSL line capabilities for selected ranges
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
ancpStartResync(Arg2=number, Arg3=number, async_operation=bool)
---------------------------------------------------------------
- Arg2 (number): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange]
- Arg3 (number): Number of ReSync iterations performed by DSL Lines in selected ANCP ranges.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
ancpStartResync(Arg2=number, Arg3=number, Arg4=enum, async_operation=bool)
--------------------------------------------------------------------------
- Arg2 (number): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange]
- Arg3 (number): Number of ReSync iterations performed by DSL Lines in selected ANCP ranges.
- Arg4 (str(async | sync)): Time gap between ReSync terations performed by DSL Lines in selected ANCP ranges [seconds].
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('ancpStartResync', payload=payload, response_object=None)
def AncpStop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the ancpStop operation on the server.
Teardown ANCP and PPPoE sessions for selected ranges
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
ancpStop(async_operation=bool)
------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
ancpStop(Arg2=enum, async_operation=bool)
-----------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('ancpStop', payload=payload, response_object=None)
def AncpStopAdjacency(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the ancpStopAdjacency operation on the server.
Stop ANCP adjacency
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
ancpStopAdjacency(async_operation=bool)
---------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
ancpStopAdjacency(Arg2=enum, async_operation=bool)
--------------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('ancpStopAdjacency', payload=payload, response_object=None)
def AncpTeardownDslSubscribers(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the ancpTeardownDslSubscribers operation on the server.
Tear down DSL subscribers
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
ancpTeardownDslSubscribers(async_operation=bool)
------------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
ancpTeardownDslSubscribers(Arg2=enum, async_operation=bool)
-----------------------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('ancpTeardownDslSubscribers', payload=payload, response_object=None)
def CustomProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the customProtocolStack operation on the server.
Create custom protocol stack under /vport/protocolStack
customProtocolStack(Arg2=list, Arg3=enum, async_operation=bool)
---------------------------------------------------------------
- Arg2 (list(str)): List of plugin types to be added in the new custom stack
- Arg3 (str(kAppend | kMerge | kOverwrite)): Append, merge or overwrite existing protocol stack
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('customProtocolStack', payload=payload, response_object=None)
def DisableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the disableProtocolStack operation on the server.
Disable a protocol under protocolStack using the class name
disableProtocolStack(Arg2=string, async_operation=bool)string
-------------------------------------------------------------
- Arg2 (str): Protocol class name to disable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('disableProtocolStack', payload=payload, response_object=None)
def EnableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the enableProtocolStack operation on the server.
Enable a protocol under protocolStack using the class name
enableProtocolStack(Arg2=string, async_operation=bool)string
------------------------------------------------------------
- Arg2 (str): Protocol class name to enable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('enableProtocolStack', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the start operation on the server.
Negotiate sessions for all protocols on all ranges belonging to selected plugins
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(Arg2=enum, async_operation=bool)
--------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm,/vport/protocolStack/atm/dhcpEndpoint,/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/dhcpServerEndpoint,/vport/protocolStack/atm/dhcpServerEndpoint/range,/vport/protocolStack/atm/emulatedRouter,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint/range,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/twampClient,/vport/protocolStack/atm/emulatedRouter/ip/twampServer,/vport/protocolStack/atm/emulatedRouter/ipEndpoint,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/twampClient,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/twampServer,/vport/protocolStack/atm/emulatedRouterEndpoint,/vport/protocolStack/atm/emulatedRouterEndpoint/range/amtRange,/vport/protocolStack/atm/ip,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tp,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tpEndpoint,/vport/protocolStack/atm/ip/l2tpEndpoint/range,/vport/protocolStack/atm/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/smDnsEndpoint,/vport/protocolStack/atm/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/twampClient,/vport/protocolStack/atm/ip/twampServer,/vport/protocolStack/atm/ipEndpoint,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/amtRange,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/range/twampControlRange,/vport/protocolStack/atm/ipEndpoint/twampClient,/vport/protocolStack/atm/ipEndpoint/twampServer,/vport/protocolStack/atm/pppox,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/pppoxEndpoint,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppoxEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet,/vport/protocolStack/ethernet/dcbxEndpoint,/vport/protocolStack/ethernet/dcbxEndpoint/range,/vport/protocolStack/ethernet/dhcpEndpoint,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpServerEndpoint,/vport/protocolStack/ethernet/dhcpServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/twampClient,/vport/protocolStack/ethernet/emulatedRouter/ip/twampServer,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/twampClient,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/twampServer,/vport/protocolStack/ethernet/emulatedRouterEndpoint,/vport/protocolStack/ethernet/emulatedRouterEndpoint/range/amtRange,/vport/protocolStack/ethernet/esmc,/vport/protocolStack/ethernet/fcoeClientEndpoint,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFdiscRange,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFlogiRange,/vport/protocolStack/ethernet/fcoeFwdEndpoint,/vport/protocolStack/ethernet/fcoeFwdEndpoint/range,/vport/protocolStack/ethernet/fcoeFwdEndpoint/secondaryRange,/vport/protocolStack/ethernet/ip,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tp,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/twampClient,/vport/protocolStack/ethernet/ip/twampServer,/vport/protocolStack/ethernet/ipEndpoint,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/amtRange,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ipEndpoint/twampClient,/vport/protocolStack/ethernet/ipEndpoint/twampServer,/vport/protocolStack/ethernet/pppox,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/pppoxEndpoint,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/vepaEndpoint,/vport/protocolStack/ethernet/vepaEndpoint/range,/vport/protocolStack/ethernetEndpoint,/vport/protocolStack/ethernetEndpoint/esmc,/vport/protocolStack/fcClientEndpoint,/vport/protocolStack/fcClientEndpoint/range,/vport/protocolStack/fcClientEndpoint/range,/vport/protocolStack/fcClientEndpoint/range/fcClientFdiscRange,/vport/protocolStack/fcClientEndpoint/range/fcClientFlogiRange,/vport/protocolStack/fcFportFwdEndpoint,/vport/protocolStack/fcFportFwdEndpoint/range,/vport/protocolStack/fcFportFwdEndpoint/secondaryRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stop operation on the server.
Teardown sessions for all protocols on all ranges belonging to selected plugins
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
stop(Arg2=enum, async_operation=bool)
-------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm,/vport/protocolStack/atm/dhcpEndpoint,/vport/protocolStack/atm/dhcpEndpoint/ancp,/vport/protocolStack/atm/dhcpEndpoint/range,/vport/protocolStack/atm/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/dhcpServerEndpoint,/vport/protocolStack/atm/dhcpServerEndpoint/range,/vport/protocolStack/atm/emulatedRouter,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint/range,/vport/protocolStack/atm/emulatedRouter/dhcpServerEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip,/vport/protocolStack/atm/emulatedRouter/ip/ancp,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ip/twampClient,/vport/protocolStack/atm/emulatedRouter/ip/twampServer,/vport/protocolStack/atm/emulatedRouter/ipEndpoint,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/amtRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/twampControlRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/twampClient,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/twampServer,/vport/protocolStack/atm/emulatedRouterEndpoint,/vport/protocolStack/atm/emulatedRouterEndpoint/range/amtRange,/vport/protocolStack/atm/ip,/vport/protocolStack/atm/ip/ancp,/vport/protocolStack/atm/ip/egtpPcrfEndpoint,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tp,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/l2tpEndpoint,/vport/protocolStack/atm/ip/l2tpEndpoint/range,/vport/protocolStack/atm/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/smDnsEndpoint,/vport/protocolStack/atm/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/atm/ip/twampClient,/vport/protocolStack/atm/ip/twampServer,/vport/protocolStack/atm/ipEndpoint,/vport/protocolStack/atm/ipEndpoint/ancp,/vport/protocolStack/atm/ipEndpoint/range/amtRange,/vport/protocolStack/atm/ipEndpoint/range/ancpRange,/vport/protocolStack/atm/ipEndpoint/range/twampControlRange,/vport/protocolStack/atm/ipEndpoint/twampClient,/vport/protocolStack/atm/ipEndpoint/twampServer,/vport/protocolStack/atm/pppox,/vport/protocolStack/atm/pppox/ancp,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/atm/pppoxEndpoint,/vport/protocolStack/atm/pppoxEndpoint/ancp,/vport/protocolStack/atm/pppoxEndpoint/range,/vport/protocolStack/atm/pppoxEndpoint/range/ancpRange,/vport/protocolStack/atm/pppoxEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/atm/pppoxEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet,/vport/protocolStack/ethernet/dcbxEndpoint,/vport/protocolStack/ethernet/dcbxEndpoint/range,/vport/protocolStack/ethernet/dhcpEndpoint,/vport/protocolStack/ethernet/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/dhcpEndpoint/range,/vport/protocolStack/ethernet/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/dhcpServerEndpoint,/vport/protocolStack/ethernet/dhcpServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/dhcpServerEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip,/vport/protocolStack/ethernet/emulatedRouter/ip/ancp,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ip/twampClient,/vport/protocolStack/ethernet/emulatedRouter/ip/twampServer,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/ancp,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/amtRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/twampClient,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/twampServer,/vport/protocolStack/ethernet/emulatedRouterEndpoint,/vport/protocolStack/ethernet/emulatedRouterEndpoint/range/amtRange,/vport/protocolStack/ethernet/esmc,/vport/protocolStack/ethernet/fcoeClientEndpoint,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFdiscRange,/vport/protocolStack/ethernet/fcoeClientEndpoint/range/fcoeClientFlogiRange,/vport/protocolStack/ethernet/fcoeFwdEndpoint,/vport/protocolStack/ethernet/fcoeFwdEndpoint/range,/vport/protocolStack/ethernet/fcoeFwdEndpoint/secondaryRange,/vport/protocolStack/ethernet/ip,/vport/protocolStack/ethernet/ip/ancp,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tp,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/amtRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ip/twampClient,/vport/protocolStack/ethernet/ip/twampServer,/vport/protocolStack/ethernet/ipEndpoint,/vport/protocolStack/ethernet/ipEndpoint/ancp,/vport/protocolStack/ethernet/ipEndpoint/range/amtRange,/vport/protocolStack/ethernet/ipEndpoint/range/ancpRange,/vport/protocolStack/ethernet/ipEndpoint/range/twampControlRange,/vport/protocolStack/ethernet/ipEndpoint/twampClient,/vport/protocolStack/ethernet/ipEndpoint/twampServer,/vport/protocolStack/ethernet/pppox,/vport/protocolStack/ethernet/pppox/ancp,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/pppoxEndpoint,/vport/protocolStack/ethernet/pppoxEndpoint/ancp,/vport/protocolStack/ethernet/pppoxEndpoint/range,/vport/protocolStack/ethernet/pppoxEndpoint/range/ancpRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/dhcpv6PdClientRange,/vport/protocolStack/ethernet/pppoxEndpoint/range/dhcpv6ServerRange,/vport/protocolStack/ethernet/vepaEndpoint,/vport/protocolStack/ethernet/vepaEndpoint/range,/vport/protocolStack/ethernetEndpoint,/vport/protocolStack/ethernetEndpoint/esmc,/vport/protocolStack/fcClientEndpoint,/vport/protocolStack/fcClientEndpoint/range,/vport/protocolStack/fcClientEndpoint/range,/vport/protocolStack/fcClientEndpoint/range/fcClientFdiscRange,/vport/protocolStack/fcClientEndpoint/range/fcClientFlogiRange,/vport/protocolStack/fcFportFwdEndpoint,/vport/protocolStack/fcFportFwdEndpoint/range,/vport/protocolStack/fcFportFwdEndpoint/secondaryRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
| [
"pdobrinskiy@yahoo.com"
] | pdobrinskiy@yahoo.com |
1eb98f2d844650b882428fefa03f6399e0a06394 | 8ebcb068da208128e4a3e1e7931c21ef43407eae | /udemyPython/udemyTest1.py | 3de1db1925de51bbc90486cf1ef041b9b0c8b592 | [] | no_license | orakinahmed/Python-Practice | f09d33d6d22a4d957d88a855d895174828ef1270 | a4cadfc4933108497e9ad431ee1e5f2839024b2a | refs/heads/master | 2023-06-12T07:21:31.741876 | 2021-07-07T19:21:05 | 2021-07-07T19:21:05 | 383,206,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | # Variables
x = 15
price = 9.99
discount = 0.2
result = price * (1-discount)
print(result)
name = "rakin"
name = "bob"
print(name)
print(name*2)
print(name + name)
a = 25
b = a
print(a)
print(b)
a = 30
b = 17
print(a)
print(b)
#String Formatting
name = "Rakin"
greeting = f"Hello {name}"
print(greeting)
name = "Ben"
print(f"Hello {name}")
name = "john"
greeting = "wassup, {}"
with_name = greeting.format(name)
print(with_name)
name = "Kevin"
greeting = "Bye, {}"
with_name = greeting.format(name)
with_name2 = greeting.format("Ben")
print(with_name)
print(with_name2)
longer_phrase = "Hello, {}. Today is {}."
formatted = longer_phrase.format("Patrick", "Friday")
print(formatted)
#Getting user input
name = input("Enter your name: ")
print(name)
size_input = input("How big is your house (in square feet): ") # input function always gives back string
square_feet = int(size_input) # allowing you to store an integer within variable
square_meters = square_feet / 10.8 # calculating square meters by using the stored variable in square feet
print(f"My house in {square_feet} square feet is about {square_meters:.2f} in square meters")
# using f string to store the values within the string
# :.2f will format square meters into 2 decimal places
| [
"rakin.ahammed@gmail.com"
] | rakin.ahammed@gmail.com |
1cc1d640e21ab0f100c76bc623beac7409d3eb4f | 2b01f8f7ee7e841f64629f88896478d3b4221480 | /flatpages_i18n/middleware.py | 70654537b556fffc508c62e7155e802524587996 | [
"BSD-3-Clause"
] | permissive | PragmaticMates/django-flatpages-i18n | fde6821774419c1a3cbafe317c5c3c91730339a5 | 434903e60518e0c1a54f0aea24e1d3e8eb0449bd | refs/heads/master | 2023-06-09T18:13:11.192887 | 2023-06-05T07:32:10 | 2023-06-05T07:32:10 | 6,462,420 | 16 | 19 | BSD-3-Clause | 2019-07-12T07:03:15 | 2012-10-30T18:40:25 | Python | UTF-8 | Python | false | false | 817 | py | # from django.conf import settings
# from django.http import Http404
# from django.utils.deprecation import MiddlewareMixin
#
# from flatpages_i18n.views import flatpage
#
#
# class FlatpageFallbackMiddleware(MiddlewareMixin):
# def process_response(self, request, response):
# if response.status_code != 404:
# # No need to check for a flatpage for non-404 responses.
# return response
# try:
# return flatpage(request, request.path_info)
# # Return the original response if any errors happened. Because this
# # is a middleware, we can't assume the errors will be caught elsewhere.
# except Http404:
# return response
# except:
# if settings.DEBUG:
# raise
# return response
| [
"erik.telepovsky@gmail.com"
] | erik.telepovsky@gmail.com |
0545ee61a9229fd783190028974b2e8ab8a055d7 | 46de5c99419f112b4507fd386f398769626ad328 | /Leetcode/217.ContainsDuplicate.py | b9f5965a72946935daa2bbc5e1b0c172cd04859d | [] | no_license | sanu11/Codes | 20a7903d95d600078db8b0bf0e12a3731615c3c1 | dd58a5577b51ade54f95c96003fc2c99609c15eb | refs/heads/master | 2021-01-21T04:50:36.855876 | 2019-07-09T05:12:56 | 2019-07-09T05:12:56 | 48,174,017 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | class Solution(object):
def containsDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
if len(nums) == len(set(nums)):
return False
else:
return True
| [
"sanikashah1110@gmail.com"
] | sanikashah1110@gmail.com |
ab47dbb17dc6be7a3c2cff8263f826ceab05d21e | 5dfeb8a662456ad770aa19ea97174a8a9fd1670e | /test_python/test_scalar_tc.py | 49d74fd5416c5a1e4ac4a39d8b97024ac49605fd | [
"Apache-2.0"
] | permissive | moskomule/TensorComprehensions | ce2e7368c0977a45381fdc906a84d7fd115e98a3 | c215ae2bbe1cf0424a65b1848dec486aa2d2eafa | refs/heads/master | 2021-01-25T14:05:04.242217 | 2018-03-02T22:05:53 | 2018-03-02T22:05:53 | 123,654,128 | 0 | 0 | Apache-2.0 | 2018-03-03T03:05:31 | 2018-03-03T03:05:31 | null | UTF-8 | Python | false | false | 2,577 | py | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import tensor_comprehensions as tc
import torch
import unittest, os
class TestCase(unittest.TestCase):
def test_avgpool_option1(self):
# NOTE: take note of use of {{ }} below for handling TC with scalars
LANG = """
def avgpool(float(B, C, H, W) input) -> (output) {{
output(b, c, h, w) += input(b, c, h * {sH} + kh, w * {sW} + kw) where kh in 0:{kH}, kw in 0:{kW}
}}
"""
avgpool = tc.define(LANG, name="avgpool", constants={"sH":1, "sW":1, "kH":2, "kW":2})
inp = torch.ones(32, 3, 10, 10).cuda()
out = avgpool(inp)
def test_avgpool_option2(self):
# NOTE: take note of use of {{ }}
LANG="""
def avgpool(float(B, C, H, W) input) -> (output) {{
output(b, c, h, w) += input(b, c, h * {sh} + kh, w * {sw} + kw) where kh = [0, {kH}[, kw = [0, {kW}[
}}
"""
sH, sW, kH, kW = 1, 1, 2, 2
# format the strings yourself before passing to TC backend.
LANG = LANG.format(sh=sH, sw=sW, kH=kH, kW=kW)
avgpool = tc.define(LANG, name="avgpool")
inp = torch.ones(1, 1, 4, 4).cuda()
out = avgpool(inp)
def test_avgpool_option3(self):
# If you prefer to do string substitutions yourself, here is another way below
import re
LANG="""
def avgpool(float(B, C, H, W) input) -> (output) {
output(b, c, h, w) += input(b, c, h * <sh> + kh, w * <sw> + kw) where kh in 0:<kH>, kw in 0:<kW>
}
"""
sH, sW, kH, kW = 1, 1, 2, 2
LANG = re.sub('<sh>', str(sH), LANG)
LANG = re.sub('<sw>', str(sW), LANG)
LANG = re.sub('<kH>', str(kH), LANG)
LANG = re.sub('<kW>', str(kW), LANG)
avgpool = tc.define(LANG, name="avgpool")
inp = torch.ones(1, 1, 4, 4).cuda()
out = avgpool(inp)
if __name__ == '__main__':
unittest.main()
| [
"priy2201@gmail.com"
] | priy2201@gmail.com |
a80c67c55e906249141c396955f378a5e71e0c84 | 232c25bb3a419073a46e6dcffe25e4765a1f8dcf | /venv/bin/pip3.8 | 620535e0863a9ca8874d17e9a22511b88873965f | [] | no_license | erm989/COP4814_Project4 | bac6c15660ad4922912b4a6abbadf5e5aa385e3a | d87ee17877c9d33c6bff13e822efeb2a16b79ee6 | refs/heads/master | 2023-01-13T07:33:36.241507 | 2020-11-16T18:54:41 | 2020-11-16T18:54:41 | 313,397,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | 8 | #!/Users/ernstrod/PycharmProjects/AQI_WebApp/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"erm989@yahoo.es"
] | erm989@yahoo.es |
2c124750cf41e1dc6bc757d9cae7b1b23fa31c3b | 0b45866a6129e1036abb45d1cf4d9b0b4ebd0b97 | /ikinciGun/fonksiyon.py | 358eedf08b2c9e7fe0067e48aeddaee8042de595 | [] | no_license | anilyilmazz/Python101 | 34c45f347dbf9425aee2209f47df92a0d66e2413 | 0dc66857e363840cef5305c524fb64cabe08949f | refs/heads/master | 2020-05-16T06:47:37.019304 | 2019-04-22T19:56:55 | 2019-04-22T19:56:55 | 182,858,133 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | ad = "ali"
soyad = "duru"
def yazdir(kAd,kSoyad):
print(kAd,kSoyad)
def topla(sayi1,sayi2):
return (sayi1 + sayi2)
toplam = topla(5,6)
print(toplam)
def sayilar(sayi1,sayi2,sayi3):
print(sayi1,sayi2,sayi3)
| [
"incelefon@gmail.com"
] | incelefon@gmail.com |
46cfeab80121bbdda4dcb118d85592bc0c74ef1b | 38811cb30ca7c9250ad4e8f187f954c95a496fed | /matcher/wsgi.py | 3e827806bd071b8ff6bc3b49e5dca65ec08bb447 | [] | no_license | NoiCoh/candidatesMatcher | 6ca233dff2d63a36b21c24bc92987fbcf4f4ae5b | 1acb0d9c39eead160fe7c96983ec228efc40469d | refs/heads/main | 2023-03-18T19:01:24.352835 | 2021-03-05T11:52:56 | 2021-03-05T11:52:56 | 344,746,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | """
WSGI config for matcher project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "matcher.settings")
application = get_wsgi_application()
| [
"noi.cohen@ibm.com"
] | noi.cohen@ibm.com |
b5313a0c332b254b40ee3fd6b665c6bb16563be2 | b073515d29bf4decba48a09afa469809067d45fd | /api/backend/script/age_detection/script.py | 2fb8f25b780e06f86da694b1e7afd49487e5e359 | [] | no_license | NgTuanLoc/Photoshop_Webapp | 9b4e520f41da2f6e551fbe22d24a94d56c47037f | 44bc0492be36afad55ca85ae245abcf2af22994a | refs/heads/master | 2023-08-28T06:19:22.814371 | 2021-11-12T08:21:43 | 2021-11-12T08:21:43 | 415,539,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | from tensorflow import keras
import cv2 as cv
import numpy as np
model = keras.models.load_model('./Age_sex_detection.h5')
def read_image(image_path):
image=cv.imread(image_path)
image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
image= cv.resize(image,(48,48))
return image
def predict_image(image, Model):
image_f = np.array([image])/255
pred_1=Model.predict(image_f)
print(pred_1)
sex_f=['Male','Female']
age=int(np.round(pred_1[1][0]))
sex=int(np.round(pred_1[0][0]))
print("Predicted Age: "+ str(age))
print("Predicted Sex: "+ sex_f[sex])
predict_image(read_image("./data/input/test6.jpg"), model)
| [
"18521011@gm.uit.edu.vn"
] | 18521011@gm.uit.edu.vn |
065f85c6bc7ddd4d0f966870cc2e196bbbf305c5 | baeb6e78133f012d136bbf8f763e3f9d8cb2a235 | /Youtube/11_Oops_1.py | 82de1540571c1352cf67f61d44c346fdfb197bbf | [] | no_license | vipsinha/Python | 7ad36ff9277ac870b07c660ccdace06b92efcba8 | b5acb5ad3e3d319f57220b337becddec14ec4e4d | refs/heads/main | 2023-02-02T13:53:18.389118 | 2020-12-22T19:45:22 | 2020-12-22T19:45:22 | 276,215,002 | 1 | 0 | null | 2020-12-22T19:45:24 | 2020-06-30T21:44:17 | Python | UTF-8 | Python | false | false | 1,473 | py | # Classes 1
print('####Classes_1#####')
class Employee_1:
pass
emp_1 = Employee_1()
emp_2 = Employee_1()
print(emp_1)
print(emp_2)
emp_1.first = 'Vipul'
emp_1.last = 'Sinha'
emp_1.email = 'Vipul.Sinha@company.com'
emp_1.pay = 80000
emp_2.first = 'Check'
emp_2.last = 'Mate'
emp_2.email = 'Check.Mate@company.com'
emp_2.pay = 70000
print(emp_1.email)
print(emp_2.email)
# Classes 2
print('####Classes_2#####')
class Employee_2:
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.email = first + '.' + last + '@company.com'
self.pay = pay
emp_1 = Employee_2('Vipul', 'Sinha', 90000)
emp_2 = Employee_2('Test', 'User', 95000)
print(emp_1.email)
print(emp_2.email)
print('{} {}'.format(emp_1.first, emp_1.last))
# Classes 3
print('####Classes_3#####')
## Clasess
class Employee_3:
# Constructor/ initialised
# self is an instance and others are arguments
def __init__(self, first, last, pay):
self.first = first
self.last = last
self.email = first + '.' + last + '@company.com'
self.pay = pay
# methods
def full_name(self):
return '{} {}'.format(self.first, self.last)
# data and functions associated with a class are called attributes
#instances of a class
emp_1 = Employee_3('Check', 'Mate', 90000)
emp_2 = Employee_3('Test', 'User', 95000)
print(emp_1.full_name())
print(Employee_3.full_name(emp_2)) # instance passed as an argument | [
"vipul.sinha@elektrobit.com"
] | vipul.sinha@elektrobit.com |
c47c8f25ef0a16065acb3db10db7b20ecfff2d6a | 4a9f283c74df2f8113ab2c0841a06905f9191d49 | /hummingbird/ml/operator_converters/_tree_commons.py | 941a021ea341d090c891145c88230809f8f995b4 | [
"BSD-3-Clause",
"MIT"
] | permissive | hannahaih/hummingbird | 8c02abc63c243b357d3a20c931de8384dbaa14ad | b8ec670b3c90ec7e87d3ae4a2b268075bd5eae65 | refs/heads/main | 2023-02-11T01:38:08.603018 | 2020-12-23T23:50:35 | 2020-12-23T23:50:35 | 325,285,418 | 1 | 0 | MIT | 2020-12-29T12:48:35 | 2020-12-29T12:48:34 | null | UTF-8 | Python | false | false | 14,836 | py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Collections of classes and functions shared among all tree converters.
"""
import copy
import numpy as np
from ._tree_implementations import TreeImpl
from ._tree_implementations import GEMMDecisionTreeImpl, TreeTraversalDecisionTreeImpl, PerfectTreeTraversalDecisionTreeImpl
from . import constants
class Node:
"""
Class defining a tree node.
"""
def __init__(self, id=None):
"""
Args:
id: A unique ID for the node
left: The id of the left node
right: The id of the right node
feature: The feature used to make a decision (if not leaf node, ignored otherwise)
threshold: The threshold used in the decision (if not leaf node, ignored otherwise)
value: The value stored in the leaf (ignored if not leaf node).
"""
self.id = id
self.left = None
self.right = None
self.feature = None
self.threshold = None
self.value = None
class TreeParameters:
"""
Class containing a convenient in-memory representation of a decision tree.
"""
def __init__(self, lefts, rights, features, thresholds, values):
"""
Args:
lefts: The id of the left nodes
rights: The id of the right nodes
feature: The features used to make decisions
thresholds: The thresholds used in the decisions
values: The value stored in the leaves
"""
self.lefts = lefts
self.rights = rights
self.features = features
self.thresholds = thresholds
self.values = values
def _find_max_depth(tree_parameters):
"""
Function traversing all trees in sequence and returning the maximum depth.
"""
depth = 0
for tree in tree_parameters:
tree = copy.deepcopy(tree)
lefts = tree.lefts
rights = tree.rights
ids = [i for i in range(len(lefts))]
nodes = list(zip(ids, lefts, rights))
nodes_map = {0: Node(0)}
current_node = 0
for i, node in enumerate(nodes):
id, left, right = node
if left != -1:
l_node = Node(left)
nodes_map[left] = l_node
else:
lefts[i] = id
l_node = -1
if right != -1:
r_node = Node(right)
nodes_map[right] = r_node
else:
rights[i] = id
r_node = -1
nodes_map[current_node].left = l_node
nodes_map[current_node].right = r_node
current_node += 1
depth = max(depth, _find_depth(nodes_map[0], -1))
return depth
def _find_depth(node, current_depth):
"""
Recursive function traversing a tree and returning the maximum depth.
"""
if node.left == -1 and node.right == -1:
return current_depth + 1
elif node.left != -1 and node.right == -1:
return _find_depth(node.l, current_depth + 1)
elif node.right != -1 and node.left == -1:
return _find_depth(node.r, current_depth + 1)
elif node.right != -1 and node.left != -1:
return max(_find_depth(node.left, current_depth + 1), _find_depth(node.right, current_depth + 1))
def get_tree_implementation_by_config_or_depth(extra_config, max_depth, low=3, high=10):
"""
Utility function used to pick the tree implementation based on input parameters and heurstics.
The current heuristic is such that GEMM <= low < PerfTreeTrav <= high < TreeTrav
Args:
max_depth: The maximum tree-depth found in the tree model.
low: The maximum depth below which GEMM strategy is used
high: The maximum depth for which PerfTreeTrav strategy is used
Returns: A tree implementation
"""
if constants.TREE_IMPLEMENTATION not in extra_config:
if max_depth is not None and max_depth <= low:
return TreeImpl.gemm
elif max_depth is not None and max_depth <= high:
return TreeImpl.perf_tree_trav
else:
return TreeImpl.tree_trav
if extra_config[constants.TREE_IMPLEMENTATION] == TreeImpl.gemm.name:
return TreeImpl.gemm
elif extra_config[constants.TREE_IMPLEMENTATION] == TreeImpl.tree_trav.name:
return TreeImpl.tree_trav
elif extra_config[constants.TREE_IMPLEMENTATION] == TreeImpl.perf_tree_trav.name:
return TreeImpl.perf_tree_trav
else:
raise ValueError("Tree implementation {} not found".format(extra_config))
def get_tree_params_and_type(tree_infos, get_tree_parameters, extra_config):
"""
Populate the parameters from the trees and pick the tree implementation strategy.
Args:
tree_infos: The information representaing a tree (ensemble)
get_tree_parameters: A function specifying how to parse the tree_infos into a
`operator_converters._tree_commons_TreeParameters` object
extra_config: param extra_config: Extra configuration used also to select the best conversion strategy
Returns:
The tree parameters, the maximum tree-depth and the tre implementation to use
"""
tree_parameters = [get_tree_parameters(tree_info) for tree_info in tree_infos]
max_depth = max(1, _find_max_depth(tree_parameters))
tree_type = get_tree_implementation_by_config_or_depth(extra_config, max_depth)
return tree_parameters, max_depth, tree_type
def get_parameters_for_sklearn_common(tree_infos):
"""
Parse sklearn-based trees, including SklearnRandomForestClassifier/Regressor and SklearnGradientBoostingClassifier/Regressor
Args:
tree_infos: The information representing a tree (ensemble)
Returns: The tree parameters wrapped into an instance of `operator_converters._tree_commons_TreeParameters`
"""
lefts = tree_infos.tree_.children_left
rights = tree_infos.tree_.children_right
features = tree_infos.tree_.feature
thresholds = tree_infos.tree_.threshold
values = tree_infos.tree_.value
return TreeParameters(lefts, rights, features, thresholds, values)
def get_parameters_for_tree_trav_common(lefts, rights, features, thresholds, values, extra_config={}):
"""
Common functions used by all tree algorithms to generate the parameters according to the tree_trav strategies.
Args:
left: The left nodes
right: The right nodes
features: The features used in the decision nodes
thresholds: The thresholds used in the decision nodes
values: The values stored in the leaf nodes
Returns:
An array containing the extracted parameters
"""
if len(lefts) == 1:
# Model creating tree with just a single leaf node. We transform it
# to a model with one internal node.
lefts = [1, -1, -1]
rights = [2, -1, -1]
features = [0, 0, 0]
thresholds = [0, 0, 0]
n_classes = values.shape[1] if type(values) is np.ndarray else 1
values = np.array([np.array([0.0]), values[0], values[0]])
values.reshape(3, n_classes)
ids = [i for i in range(len(lefts))]
nodes = list(zip(ids, lefts, rights, features, thresholds, values))
# Refactor the tree parameters in the proper format.
nodes_map = {0: Node(0)}
current_node = 0
for i, node in enumerate(nodes):
id, left, right, feature, threshold, value = node
if left != -1:
l_node = Node(left)
nodes_map[left] = l_node
else:
lefts[i] = id
l_node = -1
feature = -1
if right != -1:
r_node = Node(right)
nodes_map[right] = r_node
else:
rights[i] = id
r_node = -1
feature = -1
nodes_map[current_node].left = l_node
nodes_map[current_node].right = r_node
nodes_map[current_node].feature = feature
nodes_map[current_node].threshold = threshold
nodes_map[current_node].value = value
current_node += 1
lefts = np.array(lefts)
rights = np.array(rights)
features = np.array(features)
thresholds = np.array(thresholds)
values = np.array(values)
return [nodes_map, ids, lefts, rights, features, thresholds, values]
def get_parameters_for_tree_trav_sklearn(lefts, rights, features, thresholds, values, extra_config={}):
"""
This function is used to generate tree parameters for sklearn trees.
Includes SklearnRandomForestClassifier/Regressor, and SklearnGradientBoostingClassifier.
Args:
left: The left nodes
right: The right nodes
features: The features used in the decision nodes
thresholds: The thresholds used in the decision nodes
values: The values stored in the leaf nodes
Returns:
An array containing the extracted parameters
"""
features = [max(x, 0) for x in features]
values = np.array(values)
if len(values.shape) == 3:
values = values.reshape(values.shape[0], -1)
if values.shape[1] > 1:
values /= np.sum(values, axis=1, keepdims=True)
if constants.NUM_TREES in extra_config:
values /= extra_config[constants.NUM_TREES]
return get_parameters_for_tree_trav_common(lefts, rights, features, thresholds, values)
def get_parameters_for_gemm_common(lefts, rights, features, thresholds, values, n_features, extra_config={}):
"""
Common functions used by all tree algorithms to generate the parameters according to the GEMM strategy.
Args:
left: The left nodes
right: The right nodes
features: The features used in the decision nodes
thresholds: The thresholds used in the decision nodes
values: The values stored in the leaf nodes
n_features: The number of expected input features
Returns:
The weights and bias for the GEMM implementation
"""
values = np.array(values)
weights = []
biases = []
if len(lefts) == 1:
# Model creating trees with just a single leaf node. We transform it
# to a model with one internal node.
lefts = [1, -1, -1]
rights = [2, -1, -1]
features = [0, 0, 0]
thresholds = [0, 0, 0]
n_classes = values.shape[1]
n_classes = values.shape[1]
values = np.array([np.zeros(n_classes), values[0], values[0]])
values.reshape(3, n_classes)
# First hidden layer has all inequalities.
hidden_weights = []
hidden_biases = []
for left, feature, thresh in zip(lefts, features, thresholds):
if left != -1:
hidden_weights.append([1 if i == feature else 0 for i in range(n_features)])
hidden_biases.append(thresh)
weights.append(np.array(hidden_weights).astype("float32"))
biases.append(np.array(hidden_biases).astype("float32"))
n_splits = len(hidden_weights)
# Second hidden layer has ANDs for each leaf of the decision tree.
# Depth first enumeration of the tree in order to determine the AND by the path.
hidden_weights = []
hidden_biases = []
path = [0]
n_nodes = len(lefts)
visited = [False for _ in range(n_nodes)]
class_proba = []
nodes = list(zip(lefts, rights, features, thresholds, values))
while True and len(path) > 0:
i = path[-1]
visited[i] = True
left, right, feature, threshold, value = nodes[i]
if left == -1 and right == -1:
vec = [0 for _ in range(n_splits)]
# Keep track of positive weights for calculating bias.
num_positive = 0
for j, p in enumerate(path[:-1]):
num_leaves_before_p = list(lefts[:p]).count(-1)
if path[j + 1] in lefts:
vec[p - num_leaves_before_p] = 1
num_positive += 1
elif path[j + 1] in rights:
vec[p - num_leaves_before_p] = -1
else:
raise RuntimeError("Inconsistent state encountered while tree translation.")
if values.shape[-1] > 1:
proba = (values[i] / np.sum(values[i])).flatten()
else:
# We have only a single value. e.g., GBDT
proba = values[i].flatten()
# Some Sklearn tree implementations require normalization.
if constants.NUM_TREES in extra_config:
proba /= extra_config[constants.NUM_TREES]
class_proba.append(proba)
hidden_weights.append(vec)
hidden_biases.append(num_positive)
path.pop()
elif not visited[left]:
path.append(left)
elif not visited[right]:
path.append(right)
else:
path.pop()
weights.append(np.array(hidden_weights).astype("float32"))
biases.append(np.array(hidden_biases).astype("float32"))
# OR neurons from the preceding layer in order to get final classes.
weights.append(np.transpose(np.array(class_proba).astype("float32")))
biases.append(None)
return weights, biases
def convert_decision_ensemble_tree_common(
tree_infos, get_parameters, get_parameters_for_tree_trav, n_features, classes=None, extra_config={}
):
tree_parameters, max_depth, tree_type = get_tree_params_and_type(tree_infos, get_parameters, extra_config)
# Generate the tree implementation based on the selected strategy.
if tree_type == TreeImpl.gemm:
net_parameters = [
get_parameters_for_gemm_common(
tree_param.lefts,
tree_param.rights,
tree_param.features,
tree_param.thresholds,
tree_param.values,
n_features,
extra_config,
)
for tree_param in tree_parameters
]
return GEMMDecisionTreeImpl(net_parameters, n_features, classes)
net_parameters = [
get_parameters_for_tree_trav(
tree_param.lefts, tree_param.rights, tree_param.features, tree_param.thresholds, tree_param.values, extra_config,
)
for tree_param in tree_parameters
]
if tree_type == TreeImpl.tree_trav:
return TreeTraversalDecisionTreeImpl(net_parameters, max_depth, n_features, classes, extra_config)
else: # Remaining possible case: tree_type == TreeImpl.perf_tree_trav
return PerfectTreeTraversalDecisionTreeImpl(net_parameters, max_depth, n_features, classes)
| [
"noreply@github.com"
] | noreply@github.com |
62e7ba5ac0d2286b1b931a447d1ceec84e1c04cb | 337bd9d63fd003d4a125dabf0c5e822f4559b0d1 | /stacks_queue_deques/stacks.py | 2a299e17ea448efb653e749b4aa9ad6db8bccb39 | [] | no_license | ferryleaf/GitPythonPrgms | d06c28dc78c03b110158b406238d8211fc09fb8d | 92e6bac29cf28569df0fb66ead74a418db270c03 | refs/heads/master | 2021-08-07T05:14:37.317217 | 2021-08-06T02:03:16 | 2021-08-06T02:03:16 | 239,133,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | '''
Create Basic Stack & it's functionalities
STACK: LIFO[LAST-IN-FAST-OUT]
FUNCTIONALITY:
1. PUSH
2. POP
3. IS_EMPTY
4. LENGTH
5. TOP
'''
class Empty(Exception):
pass
class stack:
def __init__(self):
self.__list=[]
def push(self,val):
self.__list.append(val)
def pop(self):
if not self.is_empty():
self.__list.pop()
else:
raise Empty("Stack is Empty.")
def top(self):
if not self.is_empty():
return self.__list[-1]
else:
raise Empty("Stack is Empty.")
def is_empty(self):
return (self.__len__()==0)
def __len__(self):
return len(self.__list)
def display(self):
return self.__list
if __name__=='__main__':
s=stack()
print(s.is_empty())
s.push(5)
s.push(6)
s.push(7)
print(s.__len__())
print(s.display())
print(s.top())
s.pop()
print(s.display())
s.pop()
s.pop()
print(s.display())
s.pop()
| [
"abhijit.mohapatra@lowes.com"
] | abhijit.mohapatra@lowes.com |
9681691a7788b78ef14b32a5b113a09e94d2ee99 | 08860bc6569c6413508dc0c6321a109092b8e316 | /ukbb_cardiac/common/image_utils.py | 2019879a4bf05e98ef660d8e563a62585d8f1d32 | [] | no_license | aborowska/LVgeometry-prediction | bfea7cd9aa910ba52a5465077cccbfa39ee7c2c8 | 6d82948a83207080d8f02b2b23ed058075a80398 | refs/heads/main | 2023-01-08T11:10:07.290127 | 2020-11-06T13:08:21 | 2020-11-06T13:08:21 | 310,598,433 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,683 | py | # Copyright 2017, Wenjia Bai. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import cv2
import numpy as np
#import nibabel as nib
import tensorflow as tf
from scipy import ndimage
import scipy.ndimage.measurements as measure
def tf_categorical_accuracy(pred, truth):
""" Accuracy metric """
return tf.reduce_mean(tf.cast(tf.equal(pred, truth), dtype=tf.float32))
def tf_categorical_dice(pred, truth, k):
""" Dice overlap metric for label k """
A = tf.cast(tf.equal(pred, k), dtype=tf.float32)
B = tf.cast(tf.equal(truth, k), dtype=tf.float32)
return 2 * tf.reduce_sum(tf.multiply(A, B)) / (tf.reduce_sum(A) + tf.reduce_sum(B))
def crop_image(image, cx, cy, size):
""" Crop a 3D image using a bounding box centred at (cx, cy) with specified size """
X, Y = image.shape[:2]
r = int(size / 2)
x1, x2 = cx - r, cx + r
y1, y2 = cy - r, cy + r
x1_, x2_ = max(x1, 0), min(x2, X)
y1_, y2_ = max(y1, 0), min(y2, Y)
# Crop the image
crop = image[x1_: x2_, y1_: y2_]
# Pad the image if the specified size is larger than the input image size
if crop.ndim == 3:
crop = np.pad(crop,
((x1_ - x1, x2 - x2_), (y1_ - y1, y2 - y2_), (0, 0)),
'constant')
elif crop.ndim == 4:
crop = np.pad(crop,
((x1_ - x1, x2 - x2_), (y1_ - y1, y2 - y2_), (0, 0), (0, 0)),
'constant')
else:
print('Error: unsupported dimension, crop.ndim = {0}.'.format(crop.ndim))
exit(0)
return crop
def normalise_intensity(image, thres_roi=10.0):
""" Normalise the image intensity by the mean and standard deviation """
val_l = np.percentile(image, thres_roi)
roi = (image >= val_l)
mu, sigma = np.mean(image[roi]), np.std(image[roi])
eps = 1e-6
image2 = (image - mu) / (sigma + eps)
return image2
def rescale_intensity(image, thres=(1.0, 99.0)):
""" Rescale the image intensity to the range of [0, 1] """
val_l, val_h = np.percentile(image, thres)
image2 = image
image2[image < val_l] = val_l
image2[image > val_h] = val_h
image2 = (image2.astype(np.float32) - val_l) / (val_h - val_l)
#return image
return image2
def data_augmenter(image, label, shift, rotate, scale, intensity, flip):
"""
Online data augmentation
Perform affine transformation on image and label,
which are 4D tensor of shape (N, H, W, C) and 3D tensor of shape (N, H, W).
"""
image2 = np.zeros(image.shape, dtype=np.float32)
label2 = np.zeros(label.shape, dtype=np.int32)
for i in range(image.shape[0]):
# For each image slice, generate random affine transformation parameters
# using the Gaussian distribution
shift_val = [np.clip(np.random.normal(), -3, 3) * shift,
np.clip(np.random.normal(), -3, 3) * shift]
rotate_val = np.clip(np.random.uniform(-1,1), -3, 3) * rotate
scale_val = 1 + np.clip(np.random.normal(), -3, 3) * scale
intensity_val = 1 + np.clip(np.random.normal(), -3, 3) * intensity
# Apply the affine transformation (rotation + scale + shift) to the image
row, col = image.shape[1:3]
M = cv2.getRotationMatrix2D((row / 2, col / 2), rotate_val, 1.0 / scale_val)
M[:, 2] += shift_val
for c in range(image.shape[3]):
image2[i, :, :, c] = ndimage.interpolation.affine_transform(image[i, :, :, c],
M[:, :2], M[:, 2], order=1)
# Apply the affine transformation (rotation + scale + shift) to the label map
label2[i, :, :] = ndimage.interpolation.affine_transform(label[i, :, :],
M[:, :2], M[:, 2], order=0)
# Apply intensity variation
image2[i] *= intensity_val
# Apply random horizontal or vertical flipping
if flip:
if np.random.uniform() >= 0.7:
image2[i] = image2[i, ::-1, :, :]
label2[i] = label2[i, ::-1, :]
#else:
if np.random.uniform() >= 0.7:
image2[i] = image2[i, :, ::-1, :]
label2[i] = label2[i, :, ::-1]
return image2, label2
def aortic_data_augmenter(image, label, shift, rotate, scale, intensity, flip):
"""
Online data augmentation
Perform affine transformation on image and label,
image: NXYC
label: NXY
"""
image2 = np.zeros(image.shape, dtype=np.float32)
label2 = np.zeros(label.shape, dtype=np.int32)
# For N image. which come come from the same subject in the LSTM model,
# generate the same random affine transformation parameters.
shift_val = [np.clip(np.random.normal(), -3, 3) * shift,
np.clip(np.random.normal(), -3, 3) * shift]
rotate_val = np.clip(np.random.normal(), -3, 3) * rotate
scale_val = 1 + np.clip(np.random.normal(), -3, 3) * scale
intensity_val = 1 + np.clip(np.random.normal(), -3, 3) * intensity
# The affine transformation (rotation + scale + shift)
row, col = image.shape[1:3]
M = cv2.getRotationMatrix2D(
(row / 2, col / 2), rotate_val, 1.0 / scale_val)
M[:, 2] += shift_val
# Apply the transformation to the image
for i in range(image.shape[0]):
for c in range(image.shape[3]):
image2[i, :, :, c] = ndimage.interpolation.affine_transform(
image[i, :, :, c], M[:, :2], M[:, 2], order=1)
label2[i, :, :] = ndimage.interpolation.affine_transform(
label[i, :, :], M[:, :2], M[:, 2], order=0)
# Apply intensity variation
image2[i] *= intensity_val
# Apply random horizontal or vertical flipping
if flip:
if np.random.uniform() >= 0.5:
image2[i] = image2[i, ::-1, :, :]
label2[i] = label2[i, ::-1, :]
else:
image2[i] = image2[i, :, ::-1, :]
label2[i] = label2[i, :, ::-1]
return image2, label2
def np_categorical_dice(pred, truth, k):
""" Dice overlap metric for label k """
A = (pred == k).astype(np.float32)
B = (truth == k).astype(np.float32)
return 2 * np.sum(A * B) / (np.sum(A) + np.sum(B))
def distance_metric(seg_A, seg_B, dx):
"""
Measure the distance errors between the contours of two segmentations.
The manual contours are drawn on 2D slices.
We calculate contour to contour distance for each slice.
"""
table_md = []
table_hd = []
X, Y, Z = seg_A.shape
for z in range(Z):
# Binary mask at this slice
slice_A = seg_A[:, :, z].astype(np.uint8)
slice_B = seg_B[:, :, z].astype(np.uint8)
# The distance is defined only when both contours exist on this slice
if np.sum(slice_A) > 0 and np.sum(slice_B) > 0:
# Find contours and retrieve all the points
_, contours, _ = cv2.findContours(cv2.inRange(slice_A, 1, 1),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)
pts_A = contours[0]
for i in range(1, len(contours)):
pts_A = np.vstack((pts_A, contours[i]))
_, contours, _ = cv2.findContours(cv2.inRange(slice_B, 1, 1),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)
pts_B = contours[0]
for i in range(1, len(contours)):
pts_B = np.vstack((pts_B, contours[i]))
# Distance matrix between point sets
M = np.zeros((len(pts_A), len(pts_B)))
for i in range(len(pts_A)):
for j in range(len(pts_B)):
M[i, j] = np.linalg.norm(pts_A[i, 0] - pts_B[j, 0])
# Mean distance and hausdorff distance
md = 0.5 * (np.mean(np.min(M, axis=0)) + np.mean(np.min(M, axis=1))) * dx
hd = np.max([np.max(np.min(M, axis=0)), np.max(np.min(M, axis=1))]) * dx
table_md += [md]
table_hd += [hd]
# Return the mean distance and Hausdorff distance across 2D slices
mean_md = np.mean(table_md) if table_md else None
mean_hd = np.mean(table_hd) if table_hd else None
return mean_md, mean_hd
def get_largest_cc(binary):
""" Get the largest connected component in the foreground. """
cc, n_cc = measure.label(binary)
max_n = -1
max_area = 0
for n in range(1, n_cc + 1):
area = np.sum(cc == n)
if area > max_area:
max_area = area
max_n = n
largest_cc = (cc == max_n)
return largest_cc
def remove_small_cc(binary, thres=10):
""" Remove small connected component in the foreground. """
cc, n_cc = measure.label(binary)
binary2 = np.copy(binary)
for n in range(1, n_cc + 1):
area = np.sum(cc == n)
if area < thres:
binary2[cc == n] = 0
return binary2
def split_sequence(image_name, output_name):
""" Split an image sequence into a number of time frames. """
nim = nib.load(image_name)
T = nim.header['dim'][4]
affine = nim.affine
image = nim.get_data()
for t in range(T):
image_fr = image[:, :, :, t]
nim2 = nib.Nifti1Image(image_fr, affine)
nib.save(nim2, '{0}{1:02d}.nii.gz'.format(output_name, t))
def make_sequence(image_names, dt, output_name):
""" Combine a number of time frames into one image sequence. """
nim = nib.load(image_names[0])
affine = nim.affine
X, Y, Z = nim.header['dim'][1:4]
T = len(image_names)
image = np.zeros((X, Y, Z, T))
for t in range(T):
image[:, :, :, t] = nib.load(image_names[t]).get_data()
nim2 = nib.Nifti1Image(image, affine)
nim2.header['pixdim'][4] = dt
nib.save(nim2, output_name)
def split_volume(image_name, output_name):
""" Split an image volume into a number of slices. """
nim = nib.load(image_name)
Z = nim.header['dim'][3]
affine = nim.affine
image = nim.get_data()
for z in range(Z):
image_slice = image[:, :, z]
image_slice = np.expand_dims(image_slice, axis=2)
affine2 = np.copy(affine)
affine2[:3, 3] += z * affine2[:3, 2]
nim2 = nib.Nifti1Image(image_slice, affine2)
nib.save(nim2, '{0}{1:02d}.nii.gz'.format(output_name, z))
def image_apply_mask(input_name, output_name, mask_image, pad_value=-1):
# Assign the background voxels (mask == 0) with pad_value
nim = nib.load(input_name)
image = nim.get_data()
image[mask_image == 0] = pad_value
nim2 = nib.Nifti1Image(image, nim.affine)
nib.save(nim2, output_name)
def padding(input_A_name, input_B_name, output_name, value_in_B, value_output):
nim = nib.load(input_A_name)
image_A = nim.get_data()
image_B = nib.load(input_B_name).get_data()
image_A[image_B == value_in_B] = value_output
nim2 = nib.Nifti1Image(image_A, nim.affine)
nib.save(nim2, output_name)
def auto_crop_image(input_name, output_name, reserve):
nim = nib.load(input_name)
image = nim.get_data()
X, Y, Z = image.shape[:3]
# Detect the bounding box of the foreground
idx = np.nonzero(image > 0)
x1, x2 = idx[0].min() - reserve, idx[0].max() + reserve + 1
y1, y2 = idx[1].min() - reserve, idx[1].max() + reserve + 1
z1, z2 = idx[2].min() - reserve, idx[2].max() + reserve + 1
x1, x2 = max(x1, 0), min(x2, X)
y1, y2 = max(y1, 0), min(y2, Y)
z1, z2 = max(z1, 0), min(z2, Z)
print('Bounding box')
print(' bottom-left corner = ({},{},{})'.format(x1, y1, z1))
print(' top-right corner = ({},{},{})'.format(x2, y2, z2))
# Crop the image
image = image[x1:x2, y1:y2, z1:z2]
# Update the affine matrix
affine = nim.affine
affine[:3, 3] = np.dot(affine, np.array([x1, y1, z1, 1]))[:3]
nim2 = nib.Nifti1Image(image, affine)
nib.save(nim2, output_name)
| [
"agnborowska@gmail.com"
] | agnborowska@gmail.com |
d8b8bd60e41caf83734e3c90563d6e80ca23a67d | 903534cec856db92d4d08c783c7584ce7cba632a | /caixaemail/caixaemail/wsgi.py | edfb0f46c6b40bc44b0a591464186f6d8afc078e | [] | no_license | paulovpereira/caixaEmail | 2fd619231c894a35f48ee486d0f2ffdb478ce0d0 | 854c397d6b62d949e5173663ef804bc209eb9cc2 | refs/heads/master | 2016-08-03T16:59:13.835180 | 2014-03-07T17:01:07 | 2014-03-07T17:01:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for caixaemail project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "caixaemail.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [
"paulo.vinicius117@gmail.com"
] | paulo.vinicius117@gmail.com |
ea98d8e05cb4511ae32a5753decfa4383195e542 | f50d5362778942d0940ff4e46f23e861696762a7 | /P1-Linear_Regression/P1-Linear_Regression.py | 824e8188e0b0b7624a88a64e49f323f711cd243e | [] | no_license | yairg98/Frequentist-Machine-Learning | 384750a7a607e2f0858d02be384efc763a6ebd5f | 6fac5550833f60e5357cb346b5aa8aec3ef5065c | refs/heads/master | 2023-01-01T04:01:09.361109 | 2020-10-27T01:56:38 | 2020-10-27T01:56:38 | 277,397,910 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,495 | py | import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
from sklearn import linear_model
training = 'https://raw.githubusercontent.com/yairg98/Freq-ML/master/P1-Linear_Regression/Construction_Training.csv'
validation = 'https://raw.githubusercontent.com/yairg98/Freq-ML/master/P1-Linear_Regression/Construction_Validation.csv'
testing = 'https://raw.githubusercontent.com/yairg98/Freq-ML/master/P1-Linear_Regression/Construction_Testing.csv'
# Normalize all data to the range (0,1) - (all features are always >=0)
def normalize(X):
for j in range(len(X[0])):
high = X[0][j]
low = X[0][j]
for i in X:
high = max(high, i[j])
low = min(low, i[j])
for i in X:
i[j] = (i[j]-low)/(high-low)
return X
# Download and separate dataset into input features (X) and outputs (y)
def getData(url):
# Retrieve data from Github and store in Pandas Dataframe
df = pd.read_csv(url)
# Specify the output column ('charges', in this case)
y = df['Y']
del df['Y']
columns = list(df.columns.values)
# Define feature matrix
X = df.to_numpy()
# Normalize the input data:
X = normalize(X)
# Add leading column of 1s:
new_col = []
for i in range(len(X)):
new_col.append(1)
X = np.insert(X, 0, new_col, axis=1)
return [X, y, columns]
# Find the betas given a dataset and, optionally, lamda (for ridge regression)
def getBeta_ridge(ds1, lamda=0):
X = ds1[0]
y = ds1[1]
# Calculate betas using equation 3.44
Xt = X.transpose()
I = lamda*np.identity(len(X[0]))
m = np.matmul(np.linalg.inv(np.add(np.matmul(Xt, X), I)), Xt)
beta = np.matmul(m, y)
return beta
# Find the betas for given dataset and lamda using sklearn's Lasso model
def getBeta_lasso(ds1, lamda):
x = ds1[0]
y = ds1[1]
lasso = linear_model.Lasso(alpha=lamda) # create model
lasso.fit(x, y) # train model
beta = lasso.coef_
beta[0] = lasso.intercept_
return beta
# Calcualte RMSE of linear regression model on ds1 using provided betas
def getRMSE(ds1, beta):
# Load validation/testing data
X = ds1[0]
y = ds1[1]
# Calclate RSS and MSE using equation 3.43
Xb = np.matmul(X, beta)
err = np.subtract(y, Xb)
err_t = err.transpose()
RSS = np.matmul(err_t, err)
MSE = RSS/len(y)
RMSE = math.sqrt(MSE)
return RMSE
# Return RMSE of ridge regression model with provided lamda and datasets
def tryModel_ridge(ds1, ds2, lamda=0):
beta = getBeta_ridge(ds1, lamda) # Train model on ds1
RMSE = getRMSE(ds2, beta) # Get RMSE of model on ds2
return RMSE
# Return RMSE of lasso regression model with provided lamda and datasets
def tryModel_lasso(ds1, ds2, lamda):
beta = getBeta_lasso(ds1, lamda)
RMSE = getRMSE(ds2, beta)
return RMSE
# Evaluate ridge regression model on ds2 for each lamda in S
def bestLam_ridge(ds1, ds3, S):
min_err = tryModel_ridge(ds1, ds3, S[0])
for i in S:
err = tryModel_ridge(ds1, ds3, i)
if err < min_err:
min_err = err
lam = i
return lam
# Evaluate ridge regression model on ds2 for each lamda in S
def bestLam_lasso(ds1, ds3, S):
lam = S[0]
min_err = tryModel_lasso(ds1, ds3, lam)
for i in S:
err = tryModel_lasso(ds1, ds3, i)
if err < min_err:
min_err = err
lam = i
return lam
'''
Part A - Unregulated linear regression:
Using the same function as for Ridge regression, but allowing lamda to
default to zero, thereby functioning as an unregulated regression
'''
print("Part A - Unregularized Linear Regression:")
ds1 = getData(training) # load training set
ds2 = getData(testing) # load testing set
print("RMSE of the unregularized linear regression model on the testing set: "
+ str(tryModel_ridge(ds1, ds2)))
print("RMSE of the unregularized linear regression model on the training set: "
+ str(tryModel_ridge(ds1, ds1)))
print()
'''
Part B - Ridge regression:
Testing for lamda between 0 and 20 (increments of 1).
Surprisingly, the performance of te model on the testing set seems be worse
after regularization.
'''
print("Part B - Ridge Regression:")
ds3 = getData(validation) # load validation set
S = range(20)
lam = bestLam_ridge(ds1, ds3, S)
print("Best lamda as evaluated on validation set: " + str(lam))
print("RMSE on testing set, using lamda=" + str(lam) + ": "
+ str(tryModel_ridge(ds1, ds2, lam)))
print("RMSE on training set, using lamda=" + str(lam) + ": "
+ str(tryModel_ridge(ds1, ds1, lam)))
print()
# Creating Ridge plot
beta_mat = [] # Matrix of all beta coefficients for all tested lamdas
for lam in S:
beta_mat.append(getBeta_ridge(ds1, lam))
beta_mat = np.array(beta_mat).T.tolist() # Transpose the beta matrix
plt.figure(1)
for x in beta_mat:
plt.plot(S, x)
plt.xlabel('λ')
plt.ylabel('Coefficients')
plt.title('Part B: Ridge Plot')
columns = ds1[2]
plt.legend(labels=columns)
'''
Part C - Lasso regression:
Testing for log(lamda) at 100 values between -5 and 5.
Here too, the performance of the model seems to be worse after performing
lasso regularization.
'''
print("Part C - Lasso regression:")
S = np.linspace(-5, 5, num=100) # range and number of tested lamda values
bestLam = bestLam_lasso(ds1, ds3, 10**S)
print("Best lamda as evaluated on validation set: " + str(bestLam))
print("RMSE on testing set, using lamda=" + str(bestLam) + ": "
+ str(tryModel_lasso(ds1, ds2, bestLam)))
print("RMSE on training set, using lamda=" + str(bestLam) + ": "
+ str(tryModel_lasso(ds1, ds1, bestLam)))
print()
# Creating Lasso plot
beta_mat = [] # Matrix of all beta coefficients for all tested lamdas
for lam in 10**S:
beta_mat.append(getBeta_lasso(ds1, lam))
beta_mat = np.array(beta_mat).T.tolist() # Transpose the beta matrix
plt.figure(2)
for x in beta_mat:
plt.plot(S, x)
plt.xlabel('log(λ)')
plt.ylabel('Coefficients')
plt.title('Part B: Lasso Plot')
plt.legend(labels=columns)
# Creating Predicted vs Actual y_out plot
plt.figure(3)
y_real = ds2[1]
y_out = np.matmul(ds2[0], getBeta_lasso(ds1, bestLam))
plt.plot(y_real, y_out, 'o')
plt.xlabel('Actual Value')
plt.ylabel('Predicted Value')
plt.title('Actual vs Predicted Y-Values')
m, b = np.polyfit(y_real, y_out, 1)
plt.plot(y_real, m*y_real + b)
caption = "Line of best fit: y = "+str(round(m,2))+"x + "+str(round(b,2))
plt.text(8,65,caption)
'''
Comments:
Neither Ridge nor Lasso regularization improved upon the initial model
(unregularized). An explanation for this may be suggested by another
peculiar aspect of the data, that each model performed only slightly better
on the testing data than on the training data. Whereas regularization was
used to avoid overfitting, poor performance on the training data would
suggest that our linear model was actually underfitting the data. This
could be due to (1) an incomplete/insufficient feature set, (2) the data
being partially random/non-deterministic, or (3) the data being inherently
non-linear.
That being said, all three models did show a clear correlation between the
actual and the predicted outputs, even if there was a lot of noise.
'''
| [
"noreply@github.com"
] | noreply@github.com |
68f8a48b2c8fe732d16c7e88d7f6aabe310eab73 | b671ed28ebb5f9ce6d3fd945fb596d66366469b8 | /client_list_person_server.py | f58aadf856ff956d0cd8e23e018ee4ac45365426 | [] | no_license | robtreacycollege/College_Day3 | d66c215f7248b6141ef91491035e757435afcf8d | 87fbf75d44313160d550448b7f1a00ca98f0905d | refs/heads/master | 2021-01-15T17:09:43.165972 | 2015-03-11T22:32:55 | 2015-03-11T22:32:55 | 32,019,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | __author__ = 'anngordon'
import time
import addressbook_pb2
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
_FILE_NAME = 'output'
class Client_List_Person_From_File(addressbook_pb2.EarlyAdopterClient_List_Person_From_FileServicer):
def Client_List_Person(self, request, context):
print("On server before print out details")
print(request.SerializeToString())
print("On server before returning")
return addressbook_pb2.ServerResponse(message='Server Response: Success write out')
def serve():
server = addressbook_pb2.early_adopter_create_Client_List_Person_From_File_server(
Client_List_Person_From_File(), 50051, None, None)
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop()
if __name__ == '__main__':
serve() | [
"robtreacywork@gmail.com"
] | robtreacywork@gmail.com |
24b6a32a2f199199b4ed496fe7d3303761afda58 | e8486731cced94056574e92f8d7fb11b36d49518 | /zzh/build/lib/zzh/middlewares.py | 84d6db246ac9ac44c166bc471d9cdca82d66bae4 | [] | no_license | jinuoA/spider_zzh | 3bef91350b0794ef22dc60a261e890b82c08e99b | 3db097b28679b520d2abedac05e544f98b0b89e2 | refs/heads/master | 2022-12-11T10:01:12.685038 | 2019-05-06T03:05:45 | 2019-05-06T03:05:45 | 185,114,452 | 0 | 0 | null | 2022-12-08T01:45:15 | 2019-05-06T03:04:27 | Python | UTF-8 | Python | false | false | 3,269 | py | # encoding=utf-8
import random
import scrapy
from scrapy import log
import time
import os
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
from .conf.user_agents import (
agents
)
# logger = logging.getLogger()
class ProxyMiddleWare(object):
"""docstring for ProxyMiddleWare"""
def process_request(self, request, spider):
'''对request对象加上proxy'''
proxy = self.get_random_proxy()
if request.meta.get('splash', None):
print("this is splash request ip:" + proxy)
request.meta['splash']['args']['proxy'] = proxy
request.headers["Proxy-Authorization"] = proxy
else:
print("this is request ip:" + proxy)
request.meta['proxy'] = proxy
def process_response(self, request, response, spider):
'''对返回的response处理'''
# 如果返回的response状态不是200,重新生成当前request对象
if response.status != 200:
proxy = self.get_random_proxy()
print("this is response ip:" + proxy)
if request.meta.get('splash', None):
print("this is splash request ip:" + proxy)
request.meta['splash']['args']['proxy'] = proxy
request.headers["Proxy-Authorization"] = proxy
else:
print("this is request ip:" + proxy)
request.meta['proxy'] = proxy
return request
return response
def get_random_proxy(self):
'''随机从文件中读取proxy'''
path = os.path.dirname(os.path.realpath(__file__))
while 1:
with open(path + '\\conf\\ip_proxy.ini', 'r') as f:
proxies = f.readlines()
if proxies:
break
else:
time.sleep(1)
proxy = random.choice(proxies).strip()
return proxy
# url_agent
class RotateUserAgentMiddleware(object):
"""docstring for url_agent"""
def __init__(self, user_agent=''):
self.user_agent = user_agent
self.reconnect_num = 0
def process_request(self, request, spider):
url_agent = random.choice(agents)
if url_agent:
print('this is User-Agent', url_agent)
request.headers.setdefault('User-Agent', url_agent)
def process_response(self, request, response, spider):
'''对返回的response处理, setting禁止重定向的'''
# 如果返回的response状态不是200,重新生成当前request对象
# 200请求成功、301重定向, 302临时重定向,
# 303重定向、400请求错误、401未授权、403禁止访问、404文件未找到、500服务器错误
status_code = [200, 301, 302, 303, 404, 500]
if response.status not in status_code:
self.reconnect_num += 1
if self.reconnect_num > 9:
self.reconnect_num = 0
return response
url_agent = random.choice(agents)
print("response.status:", response.status)
print("restart agent:" + url_agent)
# 对当前reque加上代理
request.headers.setdefault('User-Agent', url_agent)
return request
return response
| [
"wangjun@zhengzihui.com"
] | wangjun@zhengzihui.com |
c89ee79327c7d095b6dc5eaf86fa82a7e432dbd6 | ef38e5fe75027168292d5fc3709944fb079e8186 | /爬虫案例/爬虫3/案例6-添加手机请求头.py | c8544f69bb65e4933f46974fb454a7231f6d00f3 | [] | no_license | s123-x/Spider1907 | a6122cb5f310483aa94f6c559a6c546a0462888c | 14bca857d666b6e20455efc0951abc796b38f40a | refs/heads/master | 2022-12-01T01:04:34.982652 | 2020-08-13T03:36:12 | 2020-08-13T03:36:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | '''
添加手机请求头,模拟手机浏览器
'''
from urllib import request,parse
url = 'http://httpbin.org/post'
dict = {'name': 'admin'}
# 参数转化为byte
data = bytes(parse.urlencode(dict), encoding='utf8')
headers={
'User-Agent': 'Mozilla/5.0(iPhone;U;CPUiPhoneOS4_3_3likeMacOSX;en-us)AppleWebKit/533.17.9(KHTML,likeGecko)Version/5.0.2Mobile/8J2Safari/6533.18.5',
}
# 自己组装的请求
req =request.Request(url=url,data=data,headers=headers)
response = request.urlopen(req)
print(response.status)
print(response.read().decode('utf-8')) | [
"zhengyang-zhou@163.com"
] | zhengyang-zhou@163.com |
6d2bf13154ad6fdcf5d59e30141b1b138adf553d | d8c3167deb6865b3f6ba5caa3d641ebb77be4382 | /fanlytiks.py | 0cfdd05b89fd71bc4931e44f6b633714ebbb1b6b | [] | no_license | milanmenezes/fanlytiks | 490f72201e931c39fe4cb60a42b72528cd8184b0 | 6c83e06a44f3d8d25a73fd75ceeda838dbb9590a | refs/heads/master | 2020-03-18T16:35:41.401066 | 2018-06-01T13:35:07 | 2018-06-01T13:35:07 | 134,975,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,990 | py | from flask import Flask, render_template, request, send_from_directory
import requests
import psycopg2
import sys
reload(sys)
sys.setdefaultencoding('utf8')
app = Flask(__name__)
# @app.route('/sitemap.xml')
# def static_from_root():
# return send_from_directory(app.static_folder, request.path[1:])
@app.route('/')
@app.route('/fanlytiks')
def index():
conn = psycopg2.connect(database = "aws", user = "milanmenezes", password = "nightfury", host = "milan-aws.crbk9i7trzoq.ap-south-1.rds.amazonaws.com", port = "5432")
#general info
info={}
cur = conn.cursor()
cur.execute("select count(*) from datastore;")
info["tcount"]=cur.fetchone()[0]
cur.close()
cur = conn.cursor()
cur.execute("select count(*) from (select distinct userid from datastore) as a;")
info["ucount"]=cur.fetchone()[0]
cur.close()
cur = conn.cursor()
cur.execute("select count(*) from datastore where media")
info["mcount"]=cur.fetchone()[0]
cur.close()
#latest tweets
cur = conn.cursor()
cur.execute("select distinct ttime, twtext from datastore order by ttime desc limit 10;")
latest=cur.fetchall()
cur.close()
#tweets with most retweets
cur = conn.cursor()
cur.execute("Select max(retweet) as r, twtext FROM datastore group by twtext order by r desc limit 10;")
retweet=cur.fetchall()
cur.close()
#tweets with most favourites
cur = conn.cursor()
cur.execute("Select max(favourite) as r, twtext FROM datastore group by twtext order by r desc limit 10;")
favourite=cur.fetchall()
cur.close()
#tweets per day
cur = conn.cursor()
cur.execute("select to_char(ttime,'YYYY-MM-DD') as day, count(to_char(ttime,'DD-MM-YY')) as tweets from datastore group by day order by day;")
tpd=cur.fetchall()
cur.close()
x=['x']
y=['tweets']
for i in tpd:
x.append(i[0])
y.append(int(i[1]))
tpd=[x,y]
# return str(data)
return render_template("index.html",info=info,tpd=tpd,latest=latest,retweet=retweet, favourite=favourite)
if __name__ == '__main__':
app.run(host='0.0.0.0') | [
"milanmenezes@gmail.com"
] | milanmenezes@gmail.com |
48f118a8e7d7219a984a8b2601ee89cd22b87f28 | 15fe288d5f8983a6c185cd0bcb7e0deac97efb62 | /mylib/my_logger.py | 360720637e4d244a922015b757bf66e90e528ec7 | [] | no_license | oyucube/traffic | 98955cd37e1b64bed752c94e3e0f442afbd7fadd | 6f4bc3873281b6c8cce91a904836f887947de0f2 | refs/heads/master | 2020-03-20T12:56:52.718451 | 2019-02-08T05:56:13 | 2019-02-08T05:56:13 | 137,444,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,291 | py | import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class LOGGER:
def __init__(self, log_file_path, file_id, log_mode=0, n_epoch=30):
self.log_file_path = log_file_path
self.file_id = file_id
self.log_mode = log_mode
self.log_buf = []
self.val_array = np.full_like(np.zeros(n_epoch), np.nan)
self.train_array = np.full_like(np.zeros(n_epoch), np.nan)
self.loss_array = np.full_like(np.zeros(n_epoch), np.nan)
self.baseline = np.full_like(np.zeros(n_epoch), np.nan)
self.max_acc = 0
self.best = ""
with open(log_file_path + "log.txt", "w") as f:
f.write(" ")
def l_print(self, sentence):
print(sentence)
self.log_buf.append(sentence)
return
def update_log(self):
with open(self.log_file_path + "log.txt", "a") as f:
for buf in self.log_buf:
f.write(buf)
self.log_buf = []
return
def set_loss(self, loss, epoch):
self.loss_array[epoch] = loss
def set_acc(self, train, val, epoch):
self.train_array[epoch] = train
self.val_array[epoch] = val
self.best = ""
if val > self.max_acc:
self.max_acc = val
self.best = "best_"
self.l_print("train_acc:{:1.4f} test_acc:{:1.4f}\n".format(train, val))
def set_baseline(self, b, epoch):
self.baseline[epoch] = b
def save_acc(self):
np.save(self.log_file_path + self.file_id + "test.npy", self.val_array)
np.save(self.log_file_path + self.file_id + "train.npy", self.train_array)
np.save(self.log_file_path + self.file_id + "base.npy", self.baseline)
plt.figure()
plt.ylim([0, 1])
p1 = plt.plot(self.val_array, color="green")
p2 = plt.plot(self.train_array, color="blue")
plt.legend((p1[0], p2[0]), ("test", "train"), loc=2)
plt.savefig(self.log_file_path + self.file_id + "acc.png")
plt.figure()
plt.plot(self.loss_array)
plt.savefig(self.log_file_path + self.file_id + "loss.png")
plt.figure()
plt.plot(self.baseline)
plt.savefig(self.log_file_path + self.file_id + "base.png")
plt.close("all")
| [
"y-murata@ist.osaka-u.ac.jp"
] | y-murata@ist.osaka-u.ac.jp |
cf8a4e8ee9ab5bec84107722532d44c30bb836ac | 0494c9caa519b27f3ed6390046fde03a313d2868 | /commit-queue/tests/buildbot_json_test.py | 561e59a3ca46c6615888e6b6922332b1b8423227 | [
"BSD-3-Clause"
] | permissive | mhcchang/chromium30 | 9e9649bec6fb19fe0dc2c8b94c27c9d1fa69da2c | 516718f9b7b95c4280257b2d319638d4728a90e1 | refs/heads/master | 2023-03-17T00:33:40.437560 | 2017-08-01T01:13:12 | 2017-08-01T01:13:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,606 | py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for buildbot_json.py."""
import json
import logging
import os
import cStringIO
import StringIO
import sys
import unittest
import urllib
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(ROOT_DIR, '..'))
import find_depot_tools # pylint: disable=W0611
from testing_support import auto_stub
# in tests/
import reduce_test_data # pylint: disable=F0401
# In root
import buildbot_json
class BuildbotJsonTest(auto_stub.TestCase):
def setUp(self):
super(BuildbotJsonTest, self).setUp()
# Default mock.
self.old_urlopen = self.mock(urllib, 'urlopen', self.mockurlopen)
self.mock(sys, 'stderr', cStringIO.StringIO())
self.mock(sys, 'stdout', cStringIO.StringIO())
self.mock(buildbot_json.time, 'time', lambda: 1325394000.01)
self.url = 'http://build.chromium.org/p/tryserver.chromium'
self.datadir = os.path.join(ROOT_DIR, 'data')
if not os.path.isdir(self.datadir):
os.mkdir(self.datadir)
self.test_id = self.id().split('BuildbotJsonTest.', 1)[1]
self.filepath = os.path.join(self.datadir, self.test_id) + '.json'
self.queue = []
self.training = False
if os.path.isfile(self.filepath):
self.queue = json.load(open(self.filepath))
# Auto upgrade old data.
for i in xrange(len(self.queue)):
url = self.queue[i][0]
if not url.endswith('filter=1'):
if '?' in url:
url += '&filter=1'
else:
url += '?filter=1'
self.queue[i][0] = url
logging.warn('Auto-convert to training because missing filter=1.')
self.training = True
self.queue_index = 0
self.reducer = reduce_test_data.Filterer()
def tearDown(self):
try:
if not self.has_failed():
if self.queue_index < len(self.queue):
self.queue = self.queue[:self.queue_index]
logging.warning('Auto-convert to training because of queue overflow')
self.training = True
if self.training:
json.dump(self.queue, open(self.filepath, 'w'), separators=(',',':'))
self.assertEqual(self.queue_index, len(self.queue))
self.assertOut('stderr', '')
self.assertOut('stdout', '')
else:
if self.training:
logging.error('Not saving data even if in training mode.')
finally:
# Make sure the super class tearDown() function is called so stubs are
# removed.
super(BuildbotJsonTest, self).tearDown()
if self.training:
self.fail(
'Don\'t worry, it\'s just updating internal files. Please run '
'again.\n%s' % '\n'.join(q[0] for q in self.queue))
def assertOut(self, out, expected):
"""Check stderr/stdout and resets it."""
self.assertEqual(str(expected), str(getattr(sys, out).getvalue()))
self.mock(sys, out, cStringIO.StringIO())
def mockurlopen(self, url):
self.assertTrue(self.queue_index <= len(self.queue))
if self.queue_index != len(self.queue):
expected_url, data = self.queue[self.queue_index]
if url != expected_url:
logging.warn(
'Auto-convert to training because %s != %s.' % (url, expected_url))
self.training = True
# Delete the remainder of the queue.
self.queue = self.queue[:self.queue_index]
if self.queue_index == len(self.queue):
data = self.old_urlopen(url).read()
self.training = True
# Re-filter it.
try:
data = json.loads(data)
except ValueError:
self.fail('Failed to decode %s' % url)
expected_url, new_data = self.reducer.filter_response(url, data)
assert new_data
new_data_json = json.dumps(new_data, separators=(',',':'))
if self.queue_index == len(self.queue):
self.queue.append((url, new_data_json))
elif new_data != data:
logging.warn(
'Auto-convert to training because url %s\n%s != %s.' % (
url, data, new_data))
self.queue[self.queue_index] = [url, new_data_json]
self.training = True
channel = StringIO.StringIO(new_data_json)
channel.headers = '<mocked headers>'
self.queue_index += 1
return channel
def testCommands(self):
# Assert no new command was added, otherwise a test needs to be written.
expected = [
'busy',
'builds',
'count',
'current',
'disconnected',
'help',
'idle',
'interactive',
'last_failure',
'pending',
'run',
]
actual = [i[3:] for i in dir(buildbot_json) if i.startswith('CMD')]
self.assertEqual(sorted(expected), sorted(actual))
for i in actual:
self.assertTrue(hasattr(self, 'testCMD' + i))
def testCMDbusy(self):
parser = buildbot_json.gen_parser()
self.assertEqual(
0,
buildbot_json.CMDbusy(parser, [self.url, '-b', 'linux']))
filepath = os.path.join(self.datadir, self.test_id) + '_expected.txt'
if self.training or not os.path.isfile(filepath):
# pylint: disable=E1101
json.dump(sys.stdout.getvalue(), open(filepath, 'w'))
expected = json.load(open(filepath))
self.assertOut('stdout', expected)
def testCMDbuilds(self):
parser = buildbot_json.gen_parser()
self.assertEqual(
0,
buildbot_json.CMDbuilds(
parser, [self.url, '-b', 'linux', '-s', 'vm146-m4', '-q']))
filepath = os.path.join(self.datadir, self.test_id) + '_expected.txt'
if self.training or not os.path.isfile(filepath):
# pylint: disable=E1101
json.dump(sys.stdout.getvalue(), open(filepath, 'w'))
expected = json.load(open(filepath))
self.assertOut('stdout', expected)
def testCMDcount(self):
self.mock(buildbot_json.time, 'time', lambda: 1348166285.56)
parser = buildbot_json.gen_parser()
self.assertEqual(
0,
buildbot_json.CMDcount(
parser, [self.url, '-b', 'linux', '-o' '360']))
filepath = os.path.join(self.datadir, self.test_id) + '_expected.txt'
if self.training or not os.path.isfile(filepath):
# pylint: disable=E1101
json.dump(sys.stdout.getvalue(), open(filepath, 'w'))
expected = json.load(open(filepath))
self.assertOut('stdout', expected)
def testCMDdisconnected(self):
parser = buildbot_json.gen_parser()
self.assertEqual(
0,
buildbot_json.CMDdisconnected(parser, [self.url]))
self.assertOut(
'stdout',
'vm112-m4\nvm122-m4\nvm124-m4\nvm131-m4\nvm134-m4\nvm139-m4\nvm143-m4\n'
'vm146-m4\nvm157-m4\nvm162-m4\nvm165-m4\nvm60-m4\nvm62-m4\nvm64-m4\n')
def testCMDhelp(self):
parser = buildbot_json.gen_parser()
self.assertEqual(0, buildbot_json.CMDhelp(parser, []))
# No need to check exact output here.
# pylint: disable=E1101
self.assertTrue(
'show program\'s version number and exit\n' in sys.stdout.getvalue())
self.mock(sys, 'stdout', cStringIO.StringIO())
def testCMDidle(self):
parser = buildbot_json.gen_parser()
self.assertEqual(
0,
buildbot_json.CMDidle(parser, [self.url, '--builder', 'linux_clang']))
self.assertOut(
'stdout', 'Builder linux_clang: vm104-m4, vm113-m4, vm165-m4\n')
def testCMDinteractive(self):
self.mock(sys, 'stdin', cStringIO.StringIO('exit()'))
parser = buildbot_json.gen_parser()
try:
# TODO(maruel): Real testing.
buildbot_json.CMDinteractive(parser, [self.url])
self.fail()
except SystemExit:
pass
self.assertOut(
'stderr',
'Buildbot interactive console for "http://build.chromium.org'
'/p/tryserver.chromium".\nHint: Start with typing: '
'\'buildbot.printable_attributes\' or \'print str(buildbot)\' to '
'explore.\n')
self.assertOut('stdout', '>>> ')
def testCMDlast_failure(self):
parser = buildbot_json.gen_parser()
self.assertEqual(
0,
buildbot_json.CMDlast_failure(
parser, [self.url, '-b', 'linux', '--step', 'compile']))
self.assertOut(
'stdout',
'27369 on vm136-m4: blame:jam@chromium.org\n'
'27367 on vm158-m4: blame:jam@chromium.org\n')
def testCMDpending(self):
parser = buildbot_json.gen_parser()
self.assertEqual(0, buildbot_json.CMDpending(parser, [self.url]))
self.assertOut('stdout',
"Builder linux_touch: 2\n"
" revision: HEAD\n change:\n comment: u''\n"
" who: saintlou@google.com\n revision: HEAD\n change:\n"
" comment: u''\n who: saintlou@google.com\n")
def testCMDcurrent(self):
parser = buildbot_json.gen_parser()
self.assertEqual(0, buildbot_json.CMDcurrent(parser, [self.url]))
filepath = os.path.join(self.datadir, self.test_id) + '_expected.txt'
if self.training or not os.path.isfile(filepath):
# pylint: disable=E1101
json.dump(sys.stdout.getvalue(), open(filepath, 'w'))
expected = json.load(open(filepath))
self.assertOut('stdout', expected)
def testCMDrun(self):
parser = buildbot_json.gen_parser()
self.assertEqual(
0,
buildbot_json.CMDrun(
parser, [self.url, "print '\\n'.join(buildbot.builders.keys)"]))
self.assertOut('stdout', 'linux\nlinux_clang\nlinux_touch\n')
def testCurrentBuilds(self):
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
actual = []
for builder in b.builders:
self.assertEqual([], list(builder.current_builds.cached_children))
i = 0
last_build = None
for c in builder.current_builds:
self.assertEqual(builder, c.builder)
actual.append(str(c))
i += 1
last_build = c
if i:
self.assertEqual(last_build.number, builder.builds[-1].number)
self.assertEqual(i, len(list(builder.current_builds.cached_children)))
builder.current_builds.discard()
self.assertEqual([], list(builder.current_builds.cached_children))
filepath = os.path.join(self.datadir, self.test_id) + '_expected.json'
if self.training or not os.path.isfile(filepath):
json.dump(actual, open(filepath, 'w'))
expected = json.load(open(filepath))
self.assertEqual(expected, actual)
def test_builds_reverse(self):
# Check the 2 last builds from 'linux' using iterall() instead of
# __iter__(). The test also confirms that the build object itself is not
# loaded.
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
actual = []
for b in b.builders['linux'].builds.iterall():
actual.append(b.number)
# When using iterall() the Build data is delay loaded:
assert b._data is None # pylint: disable=W0212
if len(actual) == 2:
break
filepath = os.path.join(self.datadir, self.test_id) + '_expected.json'
if self.training or not os.path.isfile(filepath):
json.dump(actual, open(filepath, 'w'))
expected = json.load(open(filepath))
self.assertEqual(expected, actual)
def test_build_results(self):
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
# builds.data['results'] is not present.
self.assertEqual(
buildbot_json.SUCCESS, b.builders['linux_clang'].builds[1638].result)
self.assertEqual(
buildbot_json.SUCCESS,
b.builders['linux_clang'].builds[1638].steps[0].result)
def test_build_steps_keys(self):
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
build = b.builders['linux_clang'].builds[1638]
#self.assertEqual([0, 1, 2, 3], build.steps.keys)
# Grab cached version. There is none.
actual = [step for step in build.steps.cached_children]
self.assertEqual([], actual)
# Force load.
actual = [step for step in build.steps]
self.assertEqual(
[buildbot_json.SUCCESS] * 4, [step.result for step in actual])
self.assertEqual(
[True] * 4, [step.simplified_result for step in actual])
self.assertEqual(4, len(actual))
# Grab cached version.
actual = [step for step in build.steps.cached_children]
self.assertEqual(
[buildbot_json.SUCCESS] * 4, [step.result for step in actual])
self.assertEqual(4, len(actual))
def test_repr(self):
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
self.assertEqual('<Builder key=linux>', repr(b.builders['linux']))
self.assertEqual("<Builders keys=['linux']>", repr(b.builders))
def test_refresh(self):
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
self.assertEqual(True, b.refresh())
def test_build_step_cached_data(self):
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
build = 30157
self.assertEqual(
None, b.builders['linux'].current_builds[build].steps[0].cached_data)
b.builders['linux'].current_builds[build].steps[0].cache()
self.assertEqual(
'update_scripts',
b.builders['linux'].current_builds[build].steps[0].name)
self.assertEqual(
['browser_tests', 'ui_tests'],
b.builders['linux'].current_builds[build].steps.failed)
self.assertEqual(
2,
b.builders['linux'].current_builds[build].steps[2
].cached_data['step_number'])
b.refresh()
# cache_keys() does the same thing as cache().
b.builders['linux'].current_builds[build].steps.cache_keys()
def test_contains(self):
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
self.assertTrue('linux' in b.builders)
self.assertEqual(3, len(list(b.builders.cached_children)))
try:
# The dereference of an invalid key when keys are cached will throw an
# exception.
# pylint: disable=W0104
b.builders['non_existent']
self.fail()
except KeyError:
pass
def test_slaves(self):
b = buildbot_json.Buildbot('http://build.chromium.org/p/tryserver.chromium')
self.assertEqual(11, len(b.slaves.names))
self.assertEqual(False, b.slaves['mini34-m4'].connected)
def test_build_revision(self):
class Root(object):
@staticmethod
def read(_):
return {'sourceStamp': {'revision': 321}}
build = buildbot_json.Build(Root(), '123', None)
self.assertEqual(321, build.revision)
def test_build_revision_none(self):
class Root(object):
@staticmethod
def read(_):
return {}
build = buildbot_json.Build(Root(), '123', None)
self.assertEqual(None, build.revision)
def test_build_duration(self):
class Root(object):
@staticmethod
def read(_):
return {'times': [3, 15]}
build = buildbot_json.Build(Root(), '123', None)
self.assertEqual(12, build.duration)
self.assertEqual(3, build.start_time)
self.assertEqual(15, build.end_time)
def test_build_duration_none(self):
class Root(object):
@staticmethod
def read(_):
return {}
build = buildbot_json.Build(Root(), '123', None)
self.assertEqual(None, build.duration)
self.assertEqual(None, build.start_time)
self.assertEqual(None, build.end_time)
def test_build_steps_names(self):
class Root(object):
@staticmethod
def read(url): # pylint: disable=E0213
self.assertEqual('123', url)
return {'steps': [{'name': 'a'}, {'name': 'b'}]}
build = buildbot_json.Build(Root(), '123', None)
self.assertEqual(['a', 'b'], build.steps.keys)
def test_build_step_duration(self):
class Root(object):
@staticmethod
def read(_):
return {'steps': [{'times': [3, 15], 'isStarted': True}]}
build = buildbot_json.Build(Root(), '123', None)
build_step = buildbot_json.BuildStep(buildbot_json.BuildSteps(build), 0)
self.assertEqual(12, build_step.duration)
self.assertEqual(True, build_step.is_running)
self.assertEqual(True, build_step.is_started)
self.assertEqual(False, build_step.is_finished)
def test_build_step_duration_none(self):
class Root(object):
@staticmethod
def read(_):
return {'steps': [{}]}
build = buildbot_json.Build(Root(), '123', None)
build_step = buildbot_json.BuildStep(buildbot_json.BuildSteps(build), 0)
self.assertEqual(None, build_step.duration)
if __name__ == '__main__':
logging.basicConfig(level=
[logging.WARN, logging.INFO, logging.DEBUG][min(2, sys.argv.count('-v'))])
unittest.main()
| [
"1990zhaoshuang@163.com"
] | 1990zhaoshuang@163.com |
0feb26db0b3e1ad462a9a055b1f25937d285fe82 | 3f327d2654b85b922909925b9f475315d78f4652 | /Backend/newsapi/lib/python2.7/site-packages/newsapi/sources.py | a2865f6348bc04ca28a13159efcf5462a1d5167c | [
"MIT"
] | permissive | brianwang1217/SelfImprovementWebApp | 8db45914027537aee9614f9d218c93cc08dc90f8 | 7892fc4ee5434307b74b14257b29a5f05a0a0dd7 | refs/heads/master | 2022-12-13T15:01:08.595735 | 2018-06-23T04:46:06 | 2018-06-23T04:46:06 | 137,548,289 | 1 | 1 | MIT | 2022-05-25T01:28:29 | 2018-06-16T02:48:52 | Python | UTF-8 | Python | false | false | 4,265 | py | from newsapi.base_news import BaseNews
class Sources(BaseNews):
def __init__(self, API_KEY):
super(Sources, self).__init__(API_KEY)
self.endpoint = "https://newsapi.org/v1/sources"
self.sources = []
self.sources_base_info = {}
self.sources_id_info = {}
self.categories = {}
self.languages = {}
self.countries = {}
def get(self, category="", language="", country="", attributes_format=True):
self.payload['category'] = category
self.payload['language'] = language
self.payload['country'] = country
r = self.requests.get(self.endpoint, params=self.payload)
if r.status_code != 200:
raise BaseException("Either server didn't respond or has resulted in zero results.")
try:
content = r.json()
except ValueError:
raise ValueError("No json data could be retrieved.")
if attributes_format:
return self.AttrDict(content)
return content
def all(self):
return self.get()
def get_by_category(self, category):
return self.get(category=category)
def get_by_language(self, language):
return self.get(language=language)
def get_by_country(self, country):
return self.get(country=country)
def information(self):
content = self.get()
self.sources = content.sources
for index, source in enumerate(self.sources):
category_name = source['category']
language_name = source['language']
country_name = source['country']
identifier = source['id']
name = source['name']
desciption = source['description']
url = source['url']
urls_to_logos = source['urlsToLogos']
sort_bys_available = source['sortBysAvailable']
self.sources_base_info[name] = url
self.sources_id_info[name] = identifier
temp_dict = {
"id": identifier, "name": name,
"description": desciption, "url": url,
"urls_to_logos": urls_to_logos,
'sort_bys_available': sort_bys_available
}
if category_name in self.categories:
self.categories[category_name].append([temp_dict])
else:
self.categories[category_name] = [temp_dict]
if language_name in self.languages:
self.languages[language_name].append([temp_dict])
else:
self.languages[language_name] = [temp_dict]
if country_name in self.countries:
self.countries[country_name].append([temp_dict])
else:
self.countries[country_name] = [temp_dict]
return self
def all_sorted_information(self):
return self.sources
def all_categories(self, detailed=False):
if detailed:
return self.categories
return self.categories.keys()
def all_languages(self, detailed=False):
if detailed:
return self.languages
return self.languages.keys()
def all_countries(self, detailed=False):
if detailed:
return self.countries
return self.countries.keys()
def all_base_information(self):
return self.sources_base_info
def all_ids(self, detailed=False):
if detailed:
return self.sources_id_info
return self.sources_id_info.values()
def all_names(self, detailed=False):
if detailed:
return self.sources_base_info
return self.sources_base_info.keys()
def all_urls(self, detailed=False):
if detailed:
return self.sources_base_info
return self.sources_base_info.values()
def search(self, name):
matches = []
if not self.sources:
self.information()
for source in self.sources:
if name.lower() in source['name'].lower():
matches.append(source)
if not matches:
return "No match found!"
return matches
| [
"talk2shreyas@gmail.com"
] | talk2shreyas@gmail.com |
d8514ec6a03384c4d4b66ddbce1e0512079e660a | d085aa771a6f280dd80988e9df03e702c35be9c8 | /aayu/asgi.py | cc77094c9e795b9078c311de933fafb93bc1ee42 | [] | no_license | aayu1401/Coding-Culture | 6aa9c9c039e8e0e8a3f83438a99bf2110ea7109e | a2be2229cbaa93d83acd6242362a462376d6ce6a | refs/heads/master | 2022-12-22T21:17:20.643462 | 2020-09-28T16:47:29 | 2020-09-28T16:47:29 | 299,355,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
ASGI config for aayu project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'aayu.settings')
application = get_asgi_application()
| [
"aayushi_m@me.iitr.ac.in"
] | aayushi_m@me.iitr.ac.in |
c8742c451b8f04a368affdf326423791bc8e12b1 | 92237641f61e9b35ff6af6294153a75074757bec | /Algorithm/programmers/lv1_약수의 합.py | efcd4bdb217f84c5119f9de34b91c09be9bd7a7f | [] | no_license | taepd/study | 8ded115765c4f804813e255d9272b727bf41ec80 | 846d3f2a5a4100225b750f00f992a640e9287d9c | refs/heads/master | 2023-03-08T13:56:57.366577 | 2022-05-08T15:24:35 | 2022-05-08T15:24:35 | 245,838,600 | 0 | 1 | null | 2023-03-05T23:54:41 | 2020-03-08T15:25:15 | JavaScript | UTF-8 | Python | false | false | 513 | py | """
문제 설명
정수 n을 입력받아 n의 약수를 모두 더한 값을 리턴하는 함수, solution을 완성해주세요.
제한 사항
n은 0 이상 3000이하인 정수입니다.
입출력 예
n return
12 28
5 6
입출력 예 설명
입출력 예 #1
12의 약수는 1, 2, 3, 4, 6, 12입니다. 이를 모두 더하면 28입니다.
입출력 예 #2
5의 약수는 1, 5입니다. 이를 모두 더하면 6입니다.
"""
def solution(n):
return sum([i for i in range(1, n//2+1) if n % i == 0]) + n | [
"taepd1@gmail.com"
] | taepd1@gmail.com |
f36ffffea1f4374ec233376ec27a22b0aaeb5bf5 | 43c24c890221d6c98e4a45cd63dba4f1aa859f55 | /test/tests/thread_contention_test.py | 518e5dcd40cd8122d86907338a77f8d5d156ebea | [
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | jmgc/pyston | c8e4df03c33c6b81d20b7d51a781d9e10148238e | 9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f | refs/heads/master | 2020-12-11T07:51:58.968440 | 2020-09-11T14:38:38 | 2020-09-11T14:38:38 | 39,242,644 | 0 | 0 | NOASSERTION | 2020-09-11T14:38:39 | 2015-07-17T08:09:31 | Python | UTF-8 | Python | false | false | 416 | py | from thread import start_new_thread
import time
work = []
done = []
def run(num):
for i in xrange(num):
t = work.pop()
work.append(t - 1)
done.append(num)
print "starting!"
nthreads = 2
N = 100000
for i in xrange(nthreads):
work.append(N)
for i in xrange(nthreads):
t = start_new_thread(run, (N,))
while len(done) < nthreads:
time.sleep(0)
# print work
assert sum(work) == 0
| [
"kmod@dropbox.com"
] | kmod@dropbox.com |
79a1d1b99544c0df6ff3fa556be040c933b22cd8 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /4AtqpqKdXAFofa566_16.py | ea18541665d34bf4ba2b25fbe302826b6e09da0e | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | """
Create a function that takes in a _number as a string_ `n` and returns the
number **without trailing and leading zeros**.
* **Trailing Zeros** are the zeros _after_ a decimal point which _don't affect the value_ (e.g. the _last three_ zeros in `3.4000` and `3.04000`).
* **Leading Zeros** are the zeros _before_ a whole number which _don't affect the value_ (e.g. the _first three_ zeros in `000234` and `000230`).
### Examples
remove_leading_trailing("230.000") ➞ "230"
remove_leading_trailing("00402") ➞ "402"
remove_leading_trailing("03.1400") ➞ "3.14"
remove_leading_trailing("30") ➞ "30"
### Notes
* Return a **string**.
* If you get a number with `.0` on the end, return the _integer value_ (e.g. return `"4"` rather than `"4.0"`).
* If the number is `0`, `0.0`, `000`, `00.00`, etc... return `"0"`.
"""
def remove_leading_trailing(n):
f = (float(n))
i = int(f)
if (f == float(i)):
return str(i)
else:
return str(f)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
227c8e7d7c7faf708582ddde5050af8f34a85ecd | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_4/models/smtp_server_get_response.py | b9d7c1f33db6e8adf4bc96d6abeba9d4958fb2b7 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 4,221 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.4, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_4 import models
class SmtpServerGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'continuation_token': 'str',
'total_item_count': 'int',
'items': 'list[SmtpServer]'
}
attribute_map = {
'continuation_token': 'continuation_token',
'total_item_count': 'total_item_count',
'items': 'items'
}
required_args = {
}
def __init__(
self,
continuation_token=None, # type: str
total_item_count=None, # type: int
items=None, # type: List[models.SmtpServer]
):
"""
Keyword args:
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the `continuation_token` to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The `continuation_token` is generated if the `limit` is less than the remaining number of items, and the default sort is used (no sort is specified).
total_item_count (int): Total number of items after applying `filter` params.
items (list[SmtpServer])
"""
if continuation_token is not None:
self.continuation_token = continuation_token
if total_item_count is not None:
self.total_item_count = total_item_count
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `SmtpServerGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SmtpServerGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SmtpServerGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"tlewis@purestorage.com"
] | tlewis@purestorage.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.